Page MenuHomec4science

pi.slurm
No OneTemporary

File Metadata

Created
Sat, Nov 9, 14:31

pi.slurm

#!/bin/bash
#
# SLURM batch script: start a standalone Spark cluster across the allocated
# nodes (via the scitas_sparkservice helper scripts), run ./pi.py through
# spark-submit, then tear the cluster down.
#
#SBATCH --nodes=2
# ntasks per node MUST be one, because multiple slaves per worker doesn't
# work well with slurm + spark in this script (they would need increasing
# ports among other things)
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=24
#SBATCH --mem=8192
# Beware! $HOME will not be expanded and invalid paths will result in Slurm
# jobs hanging indefinitely with status CG (completing) when calling scancel!
#SBATCH --time=00:30:00
##SBATCH --partition=scitas
#set -x
#
module load spark/2.0.2
### modify here with correct path to scripts directory
export PATH=/home/rezzonic/scitas_sparkservice/scripts:$PATH
### end modify
echo "---- starting $0 on $HOSTNAME"
echo
#
MASTER_NODE=""
# Launches the Spark master/workers on the allocation; writes the master URL
# to a file named <jobid>_spark_master in the working directory.
start-spark.sh
echo "configuration done..."
set -x
#
#MASTER_IP=$(cat ./sparklogs_${SLURM_JOBID}/spark_master)
# Fail fast if the master file is missing; otherwise spark-submit would be
# called with an empty --master and hang or error confusingly.
MASTER_IP=$(cat "${SLURM_JOBID}_spark_master") || {
    echo "ERROR: could not read ${SLURM_JOBID}_spark_master" >&2
    stop-spark.sh
    exit 1
}
#
echo "$MASTER_IP"
# BUG FIX: original line had a duplicated "time time"; quote $MASTER_IP so an
# unexpected value cannot word-split into extra arguments.
time spark-submit \
    --executor-memory 5G \
    --master "$MASTER_IP" \
    ./pi.py
#
stop-spark.sh
#scancel -u foureste
#spark-submit --master $MASTER ./pi.py

Event Timeline