diff --git a/examples/spark.slurm b/examples/spark.slurm
index 170aaa2..92efdce 100644
--- a/examples/spark.slurm
+++ b/examples/spark.slurm
@@ -1,42 +1,42 @@
 #!/bin/bash
 #
 #SBATCH --nodes=2
 # ntasks per node MUST be one, because multiple slaves per work doesn't
 # work well with slurm + spark in this script (they would need increasing
 # ports among other things)
 #SBATCH --ntasks-per-node=1
 #SBATCH --cpus-per-task=24
 #SBATCH --mem=8192
 # Beware! $HOME will not be expanded and invalid paths will result Slurm jobs
 # hanging indefinitely with status CG (completing) when calling scancel!
 ##SBATCH --time=96:00:00
 #SBATCH --partition=scitas
 #SBATCH --qos=scitas
 
 #set -x
 #
 echo "---- starting $0 on $HOSTNAME"
 echo
 #
 MASTER_NODE=""
-./start-spark.sh
+start-spark.sh
 echo "configuration done..."
 set -x
 #
 #echo $MASTER_NODE
 #echo $MASTER
 #
 #MASTER_IP=$(cat ./sparklogs_${SLURM_JOBID}/spark_master)
 MASTER_IP=$(cat ${SLURM_JOBID}_spark_master)
 #
 echo $MASTER_IP
 
 time
 time spark-submit \
     --executor-memory 5G \
     --master $MASTER_IP \
     ./pi.py
 #
 stop-spark.sh
 #scancel -u foureste
 #spark-submit --master $MASTER ./pi.py