create_reserved_lanes.sh

#!/bin/bash
set -euo pipefail
# Script made from the page
# https://docs.ycrc.yale.edu/clusters-at-yale/guides/cryosparc/
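#
# Creates one CryoSPARC "cluster" lane per group account entered below; each
# lane submits jobs to SLURM under that account and the given reservation.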
script_path=$(dirname "$0") # relative
script_path=$(cd "${script_path}" && pwd) # absolutized and normalized
install_path="$HOME"/cryosparc
# Usage
usage () {
    echo "Usage:"
    echo "  -p install path : prefix for installation [${install_path}]"
    echo "  -v              : be verbose"
    echo "  -h              : print this notice"
    echo ""
}
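# Example invocation (paths are illustrative):
#   ./create_reserved_lanes.sh -p "$HOME/cryosparc" -v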
VERBOSE=false
# Parse options
while getopts ":p:vh" opt; do
    case $opt in
        p)
            install_path="${OPTARG}"
            ;;
        v)
            VERBOSE=true
            ;;
        h)
            usage
            OPTIND=1
            exit 0
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            usage
            OPTIND=1
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            usage
            OPTIND=1
            exit 1
            ;;
    esac
done
# Reset OPTIND to allow the next invocation to work
OPTIND=1
# Prompt only after option parsing so that -h prints the usage without prompting
read -r -p "Enter group account at SCITAS: " hpc_account
read -r -p "Enter reservation name: " reservation_name
message() {
    if ${VERBOSE}; then
        echo "${1}"
    fi
}
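# Lane registration talks to the running master, so start it first; the grep
# relies on 'cryosparcm status' printing "CryoSPARC is not running" when the
# master is down.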
echo "[Info] Starting the master if needed"
if [ $(${script_path}/cryosparcm.sh status | grep -c "CryoSPARC is not running") -eq 1 ]; then
    ${script_path}/cryosparcm.sh start
fi
cd "${install_path}/site_configs"
for _account in $hpc_account; do
if [ $(sacctmgr show assoc where account=${_account} format=ParentName%100 -P | grep -v "Par Name" | grep -c courses) -eq 0 ]; then
_worker_name=${HOSTNAME}-${_account}-reserved
max_wall=$(sacctmgr show assoc where account=${_account} format=MaxWall -P | grep -v MaxWall | head -1)
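# NOTE: max_wall is queried but not used below; the generated SLURM script
# hardcodes a 3-day walltime.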
for mem in 90; do
mkdir -p ${install_path}/site_configs/${_worker_name}_${mem}gb && cd ${install_path}/site_configs/${_worker_name}_${mem}gb
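# cluster_info.json describes the lane to CryoSPARC: the worker binary, the
# per-node SSD cache location, and the shell commands used to submit (sbatch),
# poll (squeue) and cancel (scancel) jobs. The {{ ... }} placeholders are
# filled in by CryoSPARC itself.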
cat << EOF > cluster_info.json
{
"name": "${_worker_name}_${mem}gb",
"worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
"cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
"cache_reserve_mb": 10000,
"cache_quota_mb": 1000000,
"send_cmd_tpl": "{{ command }}",
"qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
"qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
"qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
"qinfo_cmd_tpl": "sinfo"
}
EOF
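# cluster_script.sh is the sbatch template CryoSPARC renders for each job:
# {{ num_cpu }}, {{ num_gpu }}, {{ job_dir_abs }} and {{ run_cmd }} are
# substituted per job, while ${mem}, ${_account} and ${reservation_name} are
# expanded right now, when the heredoc is written.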
cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --qos=gpu
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --mem ${mem}GB
#SBATCH --time 3-00:00:00
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
#SBATCH --reservation $reservation_name
module load gcc cuda python
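# Expose the job's local scratch (TMPDIR) as the per-user cache directory
# that cluster_info.json points CryoSPARC at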
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
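# 'cluster connect' registers (or updates) the lane using the cluster_info.json
# and cluster_script.sh found in the current directory.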
${script_path}/cryosparcm.sh cluster connect
done
fi
done
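# Second set of lanes, named *-CPUOnly-Max20. The SLURM template below still
# requests the gpu QOS and one GPU, presumably so these jobs run inside the
# same GPU-node reservation.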
for _account in $hpc_account; do
if [ $(sacctmgr show assoc where account=${_account} format=ParentName%100 -P | grep -v "Par Name" | grep -c courses) -eq 0 ]; then
_worker_name=${HOSTNAME}-${_account}-reserved-CPUOnly-Max20
max_wall=$(sacctmgr show assoc where account=${_account} format=MaxWall -P | grep -v MaxWall | head -1)
for mem in 90; do
mkdir -p ${install_path}/site_configs/${_worker_name}_${mem}gb && cd ${install_path}/site_configs/${_worker_name}_${mem}gb
cat << EOF > cluster_info.json
{
"name": "${_worker_name}_${mem}gb",
"worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
"cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
"cache_reserve_mb": 10000,
"cache_quota_mb": 1000000,
"send_cmd_tpl": "{{ command }}",
"qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
"qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
"qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
"qinfo_cmd_tpl": "sinfo"
}
EOF
cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --qos=gpu
#SBATCH --gres=gpu:1
#SBATCH --mem ${mem}GB
#SBATCH --time 3-00:00:00
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
#SBATCH --reservation $reservation_name
module load gcc cuda python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
${script_path}/cryosparcm.sh cluster connect
done
fi
done
