create_lanes.sh

#!/bin/bash
set -euo pipefail
# Script adapted from the guide at:
# https://docs.ycrc.yale.edu/clusters-at-yale/guides/cryosparc/
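#
# Creates CryoSPARC "lanes" (cluster targets) on the master: for every Slurm
# account of the current user, one lane is registered per walltime limit
# (1, 6, 12, 24, 48 and 72 hours), in three variants (standard GPU, CPU-only,
# SSD-exclusive).
# Example: ./create_lanes.sh -p "$HOME"/cryosparc -v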
script_path=$(dirname "$0") # relative
script_path=$(cd "${script_path}" && pwd) # absolutized and normalized
install_path="$HOME"/cryosparc
# Usage
usage () {
echo "Usage:"
echo " -p install path : prefix for installation [${install_path}] "
echo " -v : be verbose"
echo " -h : print this notice"
echo ""
}
VERBOSE=false
# Parse options
while getopts ":p:vh" opt; do
    case $opt in
        p)
            install_path="${OPTARG}"
            ;;
        v)
            VERBOSE=true
            ;;
        h)
            usage
            OPTIND=1
            exit 0
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            usage
            OPTIND=1
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            usage
            OPTIND=1
            exit 1
            ;;
    esac
done
# Reset OPTIND to allow the next invocation to work
OPTIND=1
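# message: echo its argument only when -v (verbose) was given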
message() {
    if $VERBOSE
    then
        echo "${1}"
    fi
}
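# Make sure the CryoSPARC master is up before registering lanes
# (cryosparcm.sh is expected to be a sibling wrapper around the cryosparcm command).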
echo "[Info] Starting the master if needed"
if [ $(${script_path}/cryosparcm.sh status | grep -c "CryoSPARC is not running") -eq 1 ]; then
    ${script_path}/cryosparcm.sh start
fi
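# One set of lanes is created for every Slurm account the current user belongs to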
user_name=$(whoami)
user_accounts=$(sacctmgr show assoc where user=${user_name} format=Account%100 -P | grep -v Account)
mkdir -p "${install_path}/site_configs" && cd "${install_path}/site_configs"
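# Lane set 1: standard GPU lanes, one per (account, walltime); course accounts are skipped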
for _account in $user_accounts; do
    if [ $(sacctmgr show assoc where account=${_account} format=ParentName%100 -P | grep -v "Par Name" | grep -c courses) -eq 0 ]; then
        _worker_name=${HOSTNAME}-${_account}
        for time in 1 6 12 24 48 72; do
            mkdir -p "${install_path}/site_configs/${_worker_name}_${time}h" && cd "${install_path}/site_configs/${_worker_name}_${time}h"
            cat << EOF > cluster_info.json
{
"name": "${_worker_name}_${time}h",
"worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
"cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
"cache_reserve_mb": 10000,
"cache_quota_mb": 1000000,
"send_cmd_tpl": "{{ command }}",
"qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
"qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
"qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
"qinfo_cmd_tpl": "sinfo"
}
EOF
            cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --qos=gpu
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --mem 90GB
#SBATCH --time ${time}:00:00
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
module load gcc cuda python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
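            # Register the lane with the master; "cluster connect" reads cluster_info.json
            # and cluster_script.sh from the current directory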
            ${script_path}/cryosparcm.sh cluster connect
        done
    fi
done
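# Lane set 2: "-CPUOnly-Max20" lanes; same template except a single GPU is always requested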
for _account in $user_accounts; do
    if [ $(sacctmgr show assoc where account=${_account} format=ParentName%100 -P | grep -v "Par Name" | grep -c courses) -eq 0 ]; then
        _worker_name=${HOSTNAME}-${_account}-CPUOnly-Max20
        for time in 1 6 12 24 48 72; do
            mkdir -p "${install_path}/site_configs/${_worker_name}_${time}h" && cd "${install_path}/site_configs/${_worker_name}_${time}h"
            cat << EOF > cluster_info.json
{
"name": "${_worker_name}_${time}h",
"worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
"cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
"cache_reserve_mb": 10000,
"cache_quota_mb": 1000000,
"send_cmd_tpl": "{{ command }}",
"qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
"qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
"qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
"qinfo_cmd_tpl": "sinfo"
}
EOF
            cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --qos=gpu
#SBATCH --gres=gpu:1
#SBATCH --mem 90GB
#SBATCH --time ${time}:00:00
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
module load gcc cuda python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
            ${script_path}/cryosparcm.sh cluster connect
        done
    fi
done
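# Lane set 3: "-SSDExclusive" lanes; same template plus --exclusive to reserve a full node (and its local SSD)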
for _account in $user_accounts; do
    if [ $(sacctmgr show assoc where account=${_account} format=ParentName%100 -P | grep -v "Par Name" | grep -c courses) -eq 0 ]; then
        _worker_name=${HOSTNAME}-${_account}-SSDExclusive
        for time in 1 6 12 24 48 72; do
            mkdir -p "${install_path}/site_configs/${_worker_name}_${time}h" && cd "${install_path}/site_configs/${_worker_name}_${time}h"
            cat << EOF > cluster_info.json
{
"name": "${_worker_name}_${time}h",
"worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
"cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
"cache_reserve_mb": 10000,
"cache_quota_mb": 1000000,
"send_cmd_tpl": "{{ command }}",
"qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
"qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
"qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
"qinfo_cmd_tpl": "sinfo"
}
EOF
            cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --qos=gpu
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --mem 90GB
#SBATCH --time ${time}:00:00
#SBATCH --exclusive
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
module load gcc cuda python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
            ${script_path}/cryosparcm.sh cluster connect
        done
    fi
done
