Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F98645593
create_lanes.sh
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Wed, Jan 15, 05:34
Size
5 KB
Mime Type
text/x-shellscript
Expires
Fri, Jan 17, 05:34 (1 d, 19 h)
Engine
blob
Format
Raw Data
Handle
23619408
Attached To
rSCCRYOSPARC scitas-cryosparc-scripts
create_lanes.sh
View Options
#!/bin/bash
set -euo pipefail
# Script made from the page
# https://docs.ycrc.yale.edu/clusters-at-yale/guides/cryosparc/

# Resolve the directory holding this script, then normalize it to an
# absolute path. (Currently informational only — nothing below reads it.)
script_path=$(dirname "$0")
script_path=$(cd "${script_path}" && pwd)

# Default installation prefix; may be overridden later via the -p option.
install_path="${HOME}/cryosparc"

# Derived locations inside the installation prefix.
db_path="${install_path}/database"
worker_path="${install_path}/cryosparc2_worker"
# Print a short help notice describing the accepted options.
# Reads the global install_path to show the current installation prefix.
usage()
{
  local _line
  for _line in \
    "Usage:" \
    " -p install path : prefix for installation [${install_path}] " \
    " -v : be verbose" \
    " -h : print this notice" \
    ""
  do
    echo "${_line}"
  done
}
# Verbosity flag; flipped to true by -v.
VERBOSE=false

# Parse options
while getopts ":p:vh" opt; do
  case "$opt" in
    p) install_path="${OPTARG}" ;;
    v) VERBOSE=true ;;
    h)
      usage
      OPTIND=1
      exit 0
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      usage
      OPTIND=1
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      usage
      OPTIND=1
      exit 1
      ;;
  esac
done
# Reset OPTIND to allow the next invocation to work
OPTIND=1
# Emit an informational line, but only when verbose mode is enabled.
# $1 - text to print; reads the global VERBOSE flag (the strings
# "true"/"false", executed as a command, as in the original).
message()
{
  $VERBOSE || return 0
  echo "${1}"
}
# Start the CryoSPARC master daemon unless it is already running.
# The status text is produced by cryosparcm.sh itself; grep -q replaces the
# original "grep -c ... -eq 1" counting idiom, and the command path is quoted
# so an installation prefix containing spaces cannot word-split.
echo "[Info] Starting the master if needed"
if "${install_path}/cryosparc_master/bin/cryosparcm.sh" status | grep -q "CryoSPARC is not running"; then
  "${install_path}/cryosparc_master/bin/cryosparcm.sh" start
fi

# Lanes are registered for the user running this script.
user_name=$(whoami)
# Collect the user's Slurm accounts, one per line ("-P" gives parseable
# pipe-free single-column output; the header line "Account" is stripped).
# user_accounts is deliberately left unquoted at its use sites so the
# for-loops below can word-split it into individual account names.
user_accounts=$(sacctmgr show assoc where user="${user_name}" format=Account%100 -P | grep -v Account)

# Create and enter the directory that will hold one sub-directory per lane.
# Quoted so a prefix with spaces cannot word-split the mkdir/cd arguments.
mkdir -p "${install_path}/site_configs" && cd "${install_path}/site_configs"
# Create one GPU lane per (account, walltime) pair.
# Accounts whose Slurm parent account is "courses" are skipped.
for _account in $user_accounts; do
  # grep -c counts matching lines; 0 means this account is not under "courses".
  if [ "$(sacctmgr show assoc where account="${_account}" format=ParentName%100 -P | grep -v "Par Name" | grep -c courses)" -eq 0 ]; then
    _worker_name="${HOSTNAME}-${_account}"
    # One lane per maximum walltime, in hours.
    for _hours in 1 6 12 24 48 72; do
      # Each lane lives in its own directory with a cluster_info/cluster_script pair.
      _lane_dir="${install_path}/site_configs/${_worker_name}_${_hours}h"
      mkdir -p "${_lane_dir}" && cd "${_lane_dir}"
      # Lane description consumed by "cryosparcm cluster connect" below.
      # {{ ... }} placeholders are CryoSPARC template variables, left literal.
      cat << EOF > cluster_info.json
{
    "name": "${_worker_name}_${_hours}h",
    "worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
    "cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
    "cache_reserve_mb": 10000,
    "cache_quota_mb": 1000000,
    "send_cmd_tpl": "{{ command }}",
    "qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
    "qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
    "qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
    "qinfo_cmd_tpl": "sinfo"
}
EOF
      # Slurm submission template. \${TMPDIR} is escaped on purpose: it must
      # expand at job runtime on the compute node, not while writing this file.
      cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --mem 90GB
#SBATCH --time ${_hours}:00:00
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
module load gcc cuda/11.0.2 python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
      # Register this lane with the CryoSPARC master (reads cluster_info.json
      # and cluster_script.sh from the current directory).
      "${install_path}/cryosparc_master/bin/cryosparcm" cluster connect
    done
  fi
done
# Create one "CPUOnly-Max20" lane per (account, walltime) pair.
# Accounts whose Slurm parent account is "courses" are skipped.
for _account in $user_accounts; do
  # grep -c counts matching lines; 0 means this account is not under "courses".
  if [ "$(sacctmgr show assoc where account="${_account}" format=ParentName%100 -P | grep -v "Par Name" | grep -c courses)" -eq 0 ]; then
    _worker_name="${HOSTNAME}-${_account}-CPUOnly-Max20"
    # One lane per maximum walltime, in hours.
    for _hours in 1 6 12 24 48 72; do
      # Each lane lives in its own directory with a cluster_info/cluster_script pair.
      _lane_dir="${install_path}/site_configs/${_worker_name}_${_hours}h"
      mkdir -p "${_lane_dir}" && cd "${_lane_dir}"
      # Lane description consumed by "cryosparcm cluster connect" below.
      # {{ ... }} placeholders are CryoSPARC template variables, left literal.
      cat << EOF > cluster_info.json
{
    "name": "${_worker_name}_${_hours}h",
    "worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
    "cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
    "cache_reserve_mb": 10000,
    "cache_quota_mb": 1000000,
    "send_cmd_tpl": "{{ command }}",
    "qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
    "qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
    "qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
    "qinfo_cmd_tpl": "sinfo"
}
EOF
      # Slurm submission template. \${TMPDIR} is escaped on purpose: it must
      # expand at job runtime on the compute node, not while writing this file.
      # NOTE(review): this "CPUOnly" lane still requests --gres=gpu:1 (the GPU
      # lane uses {{ num_gpu }}) — confirm a GPU is really required here.
      cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --gres=gpu:1
#SBATCH --mem 90GB
#SBATCH --time ${_hours}:00:00
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
module load gcc cuda/11.0.2 python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
      # Register this lane with the CryoSPARC master (reads cluster_info.json
      # and cluster_script.sh from the current directory).
      "${install_path}/cryosparc_master/bin/cryosparcm" cluster connect
    done
  fi
done
# Create one "SSDExclusive" lane (whole node reserved via --exclusive) per
# (account, walltime) pair. Accounts under the "courses" parent are skipped.
for _account in $user_accounts; do
  # grep -c counts matching lines; 0 means this account is not under "courses".
  if [ "$(sacctmgr show assoc where account="${_account}" format=ParentName%100 -P | grep -v "Par Name" | grep -c courses)" -eq 0 ]; then
    _worker_name="${HOSTNAME}-${_account}-SSDExclusive"
    # One lane per maximum walltime, in hours.
    for _hours in 1 6 12 24 48 72; do
      # Each lane lives in its own directory with a cluster_info/cluster_script pair.
      _lane_dir="${install_path}/site_configs/${_worker_name}_${_hours}h"
      mkdir -p "${_lane_dir}" && cd "${_lane_dir}"
      # Lane description consumed by "cryosparcm cluster connect" below.
      # {{ ... }} placeholders are CryoSPARC template variables, left literal.
      cat << EOF > cluster_info.json
{
    "name": "${_worker_name}_${_hours}h",
    "worker_bin_path": "${install_path}/cryosparc_worker/bin/cryosparcw",
    "cache_path": "/tmp/{{ cryosparc_username }}/cryosparc_cache",
    "cache_reserve_mb": 10000,
    "cache_quota_mb": 1000000,
    "send_cmd_tpl": "{{ command }}",
    "qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
    "qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
    "qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
    "qinfo_cmd_tpl": "sinfo"
}
EOF
      # Slurm submission template. \${TMPDIR} is escaped on purpose: it must
      # expand at job runtime on the compute node, not while writing this file.
      cat << EOF > cluster_script.sh
#!/bin/bash -l
#SBATCH --job-name cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --mem 90GB
#SBATCH --time ${_hours}:00:00
#SBATCH --exclusive
#SBATCH -o {{ job_dir_abs }}/slurm.out
#SBATCH -e {{ job_dir_abs }}/slurm.err
#SBATCH -A ${_account}
module load gcc cuda/11.0.2 python
mkdir -p /tmp/${USER}
ln -sf \${TMPDIR} /tmp/${USER}/cryosparc_cache
{{ run_cmd }}
EOF
      # Register this lane with the CryoSPARC master (reads cluster_info.json
      # and cluster_script.sh from the current directory).
      "${install_path}/cryosparc_master/bin/cryosparcm" cluster connect
    done
  fi
done

# Left here on purpose: uncomment to stop the master when the script finishes.
#${install_path}/cryosparc_master/bin/cryosparcm.sh stop
Event Timeline
Log In to Comment