Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F109598627
study_sugama_J_K_params.py
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Tue, Apr 22, 14:58
Size
8 KB
Mime Type
text/x-python
Expires
Thu, Apr 24, 14:58 (2 d)
Engine
blob
Format
Raw Data
Handle
25716419
Attached To
rCOSOLVER COSOlver
study_sugama_J_K_params.py
View Options
import os
import shutil
import sys
import time

import f90nml
import h5py
import numpy as np

# custom tools
import tools
# Toggle between the gyrokinetic ('gk') and drift-kinetic ('dk') operator
# variants; this affects the scan directory name and the namelist flags below.
GK = False

# directories
cwd_directory = os.getcwd()
# The scan directory name encodes the chosen operator variant.
scandir_name = 'sugama.J.K.' + ('gk' if GK else 'dk')
scandir_path = os.path.join(cwd_directory, scandir_name)

# read input namelist (Fortran namelist template, edited per scan point)
input_file = os.path.join(cwd_directory, 'sugama.J.K.fort.90.model')
with open(input_file) as fh:
    inputs = f90nml.read(fh)

# Scans: J and K parameter values 0..10 inclusive
J_list = np.arange(0, 11)
K_list = np.arange(0, 11)

# Job infos
job_name_base = 'sugama.J.K'
job_file_name = 'job.submit.cmd'

# Get args: the program mode is the first CLI argument (case-insensitive)
MODE_SCAN = 'scan'
MODE_ANALYSE = 'analyse'
MODE_RUN_CLUSTER = 'run_cluster'
selected_mode = None
if len(sys.argv) >= 2:
    selected_mode = sys.argv[1].lower()
def print_preamble():
    """Print the study banner and the directory the script was started from."""
    banner = '=== Study Sugama J,K parameters ==='
    location = '- Run from: %s' % cwd_directory
    print(banner)
    print(location)
def get_job_name(J, K):
    """Return the canonical job directory name for the (J, K) scan point."""
    return 'job_J={:02d}_K={:02d}'.format(J, K)
def get_job_directory(J, K):
    """Return the absolute path of the job directory for the (J, K) point."""
    job_name = get_job_name(J, K)
    return os.path.join(scandir_path, job_name)
def get_output_path(job_directory):
    """Return the path of the captured solver stdout inside *job_directory*."""
    output_name = 'output.txt'
    return os.path.join(job_directory, output_name)
# MODE SCAN: iterate over every (J, K) pair, run the solver locally in a
# dedicated job directory and capture its stdout in output.txt.
if selected_mode == MODE_SCAN:
    print_preamble()
    if tools.mkdir(scandir_path):
        print('- Generating directory: %s' % scandir_name)
    timestamp_start = time.time()
    print('', end='')
    # Total number of scan points, hoisted out of the loop for the progress %.
    n_jobs = len(J_list) * len(K_list)
    for iJ, J in enumerate(J_list):
        for iK, K in enumerate(K_list):
            job_directory = get_job_directory(J, K)
            tools.mkdir(job_directory)
            # Progress line, rewritten in place via '\r'.
            timestamp_now = time.time()
            n_done = len(K_list) * iJ + iK + 1
            print('\r- Scan J=%02i / K=%02i [%.2f%%]\t%.2fs elapsed...'
                  % (J, K, 100 * n_done / n_jobs,
                     timestamp_now - timestamp_start), end='')
            # Copy the T4.in input file. shutil.copy raises on failure and is
            # safe with spaces in paths, unlike the former os.system("cp ...").
            shutil.copy(tools.T4_in_path, os.path.join(job_directory, "T4.in"))
            # Set the scan-point parameters in the namelist.
            inputs['basic']['impsugamajmax'] = J
            inputs['basic']['impsugamakmax'] = K
            inputs['operator_model']['gke'] = 1 if GK else 0
            inputs['operator_model']['gki'] = 1 if GK else 0
            # NOTE(review): 'w' lands in f90nml's second (force) parameter;
            # any truthy value allows overwriting -- confirm against f90nml API.
            inputs.write(os.path.join(job_directory, "fort.90"), 'w')
            # Run the solver from inside the job directory and save its stdout.
            os.chdir(job_directory)
            script_output = os.popen(tools.exec_path).read()
            # 'with' guarantees the output file is closed even on error
            # (the original open/write/close leaked the handle on failure).
            with open(get_output_path(job_directory), 'w') as output_file:
                output_file.write(script_output)
    print()
    print('- Total time: %.2fs' % (time.time() - timestamp_start))
    # Go back to the directory the script was started from.
    os.chdir(cwd_directory)
# MODE RUN_CLUSTER run scan on cluster
elif
selected_mode
==
MODE_RUN_CLUSTER
:
print_preamble
()
if
tools
.
mkdir
(
scandir_path
):
print
(
'- Generating directory:
%s
'
%
scandir_name
)
job_ids
=
[]
timestamp_start
=
time
.
time
()
for
iJ
,
J
in
enumerate
(
J_list
):
for
iK
,
K
in
enumerate
(
K_list
):
job_directory
=
get_job_directory
(
J
,
K
)
tools
.
mkdir
(
job_directory
)
# copy T4.in file
os
.
system
(
"cp "
+
tools
.
T4_in_path
+
" "
+
os
.
path
.
join
(
job_directory
,
"T4.in"
)
)
# Create symbolic link to exec
if
os
.
path
.
exists
(
os
.
path
.
join
(
job_directory
,
tools
.
exec_name
)):
os
.
remove
(
os
.
path
.
join
(
job_directory
,
tools
.
exec_name
))
os
.
system
(
'ln -s
%s
%s
'
%
(
tools
.
exec_path
,
os
.
path
.
join
(
job_directory
,
tools
.
exec_name
)))
# We set parameters
inputs
[
'basic'
][
'impsugamajmax'
]
=
J
inputs
[
'basic'
][
'impsugamakmax'
]
=
K
inputs
[
'operator_model'
][
'gke'
]
=
1
if
GK
else
0
inputs
[
'operator_model'
][
'gki'
]
=
1
if
GK
else
0
inputs
.
write
(
os
.
path
.
join
(
job_directory
,
"fort.90"
),
'w'
)
# Create job batch script
job_file_path
=
os
.
path
.
join
(
job_directory
,
job_file_name
)
job_config_file
=
open
(
job_file_path
,
'w'
)
job_config_file
.
write
(
'#!/bin/bash
\n
'
)
job_config_file
.
write
(
'#SBATCH --job-name=
%s
\n
'
%
(
job_name_base
+
"/"
+
get_job_name
(
J
,
K
)))
job_config_file
.
write
(
'#SBATCH --time=00:10:00
\n
'
)
job_config_file
.
write
(
'#SBATCH --nodes=1
\n
'
)
job_config_file
.
write
(
'#SBATCH --ntasks=8
\n
'
)
job_config_file
.
write
(
'#SBATCH --cpus-per-task=1
\n
'
)
job_config_file
.
write
(
'#SBATCH --output=output.txt
\n
'
)
job_config_file
.
write
(
'#SBATCH --error=errors.txt
\n
'
)
job_config_file
.
write
(
'module purge
\n
'
)
job_config_file
.
write
(
'module load PrgEnv-intel/17.0
\n
'
)
job_config_file
.
write
(
'srun ./CO 2 2 4
\n
'
)
job_config_file
.
close
()
# Run
os
.
chdir
(
job_directory
)
# os.system('rm *.o *.e')
cmd_out
=
os
.
popen
(
'sbatch
%s
'
%
job_file_name
)
.
read
()
job_id
=
int
(
cmd_out
.
split
(
'job'
)[
-
1
])
job_ids
.
append
(
job_id
)
tools
.
wait_untile_all_jobs_done
(
job_ids
)
print
(
'- Total time:
%.2f
s'
%
(
time
.
time
()
-
timestamp_start
))
# Go back to script_dir
os
.
chdir
(
cwd_directory
)
# MODE ANALYSE read generated data and perform analysis
elif
selected_mode
==
MODE_ANALYSE
:
print_preamble
()
# Define vars
CeiT_volume
=
[]
CeiF_volume
=
[]
CieT_volume
=
[]
CieF_volume
=
[]
Cii_volume
=
[]
Cee_volume
=
[]
for
iJ
,
J
in
enumerate
(
J_list
):
for
iK
,
K
in
enumerate
(
K_list
):
job_directory
=
get_job_directory
(
J
,
K
)
if
not
os
.
path
.
exists
(
job_directory
):
print
(
'- ERROR J=
%i
/ K=
%i
: job_directory does not exists!'
)
continue
# Get matrices
h5_ei
=
h5py
.
File
(
os
.
path
.
join
(
job_directory
,
'ei.h5'
),
'r'
)
CeiT
=
np
.
array
(
h5_ei
[
'Ceipj'
][
'CeipjT'
])
CeiF
=
np
.
array
(
h5_ei
[
'Ceipj'
][
'CeipjF'
])
CeiT_volume
.
append
(
np
.
copy
(
CeiT
))
CeiF_volume
.
append
(
np
.
copy
(
CeiF
))
h5_ie
=
h5py
.
File
(
os
.
path
.
join
(
job_directory
,
'ie.h5'
),
'r'
)
CieT
=
h5_ei
[
'Ceipj'
][
'CeipjT'
]
CieF
=
h5_ei
[
'Ceipj'
][
'CeipjF'
]
CieT_volume
.
append
(
CieT
)
CieF_volume
.
append
(
CieF
)
h5_self
=
h5py
.
File
(
os
.
path
.
join
(
job_directory
,
'self.h5'
),
'r'
)
Cee
=
h5_self
[
'Caapj'
][
'Ceepj'
]
Cii
=
h5_self
[
'Caapj'
][
'Ciipj'
]
Cee_volume
.
append
(
Cee
)
Cii_volume
.
append
(
Cii
)
#break # TODO(Sam): Remove...
#break # TODO(Sam): Remove...
# Wrap up volumes
CeiT_volume
=
np
.
array
(
CeiT_volume
)
CeiF_volume
=
np
.
array
(
CeiF_volume
)
CieT_volume
=
np
.
array
(
CieT_volume
)
CieF_volume
=
np
.
array
(
CieF_volume
)
Cii_volume
=
np
.
array
(
Cii_volume
)
Cee_volume
=
np
.
array
(
Cee_volume
)
# TODO(Sam): Check pk on a pas d'évolution sur les éléments de matrices...
matrix_size
=
CeiT_volume
.
shape
[
1
]
NJ
,
NK
=
len
(
J_list
),
len
(
K_list
)
CeiT_volume
=
CeiT_volume
.
reshape
([
NJ
,
NK
,
matrix_size
,
matrix_size
])
CeiF_volume
=
CeiF_volume
.
reshape
([
NJ
,
NK
,
matrix_size
,
matrix_size
])
CieT_volume
=
CieT_volume
.
reshape
([
NJ
,
NK
,
matrix_size
,
matrix_size
])
CieF_volume
=
CieF_volume
.
reshape
([
NJ
,
NK
,
matrix_size
,
matrix_size
])
Cee_volume
=
Cee_volume
.
reshape
([
NJ
,
NK
,
matrix_size
,
matrix_size
])
Cii_volume
=
Cii_volume
.
reshape
([
NJ
,
NK
,
matrix_size
,
matrix_size
])
print
(
'
\n
- Diff eiT:'
)
dK
=
np
.
diff
(
CeiT_volume
,
axis
=
1
)
print
(
dK
[
NJ
-
1
,
:,
:,
:]
.
shape
)
print
(
np
.
max
(
np
.
diff
(
CeiT_volume
,
axis
=
0
)))
print
(
np
.
max
(
np
.
diff
(
CeiT_volume
,
axis
=
1
)))
# print('\n- Diff eiF:')
# print(np.max(np.diff(CeiF_volume, axis=0)))
# print(np.max(np.diff(CeiF_volume, axis=1)))
# print('\n- Diff ii:')
# print(np.max(np.diff(Cii_volume, axis=0)))
# print(np.max(np.diff(Cii_volume, axis=1)))
# print(CeiT_volume[:, :, 2,1])
# NO MODE or WRONG MODE error
else
:
if
selected_mode
:
print
(
'MODE "
%s
" does not exist!'
%
selected_mode
)
else
:
print
(
'You must specify the program MODE:'
)
print
(
'> python study_sugama_J_K_params.py [MODE]...'
)
print
(
'-
%8s
\t
Start a scan of parameters J,K'
%
MODE_SCAN
)
print
(
'-
%8s
\t
Start a scan on cluster'
%
MODE_RUN_CLUSTER
)
print
(
'-
%8s
\t
Run analysis of results'
%
MODE_ANALYSE
)
Event Timeline
Log In to Comment