#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 25 14:38:47 2022
@author: ekinkubilay
"""
import os
import sys
#import h5py
sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python"))
sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmufft/python"))
sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmugrid/python"))
import muFFT
import muSpectre as msp
import numpy as np
import muGrid
from muFFT import Stencils2D
from muFFT import Stencils3D
import cProfile, pstats
import time as timer
from mpi4py import MPI
from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
from petsc4py import PETSc
import argparse
import gc
#from transfer_previous_layer import transfer_previous_layer
def update_temperature_field(cell, temperature_array):
    sub_domain_loc = cell.subdomain_locations
    #temperature = np.zeros((cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
    temperature = np.zeros((1, cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],
                            cell.nb_subdomain_grid_pts[1], cell.nb_subdomain_grid_pts[2]))
    for pixel_id, pixel_coord in cell.pixels.enumerate():
        i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
        a = i - sub_domain_loc[0]
        b = j - sub_domain_loc[1]
        c = k - sub_domain_loc[2]
        for q in range(cell.nb_quad_pts):
            temperature[0, q, a, b, c] = np.max([temperature_array[0, q, i, j, k], 0]) + 293

    return temperature
def parse_args():
    """
    Function to handle arguments
    """
    parser = argparse.ArgumentParser(description='Run given layer')
    parser.add_argument("layer_no", type=int, help='layer_number')
    parser.add_argument("mu_rank", type=int, help='number of processes for muSpectre')
    args = parser.parse_args()
    return args
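# Hypothetical usage sketch (not part of the original file): the script takes the
# positional arguments layer_no and mu_rank and is meant to be launched under MPI,
# e.g. something along the lines of
#   mpirun -n 8 python3 convert_temperatures.py 42 2
# where 42 would be an example layer number and 2 the number of muSpectre processes.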
def main():
    args = parse_args()
    layer_no = args.layer_no

    os.chdir('/scratch/kubilay/mechanical_model/results')

    #comm_py is WORLD communicator (with even size), it is split into two
    #set muspectre communicator
    from mpi4py import MPI
    comm_py = MPI.COMM_WORLD
    rank_py = comm_py.rank
    size_py = comm_py.size
    #print("Hello! I'm rank %d from %d running in total..." % (comm_py.rank, comm_py.size))

    #size must be even; color tags which sub-communicator each rank joins
    mu_rank = args.mu_rank
    color = np.sign(int(rank_py/(size_py-mu_rank)))
    #split communicator
    comm = comm_py.Split(color, rank_py)
    rank = comm.rank
    size = comm.size
    #print(rank_py, size_py, rank, size, color)
    #print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
    #define empty variable for broadcast buffer
    nb_steps = None

    dx, dy, dz = 0.0005, 0.0005, 0.00003
    z_lim = 0.00801
    x_lim = 0.055
    element_dimensions = [dx, dy, dz]

    #define global simulation parameters
    dim = 3
    nb_quad = 6
    #nb_domain_grid_pts = [11,11,11]

    #define constant material properties
    young = 208          #GPa
    poisson = 0.3        #unitless
    alpha_s = 0.55       #unitless
    Delta_E_bs = 1       #eV
    epsilon_sat = 0.35   #unitless
    sigma_gb = 0.150     #GPa
    sigma_s0 = 0.320     #GPa
    sigma_f = 1.240      #GPa

    ## define the convergence tolerance for the Newton-Raphson increment
    newton_tol = 1e-6
    equil_tol = newton_tol
    ## tolerance for the solver of the linear cell
    cg_tol = -1e-6

    ## macroscopic strain
    strain_step = np.zeros((3, 3))  #no applied external strain
    maxiter = 1000  # for linear cell solver

    #exact coordinates of each quadrature point relative to the local element coordinates
    quad_coords = [np.array([0.75, 0.5, 0.25]), np.array([0.75, 0.25, 0.5]),
                   np.array([0.5, 0.75, 0.25]), np.array([0.25, 0.75, 0.5]),
                   np.array([0.5, 0.25, 0.75]), np.array([0.25, 0.5, 0.75])]
    quad_coords = quad_coords*np.array([dx, dy, dz])
    if color == 0:
        #local size and rank on the split communicator, this communicator is called comm
        rank = comm.rank
        procs = comm.size
        other_size = size_py - size
        if rank == 0:
            print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
        start_time = MPI.Wtime()

        send_ranks = np.array_split(np.arange(0, other_size, 1)+size, size)[rank]
        #print(send_ranks)

        from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function

        #get the geometry and temperature via fenics
        mesh = Mesh(comm)
        #read the mesh and the temperature function at initial timestep
        #using Fenics within the FEM communicator
        filename = "/work/lammm/kubilay/mesh/layer_"+str(layer_no).zfill(3)+".xdmf"
        temperature_filename = "/work/lammm/kubilay/results/temperature/temperature_layer_"+str(layer_no).zfill(3)+".h5"

        f = XDMFFile(comm, filename)
        f.read(mesh)
        MeshTransformation.rescale(mesh, 1e-3, Point(0, 0, 0))  #in m
        f.close()
        del(f)
        gc.collect()
        bounding_box = mesh.bounding_box_tree()

        hdf5_file = HDF5File(comm, temperature_filename, "r")
        V = FunctionSpace(mesh, "CG", 1)
        temperature_field_store = Function(V)

        nb_steps = hdf5_file.attributes('Temperature')['count'] - 1
        timestamp_arr = np.zeros(nb_steps+1)
        for t in range(nb_steps+1):
            vec_name = "/Temperature/vector_%d" % t
            time = hdf5_file.attributes(vec_name)['timestamp']
            timestamp_arr[t] = time

        no = 0
        vec_name = "/Temperature/vector_%d" % no
        hdf5_file.read(temperature_field_store, vec_name)
        #obtain local domain dimensions
        xmax = np.max(mesh.coordinates()[:, 0])
        xmin = np.min(mesh.coordinates()[:, 0])
        ymax = np.max(mesh.coordinates()[:, 1])
        ymin = np.min(mesh.coordinates()[:, 1])
        zmax = np.max(mesh.coordinates()[:, 2])
        zmin = np.min(mesh.coordinates()[:, 2])

        #communicate max/min domain values within FEM communicator
        xmax = comm.allreduce(xmax, op=MPI.MAX)
        ymax = comm.allreduce(ymax, op=MPI.MAX)
        zmax = comm.allreduce(zmax, op=MPI.MAX)
        xmin = comm.allreduce(xmin, op=MPI.MIN)
        ymin = comm.allreduce(ymin, op=MPI.MIN)
        zmin = comm.allreduce(zmin, op=MPI.MIN)
        #z_lim = zmin #DELETE TO CONTROL LAYERS

        #calculate domain lengths and element dimensions
        domain_lengths = [x_lim-xmin, ymax-ymin, zmax-z_lim]
        nb_domain_grid_pts = list(np.array(domain_lengths)/np.array(element_dimensions) + 1)
        nb_domain_grid_pts = [round(i) for i in nb_domain_grid_pts]

        #array to store material (and not vacuum or base plate)
        bulk = np.zeros(nb_domain_grid_pts)

        #domain discretisation TO DO: muspectre has this array already
        x_dim = np.linspace(xmin, x_lim, nb_domain_grid_pts[0])
        y_dim = np.linspace(ymin, ymax, nb_domain_grid_pts[1])
        z_dim = np.linspace(z_lim, zmax, nb_domain_grid_pts[2])

        #obtain the material (and not internal vacuum) by checking whether the point
        #is inside or outside the submesh. This creates comm.size partially filled
        #arrays, each non-zero only on the sub-mesh present at that rank.
        #obtain the temperature by searching every point at every sub-mesh domain;
        #pass if fenics raises an error (due to the point not being present on that domain)
        for i, x in enumerate(x_dim):
            for j, y in enumerate(y_dim):
                for k, z in enumerate(z_dim):
                    if len(bounding_box.compute_collisions(Point(x+0.5*dx, y+0.5*dy, z+0.5*dz))) != 0:
                        bulk[i, j, k] = 1

        del(bounding_box)
        gc.collect()
        #gather the material across all FEM ranks and broadcast to all FEM ranks
        #bulk and temperature_array are global (size corresponds to complete domain)
        bulk = comm.allreduce(bulk, op=MPI.SUM)
        bulk[bulk > 1] = 1

        #add base plate at the bottom and vacuum on top and sides
        #increase the number of grid points and total domain length in each dimension
        bulk = np.insert(bulk, 0, 2, axis=2)
        nb_domain_grid_pts[2] += 1
        domain_lengths[2] += element_dimensions[2]

        #domain discretisation TO DO: muspectre has this array already
        x_dim = np.linspace(xmin, x_lim, nb_domain_grid_pts[0])
        y_dim = np.linspace(ymin, ymax, nb_domain_grid_pts[1])
        z_dim = np.linspace(z_lim-element_dimensions[2], zmax, nb_domain_grid_pts[2])

        coords = np.zeros([nb_domain_grid_pts[0], nb_domain_grid_pts[1], nb_domain_grid_pts[2], 3])
        temperature_array = np.zeros((1, nb_quad)+tuple(nb_domain_grid_pts))

        for i in range(nb_domain_grid_pts[0]):
            coords[i, :, :, 0] = x_dim[i]
        for i in range(nb_domain_grid_pts[1]):
            coords[:, i, :, 1] = y_dim[i]
        for i in range(nb_domain_grid_pts[2]):
            coords[:, :, i, 2] = z_dim[i]

        #quadrature coordinates relative to the part
        #local bulk information on each partition
        quad_coordinates_array = np.zeros(tuple(np.shape(temperature_array))+(3,))
        #bulk_local = np.zeros_like(bulk)
        for i, x in enumerate(x_dim):
            for j, y in enumerate(y_dim):
                for k, z in enumerate(z_dim):
                    if bulk[i, j, k] == 1:
                        for q in range(nb_quad):
                            try:
                                quad_coordinates_array[0, q, i, j, k] = Point(x+quad_coords[q, 0], y+quad_coords[q, 1], z+quad_coords[q, 2])[:]
                            except RuntimeError:
                                pass

        local_multi_index = []
        local_multi_index_coordinates = []
        with np.nditer([bulk], flags=['multi_index'], op_flags=['readwrite']) as it:
            for iterable in it:
                if bulk[it.multi_index] == 1:
                    for q in range(nb_quad):
                        final_index = tuple((0, q))+it.multi_index
                        try:
                            temperature_field_store(quad_coordinates_array[tuple((0, q))+it.multi_index])
                            #bulk_local[it.multi_index] = bulk[it.multi_index]
                            local_multi_index.append(final_index)
                            local_multi_index_coordinates.append(quad_coordinates_array[final_index])
                            #break
                        except RuntimeError:
                            pass

        # print("element_dimensions", element_dimensions)
        # print("element_dimensions", nb_domain_grid_pts)
        # print(x_dim)

        #store the temperature on the array with the right index if the point is found
        #this creates comm.size temperature_arrays, each non-zero only on
        #the sub-mesh present in that rank
        #mesh and temperature are divided differently across communicators!! TO LOOK!
        # for i,x in enumerate(x_dim):
        #     for j,y in enumerate(y_dim):
        #         for k,z in enumerate(z_dim):
        #             if bulk[i,j,k] == 1:
        #                 for q in range(nb_quad):
        #                     try:
        #                         temperature_array[0,q,i,j,k] = temperature_field_store(Point(x,y,z))
        #                     except RuntimeError:
        #                         pass
        #send global arrays (material/temperature/coordinates) from FEM communicator
        #to muSpectre communicator
        for proc_rank in send_ranks:
            comm_py.send(rank, proc_rank, tag=0)
            comm_py.send(bulk, proc_rank, tag=1)
            comm_py.send(nb_domain_grid_pts, proc_rank, tag=3)
            comm_py.send(domain_lengths, proc_rank, tag=4)
            comm_py.send(coords, proc_rank, tag=5)
            comm_py.send(timestamp_arr, proc_rank, tag=6)
            comm_py.send(temperature_array, proc_rank, tag=7)
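        # Tag convention of this handshake, as implemented above and mirrored by the
        # receiving (muSpectre) side below: tag 0 = sender rank, 1 = bulk,
        # 3 = nb_domain_grid_pts, 4 = domain_lengths, 5 = coords,
        # 6 = timestamp_arr, 7 = temperature_array.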
        list_subdomain_loc = [None]*size_py
        list_nb_subdomain_grid_pts = [None]*size_py
        #print(rank_py, send_ranks)
        for f, proc_rank in enumerate(send_ranks):
            list_subdomain_loc[proc_rank] = comm_py.recv(source=proc_rank, tag=proc_rank)
        # for i,j in enumerate(list_subdomain_loc):
        #     print(rank, i,j)
        for f, proc_rank in enumerate(send_ranks):
            list_nb_subdomain_grid_pts[proc_rank] = comm_py.recv(source=proc_rank, tag=proc_rank)
        # for i,j in enumerate(list_nb_subdomain_grid_pts):
        #     print(rank, i,j)

        end_time = MPI.Wtime()
        #print("FENICS INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
    #for muSpectre processes
    if color == 1:
        #define muSpectre communicator, this communicator is called comm_mu
        comm_mu = muGrid.Communicator(comm)
        #local size and rank on the split communicator
        rank = comm.rank
        procs = comm.size
        start_time = MPI.Wtime()

        #receive material arrays and system size parameters from the FEM communicator
        recieve_rank = comm_py.recv(source=MPI.ANY_SOURCE, tag=0)
        bulk = comm_py.recv(source=recieve_rank, tag=1)
        nb_domain_grid_pts = comm_py.recv(source=recieve_rank, tag=3)
        domain_lengths = comm_py.recv(source=recieve_rank, tag=4)
        coords = comm_py.recv(source=recieve_rank, tag=5)
        #receive the timestamp array from the FEM communicator
        timestamp_arr = comm_py.recv(source=recieve_rank, tag=6)
        delta_t_vector = timestamp_arr[1:] - timestamp_arr[:-1]
        temperature_array = comm_py.recv(source=recieve_rank, tag=7)
        #create cell using muSpectre communicator
        cell = msp.cell.CellData.make_parallel(list(nb_domain_grid_pts), list(domain_lengths), comm_mu)
        cell.nb_quad_pts = nb_quad
        cell.get_fields().set_nb_sub_pts("quad", nb_quad)

        #define materials
        material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs,
                                                    epsilon_sat, sigma_gb, sigma_s0, sigma_f)
        vacuum = msp.material.MaterialLinearElastic1_3d.make(cell, "vacuum", 0.0, poisson)
        base = msp.material.MaterialLinearElastic1_3d.make(cell, "base", 20*young, poisson)

        #register fields and field collection
        material_phase = cell.get_fields().register_int_field("material2", 1, 'quad')
        bulk_array = np.zeros(np.shape(material_phase.array()), np.dtype(float))
        melt_bool = -1*np.ones(np.shape(bulk_array), np.dtype(np.int32))

        sub_domain_loc = cell.subdomain_locations
        nb_subdomain_grid_pts = cell.nb_subdomain_grid_pts
        #assign materials
        for pixel_index, pixel in enumerate(cell.pixels):
            i, j, k = pixel[0], pixel[1], pixel[2]
            a = i - sub_domain_loc[0]
            b = j - sub_domain_loc[1]
            c = k - sub_domain_loc[2]
            if bulk[tuple(pixel)] == 1:
                material.add_pixel(pixel_index)
                bulk_array[0, :, a, b, c] = 1
            elif bulk[tuple(pixel)] == 0:
                vacuum.add_pixel(pixel_index)
            elif bulk[tuple(pixel)] == 2:
                base.add_pixel(pixel_index)
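        # Encoding of the bulk array as assigned above: 1 = material,
        # 0 = vacuum, 2 = base plate (inserted earlier as the bottom grid layer).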
        #define solver parameters
        gradient = Stencils3D.linear_finite_elements
        weights = 6*[1]

        material.identify_temperature_loaded_quad_pts(cell, bulk_array.reshape(-1, 1, order="F"))
        #material.dt = 2.5e-5
        material.T_m = 2000
        material.alpha_thermal = 20e-6

        #local_bulk_array = bulk[sub_domain_loc[0]:sub_domain_loc[0]+nb_subdomain_grid_pts[0], sub_domain_loc[1]:sub_domain_loc[1]+nb_subdomain_grid_pts[1], sub_domain_loc[2]:sub_domain_loc[2]+nb_subdomain_grid_pts[2]]

        ## use solver
        krylov_solver = msp.solvers.KrylovSolverCG(cg_tol, maxiter)
        solver = msp.solvers.SolverNewtonCG(cell, krylov_solver, msp.Verbosity.Silent,
                                            newton_tol, equil_tol, maxiter, gradient, weights)
        solver.formulation = msp.Formulation.small_strain
        solver.initialise_cell()
        #define file input/output parameters for muSpectre output
        # output_filename = "layer_"+str(layer_no-1).zfill(3)+".nc"
        # if comm_mu.rank == 0 and os.path.exists(output_filename):
        #     os.remove(output_filename)
        output_filename = "converted_temperature_"+str(layer_no).zfill(3)+".nc"
        if comm_mu.rank == 0 and os.path.exists(output_filename):
            os.remove(output_filename)
        comm_mu.barrier()

        file_io_object_write = muGrid.FileIONetCDF(output_filename, muGrid.FileIONetCDF.OpenMode.Write, comm_mu)

        #coordinates = cell.get_fields().register_real_field("coordinates", 3, 'quad')
        #temperature_field = cell.get_fields().register_real_field("temperature2", 1, 'quad')
        #vacuum_strain_field = cell.get_fields().register_real_field("vacuum_strain2", (3,3), "quad")

        file_io_object_write.register_field_collection(field_collection=cell.fields)
        file_io_object_write.register_field_collection(field_collection=material.collection)
        #print("cell field names:\n", cell.get_field_names())

        #initialize global time attribute (to be updated once filled)
        file_io_object_write.write_global_attribute('timestamp', timestamp_arr)

        #create pointer-like copies for fields
        material_phase_alias = np.array(material_phase, copy=False)
        #coordinates_alias = np.array(coordinates, copy=False)
        #temperature_alias = np.array(temperature_field, copy=False)
        #vacuum_strain_alias = np.array(vacuum_strain_field, copy=False)
        #melt_bool = -1*np.ones(np.shape(material_phase), np.dtype(np.int32))
        #assign values to the above created aliases
        for pixel_id, pixel_coord in cell.pixels.enumerate():
            i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
            for q in range(nb_quad):
                # local coordinates on the processor
                a = i - sub_domain_loc[0]
                b = j - sub_domain_loc[1]
                c = k - sub_domain_loc[2]
                material_phase_alias[0, q, a, b, c] = int(bulk[i, j, k])
                #coordinates_alias[0, q, a, b, c] = coords[i,j,k][0]+quad_coords[q,0]
                #coordinates_alias[1, q, a, b, c] = coords[i,j,k][1]+quad_coords[q,1]
                #coordinates_alias[2, q, a, b, c] = coords[i,j,k][2]+quad_coords[q,2]
                #if k == nb_domain_grid_pts[2]-2:
                #    melt_bool[0,q,a,b,c] = 0

        #use the temperature array to change the values using temperature
        #array pointer (alias)
        #temperature_alias[:,:,:,:,:] = update_temperature_field(cell, temperature_array)[:,:,:,:,:]

        comm_mu.barrier()
        if layer_no not in [10, 30, 50, 70, 60, 90, 130, 170, 210, 250, 270, 290, 330]:
            from transfer_previous_layer import transfer_previous_layer
            strain_field = cell.get_fields().get_field("grad")
            strain_field_alias = np.array(strain_field, copy=False)
            stress_field = cell.get_fields().get_field("flux")
            stress_field_alias = np.array(stress_field, copy=False)
            #previous_cell = transfer_previous_layer(layer_no-1)
            [previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool,
             previous_material_temperature, previous_epsilon_plastic,
             previous_material_vacuum_strain] = \
                transfer_previous_layer(layer_no-1, sub_domain_loc, nb_subdomain_grid_pts)

            #assign values to the above created aliases
            for pixel_id, pixel_coord in cell.pixels.enumerate():
                i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
                a = i - sub_domain_loc[0]
                b = j - sub_domain_loc[1]
                c = k - sub_domain_loc[2]
                if k < nb_domain_grid_pts[2]-1:
                    for q in range(nb_quad):
                        # local coordinates on the processor
                        strain_field_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
                        stress_field_alias[:, :, q, a, b, c] = previous_stress[:, :, q, i, j, k]
                        #temperature_alias[0,q,a,b,c] = previous_temperature[0,q,i,j,k]
                        #vacuum_strain_alias[:, :, q, a, b, c] = previous_vacuum_strain[:, :, q, i, j, k]
                        #if k == nb_domain_grid_pts[2]-2:
                        #    vacuum_strain_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]

            material.update_temperature_field(cell, previous_material_temperature.reshape(-1, 1, order="F"))
            material.update_epsilon_bar_field(cell, previous_epsilon_bar.reshape(-1, 1, order="F"))
            material.update_melt_bool_field(cell, previous_melt_pool.reshape(-1, 1, order="F"))
            material.update_epsilon_plastic_field(cell, previous_epsilon_plastic.reshape(-1, 1, order="F"))
            material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1, 1, order="F"))

            del(previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool,
                previous_material_temperature, previous_epsilon_plastic, previous_material_vacuum_strain)
            gc.collect()
        else:
            #use the temperature array to change the values using temperature
            #array pointer (alias)
            size_array = cell.get_fields().get_field("flux").array()
            material.update_temperature_field(cell, update_temperature_field(cell, temperature_array).reshape(-1, 1, order="F"))
            material.update_epsilon_bar_field(cell, np.zeros_like(bulk_array).reshape(-1, 1, order="F"))
            material.update_epsilon_plastic_field(cell, np.zeros_like(size_array).reshape(-1, 1, order="F"))
            material.update_melt_bool_field(cell, melt_bool.reshape(-1, 1, order="F"))

        comm_mu.barrier()
        comm_py.send([cell.subdomain_locations[0], cell.subdomain_locations[1], cell.subdomain_locations[2]],
                     recieve_rank, tag=rank+size_py-procs)
        comm_py.send([cell.nb_subdomain_grid_pts[0], cell.nb_subdomain_grid_pts[1], cell.nb_subdomain_grid_pts[2]],
                     recieve_rank, tag=rank+size_py-procs)

        end_time = MPI.Wtime()
        #print("MUSPECTRE INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
    #make nb_steps global by broadcasting
    nb_steps = comm_py.bcast(nb_steps, root=0)
    start = timer.time()
    comm_py.barrier()
    #separate operations for FEM and muSpectre communicators depending on ranks
    if color == 1:
        #temperature_array = np.zeros(nb_domain_grid_pts)
        start_time = MPI.Wtime()
        dummy_temperature = np.zeros((1, cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],
                                      cell.nb_subdomain_grid_pts[1], cell.nb_subdomain_grid_pts[2]))
        #for all steps
        for s in range(nb_steps):
            step_start = MPI.Wtime()
            if rank == 0:
                print('=== STEP {}/{} ==='.format(s+1, nb_steps))
            file_io_object_write.append_frame().write(["temperature"])
            #receive temp_array from constant sender request
            #temperature_array = np.zeros(nb_domain_grid_pts)
            #print("MUSPECTRE Step {} --- Recieve request created: {}".format(s, MPI.Wtime()- step_start))
            #material.dt = delta_t_vector[s]
            req = comm_py.Irecv(dummy_temperature, source=recieve_rank, tag=s)
            #print("MUSPECTRE Step {} --- Recieve request done: {}".format(s, MPI.Wtime()- step_start))
            #print(np.max(cell.get_fields().get_field("grad").array()))
            #solve_start = MPI.Wtime()
            #solve mechanics problem
            #res = solver.solve_load_increment(strain_step)
            comm_mu.barrier()
            #print("MUSPECTRE Rank {} - Step {} --- Total Solve time: {}".format(rank, s, MPI.Wtime()- solve_start) )
            #write all fields in .nc format TO DO: not all fields
            #file_io_object_write.append_frame().write(["flux","grad", "epsilon_bar", "epsilon_plastic","temperature", "vacuum_strain", "melt_pool", "material2"])
            #if (s+1)%5 == 0:
            #print('=== STEP {}/{} ==='.format(s+1, nb_steps))
            #write all fields in .nc format TO DO: not all fields
            #file_io_object_write.append_frame().write(["coordinates", "flux", "grad", "material2","temperature2", "epsilon_bar", "temperature", "vacuum_strain", "epsilon_plastic"])
            #if (s+1) == 100: break
            #print("MUSPECTRE Step {} Rank {} --- Step done: {}".format(s, rank, MPI.Wtime()- step_start))
            #make sure the temperature array is received
            req.wait()
            #print("MUSPECTRE Step {} --- Recieve request Wait done: {}".format(s, MPI.Wtime()- step_start))
            #use the temperature array to change the values using temperature
            #array pointer (alias)
            #temperature_alias[:,:,:,:,:] = dummy_temperature
            #dummy_temperature = np.ones_like(dummy_temperature)*2005 - 4*s
            material.update_temperature_field(cell, dummy_temperature.reshape(-1, 1, order="F"))
            #if (s+1)%100 == 0:
            #if rank == 0: print('=== STEP {}/{} ==='.format(s, nb_steps))
            #break
            #res = solver.solve_load_increment(strain_step)

        #write all fields in .nc format TO DO: not all fields
        file_io_object_write.append_frame().write(["temperature"])

        end_time = MPI.Wtime()
        if rank == 0:
            print("Total MUSPECTRE time: {}".format(end_time-start_time))
        comm_mu.barrier()
    if color == 0:
        start_time = MPI.Wtime()
        dummy_array = np.zeros((1, nb_quad)+tuple(nb_domain_grid_pts))
        for s in range(nb_steps):
            #start constant sending request
            #print("FENICS Step {} --- Send Requests created: {}".format(s, MPI.Wtime()-start_time))
            #reqs = [None] * len(send_ranks)
            #for p, proc_rank in enumerate(send_ranks):
            #    reqs[p] = comm_py.Isend([temperature_array, MPI.DOUBLE], proc_rank, tag=s)
            #print("FENICS Step {} --- Send Requests done: {}".format(s, MPI.Wtime()-start_time))

            #re-read the temperature for the next step
            no += 1
            vec_name = "/Temperature/vector_%d" % no
            timestamp = hdf5_file.attributes(vec_name)['timestamp']
            timestamp_arr[s] = timestamp
            hdf5_file.read(temperature_field_store, vec_name)

            #store it on a dummy array since we don't want to change the
            #temperature array before the sending/receiving is done,
            #which will be ensured by MPI.wait
            #dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
            #store the temperature on the array with the right index if the point is found
            #this creates comm.size temperature_arrays, each non-zero only on
            #the sub-mesh present in that rank
            #mesh and temperature are divided differently across communicators!! TO LOOK!
            #with np.nditer([bulk_local, dummy_array[0,0,:,:,:]], flags=['multi_index'], op_flags=[['readonly', 'arraymask'], ['writeonly', 'writemasked']]) as it:
            #    for iterable in it:
            #        if bulk_local[it.multi_index] == 1:
            #            for q in range(nb_quad):
            #                try:
            #                    var = temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
            #                    if var < 0:
            #                        pass
            #                    else:
            #                        dummy_array[tuple((0,q))+it.multi_index] = var
            #                except RuntimeError:
            #                    pass
            for lmi_index, lmi in enumerate(local_multi_index):
                val = temperature_field_store(quad_coordinates_array[lmi])
                if val <= 0:
                    dummy_array[lmi] = 0
                else:
                    dummy_array[lmi] = val

            #make sure temperature sending is complete and create the new
            #temperature array
            if s != 0:
                for p, proc_rank in enumerate(send_ranks):
                    reqs[p].wait()
            #print("FENICS Step {} --- Send Requests Wait done: {}".format(s, MPI.Wtime()-start_time))
            temperature_array = comm.allreduce(dummy_array, op=MPI.SUM)
            temperature_array = temperature_array + 293

            #start constant sending request
            reqs = [None]*len(send_ranks)
            for p, proc_rank in enumerate(send_ranks):
                send_temp = np.array(temperature_array[:, :,
                    list_subdomain_loc[proc_rank][0]:list_subdomain_loc[proc_rank][0]+list_nb_subdomain_grid_pts[proc_rank][0],
                    list_subdomain_loc[proc_rank][1]:list_subdomain_loc[proc_rank][1]+list_nb_subdomain_grid_pts[proc_rank][1],
                    list_subdomain_loc[proc_rank][2]:list_subdomain_loc[proc_rank][2]+list_nb_subdomain_grid_pts[proc_rank][2]])
                #print(p,proc_rank, list_subdomain_loc[proc_rank], list_nb_subdomain_grid_pts[proc_rank])
                #print('send_shape', np.shape(send_temp))
                reqs[p] = comm_py.Isend([send_temp, MPI.DOUBLE], proc_rank, tag=s)
            #if (s+1)%100 == 0:
            #print('=== STEP {}/{} ==='.format(s+1, nb_steps))
            #break
            #print("FENICS Step {} --- Step done: {}".format(s, MPI.Wtime()-start_time))

        comm.barrier()
        end_time = MPI.Wtime()
        if rank == 0:
            print("Total FENICS time: {}".format(end_time-start_time))
    #make timestamp array global by broadcasting
    timestamp_arr = comm_py.bcast(timestamp_arr, root=0)

    #if on the muSpectre communicator, update the global timestamp
    if color == 1:
        file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
        file_io_object_write.close()

    comm_py.barrier()
    if rank_py == 0:
        print("Total run time for 100 steps:", timer.time()-start)


if __name__ == '__main__':
    main()