residual_stress_analysis_hea_1q.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 25 14:38:47 2022
@author: ekinkubilay
"""
import os
import sys
#import h5py
sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python"))
sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmufft/python"))
sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmugrid/python"))
import muFFT
import muSpectre as msp
import numpy as np
import muGrid
from muFFT import Stencils2D
from muFFT import Stencils3D
import cProfile, pstats
import time as timer
from mpi4py import MPI
from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
from petsc4py import PETSc
import argparse
import gc
#from transfer_previous_layer import transfer_previous_layer
def update_temperature_field(cell, temperature_array):
    sub_domain_loc = cell.subdomain_locations
    #temperature = np.zeros((cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
    temperature = np.zeros((1,cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
    for pixel_id, pixel_coord in cell.pixels.enumerate():
        i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
        a = i - sub_domain_loc[0]
        b = j - sub_domain_loc[1]
        c = k - sub_domain_loc[2]
        for q in range(cell.nb_quad_pts):
            temperature[0,q,a,b,c] = np.max([temperature_array[0,q,i,j,k],0]) + 293
    return temperature
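# Usage sketch (illustrative): `temperature_array` appears to hold the *global*
# per-quad-point field as an offset above the 293 K ambient; the helper clamps
# negative entries to zero, adds the ambient offset and returns an array with the
# *local* subdomain shape, which is what the material update below expects, e.g.
#   T_local = update_temperature_field(cell, temperature_array)
#   material.update_temperature_field(cell, T_local.reshape(-1, 1, order="F"))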
def parse_args():
    """
    Parse command-line arguments.
    """
    parser = argparse.ArgumentParser(description='Run given layer')
    parser.add_argument("layer_no", type=int, help='layer number')
    parser.add_argument("mu_rank", type=int, help='number of processes for muSpectre')
    parser.add_argument("factor", type=float, help='factor for material parameter analysis')
    parser.add_argument("target_directory", help='target directory for all output/input files')
    args = parser.parse_args()
    return args
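# Example invocation (illustrative; rank counts and paths are assumptions, not from the source):
#   mpirun -n 6 python3 residual_stress_analysis_hea_1q.py 42 2 1.0 /scratch/kubilay/results
# runs layer 42 with 2 muSpectre ranks, 4 FEniCS ranks and material factor 1.0.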
def main():
    args = parse_args()
    layer_no = args.layer_no
    factor = args.factor
    target_directory = args.target_directory
    #os.chdir('/scratch/kubilay/mechanical_model/results/mu_layer')
    os.chdir(target_directory)
    #comm_py is the WORLD communicator; it is split below into a FEniCS part
    #and a muSpectre part
    from mpi4py import MPI
    comm_py = MPI.COMM_WORLD
    rank_py = comm_py.rank
    size_py = comm_py.size
    #print("Hello! I'm rank %d from %d running in total..." % (comm_py.rank, comm_py.size))
    #the last mu_rank ranks get color 1 (muSpectre), the rest get color 0 (FEniCS);
    #color is the tag used to split the communicator
    mu_rank = args.mu_rank
    color = np.sign(int(rank_py/(size_py-mu_rank)))
    #split communicator
    comm = comm_py.Split(color, rank_py)
    rank = comm.rank
    size = comm.size
    #print(rank_py, size_py, rank, size, color)
    #print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
    #define empty variable for broadcast buffer
    nb_steps = None
    dx, dy, dz = 0.0002, 0.0003125, 0.00003
    z_lim = 0.00801
    x_lim = 0.055
    element_dimensions = [dx, dy, dz]
    #define global simulation parameters
    dim = 3
    nb_quad = 1
    #nb_domain_grid_pts = [11,11,11]
    #define constant material properties
    young = 162e9                        # Pa (162 GPa)
    poisson = 0.277                      # unitless
    alpha_s = 0.55                       # unitless
    Delta_E_bs = 1.13*factor**(2/3)      # eV
    epsilon_sat = 0.35                   # unitless
    sigma_gb = 0.150e9                   # Pa (150 MPa)
    sigma_s0 = 0.254*factor**(4/3)*1e9   # Pa
    sigma_f = 1.200e9                    # Pa (1.2 GPa)
    ## convergence tolerance for the Newton-Raphson increment
    newton_tol = 1e-3
    equil_tol = newton_tol
    ## tolerance for the Krylov solver of the linearised cell problem
    cg_tol = -1e-2
    ## macroscopic strain
    strain_step = np.zeros((3,3))   # no applied external strain
    maxiter = 500000                # for the linear cell solver
    #exact coordinates of each quadrature point relative to the local element coordinates
    quad_coords = [np.array([0.75,0.5,0.25]), np.array([0.75,0.25,0.5]), np.array([0.5,0.75,0.25]), np.array([0.25,0.75,0.5]), np.array([0.5,0.25,0.75]), np.array([0.25,0.5,0.75])]
    quad_coords = quad_coords*np.array([dx,dy,dz])
    if color == 0:
        #local size and rank on the split communicator, this communicator is called comm
        rank = comm.rank
        procs = comm.size
        other_size = size_py - size
        if rank == 0: print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
        start_time = MPI.Wtime()
        send_ranks = np.array_split(np.arange(0,other_size, 1)+size, size)[rank]
        #print(send_ranks)
        from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
        #get the geometry and temperature via fenics
        mesh = Mesh(comm)
        #read the mesh and the temperature function at the initial timestep
        #using FEniCS within the FEM communicator
        #filename = "/work/lammm/kubilay/mesh/layer_"+str(layer_no).zfill(3)+".xdmf"
        #temperature_filename = "/work/lammm/kubilay/results/temperature/temperature_layer_"+str(layer_no).zfill(3)+".h5"
        filename = "/scratch/kubilay/mechanical_model/results/coarse/temperature_coarse_"+str(layer_no).zfill(3)+".xdmf"
        temperature_filename = "/scratch/kubilay/mechanical_model/results/coarse/relevant_temperature_"+str(layer_no).zfill(3)+".h5"
        f = XDMFFile(comm, filename)
        f.read(mesh)
        #MeshTransformation.rescale(mesh, 1e-3, Point(0,0,0)) #in m
        f.close()
        del(f)
        gc.collect()
        bounding_box = mesh.bounding_box_tree()
        hdf5_file = HDF5File(comm, temperature_filename, "r")
        V = FunctionSpace(mesh, "CG", 1)
        temperature_field_store = Function(V)
        nb_steps = hdf5_file.attributes('Temperature')['count']-1
        timestamp_arr = np.zeros(nb_steps+1)
        for t in range(nb_steps+1):
            vec_name = "/Temperature/vector_%d"%t
            time = hdf5_file.attributes(vec_name)['timestamp']
            timestamp_arr[t] = time
        no = 0
        vec_name = "/Temperature/vector_%d"%no
        hdf5_file.read(temperature_field_store, vec_name)
        #obtain local domain dimensions
        xmax = np.max(mesh.coordinates()[:,0])
        xmin = np.min(mesh.coordinates()[:,0])
        ymax = np.max(mesh.coordinates()[:,1])
        ymin = np.min(mesh.coordinates()[:,1])
        zmax = np.max(mesh.coordinates()[:,2])
        zmin = np.min(mesh.coordinates()[:,2])
        #communicate max/min domain values within the FEM communicator
        xmax = comm.allreduce(xmax, op=MPI.MAX)
        ymax = comm.allreduce(ymax, op=MPI.MAX)
        zmax = comm.allreduce(zmax, op=MPI.MAX)
        xmin = comm.allreduce(xmin, op=MPI.MIN)
        ymin = comm.allreduce(ymin, op=MPI.MIN)
        zmin = comm.allreduce(zmin, op=MPI.MIN)
        #z_lim = zmin #DELETE TO CONTROL LAYERS
        #calculate domain lengths and element dimensions
        domain_lengths = [x_lim-xmin,ymax-ymin,zmax-z_lim]
        nb_domain_grid_pts = list(np.array(domain_lengths)/np.array(element_dimensions))
        nb_domain_grid_pts = [round(i) for i in nb_domain_grid_pts]
        #array to store material (and not vacuum or base plate)
        bulk = np.ones(nb_domain_grid_pts)
        #domain discretisation TO DO: muspectre has this array already
        #x_dim = np.linspace(xmin, x_lim, nb_domain_grid_pts[0])
        #y_dim = np.linspace(ymin, ymax, nb_domain_grid_pts[1])
        #z_dim = np.linspace(z_lim, zmax, nb_domain_grid_pts[2])
        #obtain the material (and not internal vacuum) by checking whether the point
        #is inside or outside the submesh. This creates comm.size partially
        #filled arrays, each non-zero only on the sub-mesh present on that rank.
        #Obtain the temperature by searching every point in every sub-mesh domain;
        #pass if fenics raises an error (due to the point not being present on that domain)
        #for i,x in enumerate(x_dim):
        #    for j,y in enumerate(y_dim):
        #        for k,z in enumerate(z_dim):
        #            if len(bounding_box.compute_collisions(Point(x+0.5*dx,y+0.5*dy,z+0.5*dz))) != 0:
        #                bulk[i,j,k] = 1
        del(bounding_box)
        gc.collect()
        #gather the material across all FEM ranks and broadcast to all FEM ranks
        #bulk and temperature_array are global (size corresponds to complete domain)
        #bulk = comm.allreduce(bulk, op=MPI.SUM)
        #bulk[bulk > 1] = 1
        #add the base plate at the bottom and vacuum on top and sides;
        #increase the number of grid points and total domain length in each dimension
        #bulk = np.insert(bulk, 0, 2, axis=2)
        #nb_domain_grid_pts[2] += 1
        #domain_lengths[2] += element_dimensions[2]
        #bulk = np.insert(bulk, 0, 2, axis=2)
        #nb_domain_grid_pts[2] += 1
        #domain_lengths[2] += element_dimensions[2]
        #vacuum on the top layer (1 layer)
        bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
        nb_domain_grid_pts[2] += 1
        domain_lengths[2] += element_dimensions[2]
        #bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
        #nb_domain_grid_pts[2] += 1
        #domain_lengths[2] += element_dimensions[2]
        #vacuum on the last y plane (1 layer)
        bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
        nb_domain_grid_pts[1] += 1
        domain_lengths[1] += element_dimensions[1]
        #bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
        #nb_domain_grid_pts[1] += 1
        #domain_lengths[1] += element_dimensions[1]
        #vacuum on the last x planes (2 layers)
        bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
        nb_domain_grid_pts[0] += 1
        domain_lengths[0] += element_dimensions[0]
        bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
        nb_domain_grid_pts[0] += 1
        domain_lengths[0] += element_dimensions[0]
        #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
        #nb_domain_grid_pts[0] += 1
        #domain_lengths[0] += element_dimensions[0]
        #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
        #nb_domain_grid_pts[0] += 1
        #domain_lengths[0] += element_dimensions[0]
        #base plate layers at the bottom (no_base_layer = 22 layers)
        no_base_layer = 22
        for i in range(no_base_layer):
            bulk = np.insert(bulk, 0, 2, axis=2)
            nb_domain_grid_pts[2] += 1
            domain_lengths[2] += element_dimensions[2]
        #domain discretisation TO DO: muspectre has this array already
        x_dim = np.linspace(xmin, x_lim+1*element_dimensions[0], nb_domain_grid_pts[0])+0.5*dx
        y_dim = np.linspace(ymin, ymax+0*element_dimensions[1], nb_domain_grid_pts[1])+0.5*dy
        z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+0*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
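        #the 0.5*dx/dy/dz shift puts the sampling points at (approximately) the cell
        #centres of the regular muSpectre grid; z_dim starts no_base_layer cells below
        #z_lim so that the base-plate layers added above are covered as well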
        coords = np.zeros([nb_domain_grid_pts[0],nb_domain_grid_pts[1],nb_domain_grid_pts[2],3])
        temperature_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
        for i in range(nb_domain_grid_pts[0]):
            coords[i,:,:,0] = x_dim[i]
        for i in range(nb_domain_grid_pts[1]):
            coords[:,i,:,1] = y_dim[i]
        for i in range(nb_domain_grid_pts[2]):
            coords[:,:,i,2] = z_dim[i]
        #x-intervals over which the top base-plate layer is removed (unsupported regions)
        s_list = [(i,i+0.001) for i in np.linspace(0.005, 0.050, 26)]
        #s_list = [(0.005, 0.05)]
        support_mask = np.ones_like(x_dim)
        for i,x in enumerate(x_dim):
            loc = x
            for j,s in enumerate(s_list):
                if loc>s[0] and loc<s[1]:
                    support_mask[i] = 0
        for j,y in enumerate(y_dim):
            bulk[:,j,no_base_layer-1] = bulk[:,j,no_base_layer-1]*support_mask
        #bulk[:,-1,no_base_layer-1] = 0
        #bulk[-1,:,no_base_layer-1] = 0
        bulk[:,-1,:] = 0
        bulk[-1,:,:] = 0
        bulk[-2,:,:] = 0
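        #the last y plane and the last two x planes are forced to vacuum; together with
        #the top vacuum layer this is the usual way to approximate traction-free
        #boundaries in an otherwise periodic FFT-based cell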
        #quadrature coordinates relative to the part
        #local bulk information on each partition
        quad_coordinates_array = np.zeros(tuple(np.shape(temperature_array))+(3,))
        bulk_local = np.zeros_like(bulk)
        for i,x in enumerate(x_dim):
            for j,y in enumerate(y_dim):
                for k,z in enumerate(z_dim):
                    if bulk[i,j,k] == 1:
                        for q in range(nb_quad):
                            try:
                                #quad_coordinates_array[0,q,i,j,k] = Point(x+quad_coords[q,0],y+quad_coords[q,1],z+quad_coords[q,2])[:]
                                quad_coordinates_array[0,q,i,j,k] = Point(x,y,z)[:]
                            except RuntimeError:
                                pass
        local_multi_index = []
        local_multi_index_coordinates = []
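        #pre-filter the quadrature points this rank can actually evaluate: calling
        #temperature_field_store at a point outside the local mesh partition raises a
        #RuntimeError, so only the indices that succeed are recorded in
        #local_multi_index and re-used every timestep instead of searching the whole grid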
        with np.nditer([bulk], flags=['multi_index'], op_flags=['readwrite']) as it:
            for iterable in it:
                if bulk[it.multi_index] == 1:
                    for q in range(nb_quad):
                        final_index = tuple((0,q))+it.multi_index
                        try:
                            temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
                            bulk_local[it.multi_index] = bulk[it.multi_index]
                            local_multi_index.append(final_index)
                            local_multi_index_coordinates.append(quad_coordinates_array[final_index])
                            #break
                        except RuntimeError:
                            pass
# print("element_dimensions", element_dimensions)
# print("element_dimensions", nb_domain_grid_pts)
# print(x_dim)
#store the temperature on the array with the right index if point is found
#this creates comm.size number of temperature_arrays each non-zero only on
#the sub-mesh present in that rank
#mesh and temperature are divided differently accross communicators!! TO LOOK!
# for i,x in enumerate(x_dim):
# for j,y in enumerate(y_dim):
# for k,z in enumerate(z_dim):
# if bulk[i,j,k] == 1:
# for q in range(nb_quad):
# try:
# temperature_array[0,q,i,j,k] = temperature_field_store(Point(x,y,z))
# except RuntimeError:
# pass
#send global arrays (material/temperature/coordinates) from FEM communicator
#to muSpectre communicator
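        #message tags used for the one-off handshake (tag 2 is unused):
        #  tag 0: sending FEM rank        tag 1: bulk (material map)
        #  tag 3: nb_domain_grid_pts      tag 4: domain_lengths
        #  tag 5: coords                  tag 6: timestamp_arr
        #  tag 7: initial temperature_array
        #each muSpectre rank then answers with its subdomain location and size (see below)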
        for proc_rank in send_ranks:
            comm_py.send(rank, proc_rank, tag=0)
            comm_py.send(bulk, proc_rank, tag=1)
            comm_py.send(nb_domain_grid_pts, proc_rank, tag=3)
            comm_py.send(domain_lengths, proc_rank, tag=4)
            comm_py.send(coords, proc_rank, tag=5)
            comm_py.send(timestamp_arr, proc_rank, tag=6)
            comm_py.send(temperature_array, proc_rank, tag=7)
        list_subdomain_loc = [None] * size_py
        list_nb_subdomain_grid_pts = [None] * size_py
        #print(rank_py, send_ranks)
        for f, proc_rank in enumerate(send_ranks):
            list_subdomain_loc[proc_rank] = comm_py.recv(source = proc_rank, tag=proc_rank)
        # for i,j in enumerate(list_subdomain_loc):
        #     print(rank, i,j)
        for f, proc_rank in enumerate(send_ranks):
            list_nb_subdomain_grid_pts[proc_rank] = comm_py.recv(source = proc_rank, tag=proc_rank)
        # for i,j in enumerate(list_nb_subdomain_grid_pts):
        #     print(rank, i,j)
        end_time = MPI.Wtime()
        #print("FENICS INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
    #muSpectre processes
    if color == 1:
        #define the muSpectre communicator, called comm_mu
        comm_mu = muGrid.Communicator(comm)
        #local size and rank on the split communicator
        rank = comm.rank
        procs = comm.size
        start_time = MPI.Wtime()
        #receive the material arrays and system size parameters from the FEM communicator
        recieve_rank = comm_py.recv(source = MPI.ANY_SOURCE, tag=0)
        bulk = comm_py.recv(source = recieve_rank, tag=1)
        nb_domain_grid_pts = comm_py.recv(source = recieve_rank, tag=3)
        domain_lengths = comm_py.recv(source = recieve_rank, tag=4)
        coords = comm_py.recv(source = recieve_rank, tag=5)
        #receive the timestamp array from the FEM communicator
        timestamp_arr = comm_py.recv(source = recieve_rank, tag=6)
        delta_t_vector = timestamp_arr[1:]-timestamp_arr[:-1]
        delta_t_vector = np.insert(delta_t_vector, 0, 0)
        temperature_array = comm_py.recv(source= recieve_rank, tag=7)
        #create the cell using the muSpectre communicator
        cell = msp.cell.CellData.make_parallel(list(nb_domain_grid_pts), list(domain_lengths), comm_mu)
        cell.nb_quad_pts = nb_quad
        cell.get_fields().set_nb_sub_pts("quad", nb_quad)
        #define materials
        material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
        vacuum = msp.material.MaterialLinearElastic1_3d.make(cell, "vacuum", 0.0, 0.3)
        #base = msp.material.MaterialLinearElastic1_3d.make(cell, "base", 3*young, 0.3)
        #transverse_aniso = [178, 51, 63.4, 0, 0, 0, 178, 63.4, 0, 0, 0, 1655, 0, 0, 0, 634, 0, 0, 634, 0, 63.4]
        transverse_aniso = [3*617e9, 3*236e9, 3*236e9, 0, 0, 0, 3*617e9, 3*236e9, 0, 0, 0, 3*617e9, 0, 0, 0, 3*190.3e9, 0, 0, 3*190.3e9, 0, 3*190.3e9]
        base = msp.material.MaterialLinearAnisotropic_3d.make(cell, "base", transverse_aniso)
        #register fields and field collection
        material_phase = cell.get_fields().register_int_field("material2", 1, 'quad')
        bulk_array = np.zeros(np.shape(material_phase.array()), np.dtype(float))
        melt_bool = -1*np.ones(np.shape(bulk_array), np.dtype(np.int32))
        sub_domain_loc = cell.subdomain_locations
        nb_subdomain_grid_pts = cell.nb_subdomain_grid_pts
        #assign materials
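        #material codes stored in `bulk`: 1 = deposited HEA material, 0 = vacuum padding,
        #2 = base plate (modelled here with a stiff linear anisotropic elastic material)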
        for pixel_index, pixel in enumerate(cell.pixels):
            i, j, k = pixel[0], pixel[1], pixel[2]
            a = i - sub_domain_loc[0]
            b = j - sub_domain_loc[1]
            c = k - sub_domain_loc[2]
            if bulk[tuple(pixel)]==1:
                material.add_pixel(pixel_index)
                bulk_array[a,b,c] = 1
            elif bulk[tuple(pixel)]==0:
                vacuum.add_pixel(pixel_index)
            elif bulk[tuple(pixel)]==2:
                base.add_pixel(pixel_index)
        #define solver parameters
        #gradient = Stencils3D.trilinear_1q_finite_element
        gradient = Stencils3D.averaged_upwind
        weights = nb_quad * [1]
        material.identify_temperature_loaded_quad_pts(cell, bulk_array.reshape(-1,1, order="F"))
        #material.dt = 2.5e-5
        material.T_m = 1650
        material.alpha_thermal = 22e-6
        #local_bulk_array = bulk[sub_domain_loc[0]:sub_domain_loc[0]+nb_subdomain_grid_pts[0], sub_domain_loc[1]:sub_domain_loc[1]+nb_subdomain_grid_pts[1], sub_domain_loc[2]:sub_domain_loc[2]+nb_subdomain_grid_pts[2]]
        ## use solver
        krylov_solver = msp.solvers.KrylovSolverCG(cg_tol, maxiter, msp.Verbosity.Silent)
        solver = msp.solvers.SolverNewtonCG(cell, krylov_solver, msp.Verbosity.Silent, newton_tol, equil_tol, maxiter, gradient, weights)
        solver.formulation = msp.Formulation.small_strain
        solver.initialise_cell()
        #define file input/output parameters for muSpectre output
        # output_filename = "layer_"+str(layer_no-1).zfill(3)+".nc"
        # if comm_mu.rank == 0 and os.path.exists(output_filename):
        #     os.remove(output_filename)
        output_filename = "layer_"+str(layer_no).zfill(3)+".nc"
        if comm_mu.rank == 0 and os.path.exists(output_filename):
            os.remove(output_filename)
        comm_mu.barrier()
        file_io_object_write = muGrid.FileIONetCDF(output_filename,
                                                   muGrid.FileIONetCDF.OpenMode.Write,
                                                   comm_mu)
        #coordinates = cell.get_fields().register_real_field("coordinates", 3, 'quad')
        #temperature_field = cell.get_fields().register_real_field("temperature2", 1, 'quad')
        #vacuum_strain_field = cell.get_fields().register_real_field("vacuum_strain2", (3,3), "quad")
        file_io_object_write.register_field_collection(field_collection=cell.fields)
        file_io_object_write.register_field_collection(field_collection=material.collection)
        #print("cell field names:\n", cell.get_field_names())
        #initialize global time attribute (to be updated once filled)
        file_io_object_write.write_global_attribute('timestamp', timestamp_arr)
        #create pointer-like copies for fields
        material_phase_alias = np.array(material_phase, copy=False)
        #coordinates_alias = np.array(coordinates, copy=False)
        #temperature_alias = np.array(temperature_field, copy=False)
        #vacuum_strain_alias = np.array(vacuum_strain_field, copy=False)
        #melt_bool = -1*np.ones(np.shape(material_phase), np.dtype(np.int32))
        #assign values to the above created aliases
        for pixel_id, pixel_coord in cell.pixels.enumerate():
            i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
            for q in range(nb_quad):
                # local coordinates on the processor
                a = i - sub_domain_loc[0]
                b = j - sub_domain_loc[1]
                c = k - sub_domain_loc[2]
                material_phase_alias[a, b, c] = int(bulk[i, j, k])
                #coordinates_alias[0, q, a, b, c] = coords[i,j,k][0]+quad_coords[q,0]
                #coordinates_alias[1, q, a, b, c] = coords[i,j,k][1]+quad_coords[q,1]
                #coordinates_alias[2, q, a, b, c] = coords[i,j,k][2]+quad_coords[q,2]
                #if k == nb_domain_grid_pts[2]-2:
                #    melt_bool[0,q,a,b,c] = 0
        #use the temperature array to change the values using temperature
        #array pointer (alias)
        #temperature_alias[:,:,:,:,:] = update_temperature_field(cell, temperature_array)[:,:,:,:,:]
        comm_mu.barrier()
        if layer_no not in [10,30,50,70,60,90,130,170,210,250,268]:
            from transfer_previous_layer_1q import transfer_previous_layer
            strain_field = cell.get_fields().get_field("grad")
            strain_field_alias = np.array(strain_field, copy=False)
            stress_field = cell.get_fields().get_field("flux")
            stress_field_alias = np.array(stress_field, copy=False)
            size_array = cell.get_fields().get_field("flux").array()
            if rank == 0:
                previous_data = transfer_previous_layer(target_directory, layer_no-1, bulk, nb_domain_grid_pts, domain_lengths)
                #, np.max(coords[:,:,:,0]), np.max(coords[:,:,:,1]), np.max(coords[:,:,:,2]), np.min(coords[:,:,:,0]), np.min(coords[:,:,:,1]), np.min(coords[:,:,:,2]))
            else:
                previous_data = None
            comm_mu.barrier()
            previous_data = comm.bcast(previous_data, root = 0)
            [previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool, previous_material_temperature, previous_epsilon_plastic, previous_material_vacuum_strain] = previous_data
            sub_dx, sub_dy, sub_dz = sub_domain_loc[0], sub_domain_loc[1], sub_domain_loc[2]
            sub_gx, sub_gy, sub_gz = nb_subdomain_grid_pts[0], nb_subdomain_grid_pts[1], nb_subdomain_grid_pts[2]
            previous_epsilon_bar = previous_epsilon_bar[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
            previous_melt_pool = previous_melt_pool[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
            previous_material_temperature = previous_material_temperature[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
            previous_epsilon_plastic = previous_epsilon_plastic[:,:,sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
            previous_material_vacuum_strain = 0*previous_material_vacuum_strain[:,:,sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
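            #restart from the previous layer: rank 0 reads the previous layer's state
            #(stress, accumulated plastic strain epsilon_bar/epsilon_plastic, melt-pool
            #flag and temperature), broadcasts it, and each rank keeps only its own
            #subdomain slice; the inherited vacuum strain is reset to zero (factor 0 above).
            #Layers listed in the `not in` check above start from a pristine state instead
            #(else branch below).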
            #assign values to the above created aliases
            for pixel_id, pixel_coord in cell.pixels.enumerate():
                i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
                a = i - sub_domain_loc[0]
                b = j - sub_domain_loc[1]
                c = k - sub_domain_loc[2]
                if k < nb_domain_grid_pts[2]-1:
                    for q in range(nb_quad):
                        # local coordinates on the processor
                        #strain_field_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
                        stress_field_alias[:, :, q, a, b, c] = previous_stress[:, :, q, i, j, k]
                        #temperature_alias[0,q,a,b,c] = previous_temperature[0,q,i,j,k]
                        #vacuum_strain_alias[:, :, q, a, b, c] = previous_vacuum_strain[:, :, q, i, j, k]
                        #if k == nb_domain_grid_pts[2]-2:
                        #    vacuum_strain_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
            material.update_temperature_field(cell, previous_material_temperature.reshape(-1,1, order="F"))
            material.update_epsilon_bar_field(cell, previous_epsilon_bar.reshape(-1,1, order="F"))
            material.update_melt_bool_field(cell, previous_melt_pool.reshape(-1,1, order="F"))
            material.update_epsilon_plastic_field(cell, previous_epsilon_plastic.reshape(-1,1, order="F"))
            material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
            del(previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool, previous_material_temperature, previous_epsilon_plastic)
            gc.collect()
        else:
            #use the temperature array to change the values using temperature
            #array pointer (alias)
            size_array = cell.get_fields().get_field("flux").array()
            material.update_temperature_field(cell, update_temperature_field(cell, temperature_array).reshape(-1,1, order="F"))
            material.update_epsilon_bar_field(cell, np.zeros_like(bulk_array).reshape(-1,1, order="F"))
            material.update_epsilon_plastic_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
            material.update_melt_bool_field(cell, melt_bool.reshape(-1,1, order="F"))
            material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
            strain_field = cell.get_fields().get_field("grad")
            strain_field_alias = np.array(strain_field, copy=False)
        comm_mu.barrier()
        comm_py.send([cell.subdomain_locations[0],cell.subdomain_locations[1],cell.subdomain_locations[2]], recieve_rank, tag=rank+size_py-procs)
        comm_py.send([cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]], recieve_rank, tag=rank+size_py-procs)
        end_time = MPI.Wtime()
        #print("MUSPECTRE INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
    #make nb_steps global by broadcasting
    nb_steps = comm_py.bcast(nb_steps, root = 0)
    start = timer.time()
    comm_py.barrier()
    #separate operations for the FEM and muSpectre communicators depending on color
    if color == 1:
        #temperature_array = np.zeros(nb_domain_grid_pts)
        start_time = MPI.Wtime()
        dummy_temperature = np.zeros((1,cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
        if layer_no%2 == 0:
            release_limit = 11910
        else:
            release_limit = 11870
        #for all steps
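        #pipelined time stepping: at step s a non-blocking receive for the next
        #temperature field (tag=s) is posted, the mechanical increment is solved with
        #the temperature currently stored in the material, then the receive is
        #completed and the material temperature is updated for the following step;
        #one extra solve after the loop consumes the last received field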
        for s in range(nb_steps):
            step_start = MPI.Wtime()
            starting_step = MPI.Wtime()
            #if rank == 0:
            #    print('=== STEP {}/{} ==='.format(s+1, nb_steps))
            #    print(delta_t_vector[s], timestamp_arr[s])
            #receive temp_array from the standing send request
            #temperature_array = np.zeros(nb_domain_grid_pts)
            #if rank == 0: print("MUSPECTRE Step {} --- Receive request created: {}".format(s, MPI.Wtime()- step_start))
            material.dt = delta_t_vector[s]
            req = comm_py.Irecv(dummy_temperature, source = recieve_rank, tag=s)
            #if rank == 0: print("MUSPECTRE Step {} --- Receive request done: {}".format(s, MPI.Wtime()- step_start))
            #if (s+1)%500 == 0:
            #field_1 = cell.get_fields().get_field("grad")
            #field_1_alias = np.array(field_1, copy = False)
            #field_3 = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order="f")
            #print(rank, nb_domain_grid_pts[0]-nb_subdomain_grid_pts[0], sub_domain_loc[0], np.shape(strain_field_alias))
            #print(rank, nb_domain_grid_pts[1]-nb_subdomain_grid_pts[1], sub_domain_loc[1], np.shape(strain_field_alias))
            #print(rank, nb_domain_grid_pts[2]-nb_subdomain_grid_pts[2], sub_domain_loc[2], np.shape(strain_field_alias))
            #strain_field_alias -= field_3
            #field_3_alias = np.array(field_3, copy=False)
            #field_3_alias[:,:,:,:,:] = 0
            #strain_field_alias[:,:,:,-1,:,:] = 0
            #strain_field_alias[:,:,:,:,-1,:] = 0
            #strain_field_alias[:,:,:,:,:,-1] = 0
            if s == release_limit:
                # strain_field_alias[:,:,:,:,:,:] = 0
                if layer_no == 268: material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
                else: material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
            #print(rank, np.shape(field_1), np.shape(strain_field_alias), np.shape(field_3), nb_subdomain_grid_pts[0], nb_subdomain_grid_pts[1], nb_subdomain_grid_pts[2])
            solve_start = MPI.Wtime()
            #try:
            #solve the mechanics problem
            res = solver.solve_load_increment(strain_step)
            #if rank == 0: print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
            #comm_mu.barrier()
            #except RuntimeError:
            #    if rank == 0:
            #        print("failed to converge at step", s)
            #    strain_field_alias[:,:,:,:,:,-2:] *= 1.05
            #    strain_field_alias[:,:,:,:,-2:,:] *= 1.05
            #    strain_field_alias[:,:,:,-2:,:,:] *= 1.05
            #    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
            #    res = solver.solve_load_increment(strain_step)
            #    file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
            #    file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
            #    file_io_object_write.close()
            #    break
            comm_mu.barrier()
            #if rank == 0: print("newton_norm: ", res.newton_norm, " equil norm: ", res.equil_norm)
            #if rank == 0: print("MUSPECTRE Rank {} - Step {} --- Total Solve time: {}".format(rank, s, MPI.Wtime()- solve_start))
            #write all fields in .nc format TO DO: not all fields
            #file_io_object_write.append_frame().write(["flux","grad", "epsilon_bar", "epsilon_plastic","temperature", "vacuum_strain", "melt_pool", "material2"])
            #if (s+1)%5 == 0:
            #    print('=== STEP {}/{} ==='.format(s+1, nb_steps))
            #    write all fields in .nc format TO DO: not all fields
            #    file_io_object_write.append_frame().write(["coordinates", "flux", "grad", "material2","temperature2", "epsilon_bar", "temperature", "vacuum_strain", "epsilon_plastic"])
            #if (s+1) == 100: break
            #print("MUSPECTRE Step {} Rank {} --- Step done: {}".format(s, rank, MPI.Wtime()- step_start))
            #make sure the temperature array has been received
            step_start = MPI.Wtime()
            req.wait()
            #if rank == 0: print("MUSPECTRE Step {} --- Receive request Wait done: {}".format(s, MPI.Wtime()- step_start))
            #use the temperature array to change the values using temperature
            #array pointer (alias)
            #temperature_alias[:,:,:,:,:] = dummy_temperature
            #dummy_temperature = np.ones_like(dummy_temperature)*2005 - 4*s
            step_start = MPI.Wtime()
            material.update_temperature_field(cell, dummy_temperature.reshape(-1,1, order="F"))
            #if rank == 0: print("MUSPECTRE Step {} --- Temperature Update done: {}".format(s, MPI.Wtime()- step_start))
            #if (s+1) <= 1910 and (s+1)>1810:
            #    file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
            #if (s+1)%11816 == 0:
            #    if rank == 0: print('=== STEP {}/{} ==='.format(s, nb_steps))
            #    file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
            #    break
            #if rank == 0: print("MUSPECTRE Step {} COMPLETE --- in: {}".format(s, MPI.Wtime()- starting_step))
        #try:
        #final solve with the last received temperature field
        #material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
        res = solver.solve_load_increment(strain_step)
        if rank == 0: print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
        #except RuntimeError:
        #    print("failed to converge at last step")
        #    strain_field_alias[:,:,:,:,:,:] = 0
        #    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
        #    res = solver.solve_load_increment(strain_step)
        #write all fields in .nc format TO DO: not all fields
        file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
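        #final relaxation: layer 333 is presumably the last layer of the build; the
        #whole part is cooled to the 293 K ambient temperature and held for a long
        #increment (dt = 3000 s) before an additional frame is written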
        if layer_no == 333:
            dummy_temperature[:] = 293.0
            material.update_temperature_field(cell, dummy_temperature.reshape(-1,1, order="F"))
            material.dt = 3000
            res = solver.solve_load_increment(strain_step)
            file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
        end_time = MPI.Wtime()
        if rank == 0: print("Total MUSPECTRE time: {}".format(end_time - start_time))
        comm_mu.barrier()
    if color == 0:
        dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
        for s in range(nb_steps):
            start_time = MPI.Wtime()
            starting_step = MPI.Wtime()
            #start the standing send request
            #print("FENICS Step {} --- Send Requests created: {}".format(s, MPI.Wtime()-start_time))
            #reqs = [None] * len(send_ranks)
            #for p, proc_rank in enumerate(send_ranks):
            #    reqs[p] = comm_py.Isend([temperature_array, MPI.DOUBLE], proc_rank, tag=s)
            #print("FENICS Step {} --- Send Requests done: {}".format(s, MPI.Wtime()-start_time))
            #re-read the temperature for the next step
            no += 1
            vec_name = "/Temperature/vector_%d"%no
            timestamp = hdf5_file.attributes(vec_name)['timestamp']
            timestamp_arr[s] = timestamp
            hdf5_file.read(temperature_field_store, vec_name)
            #store it in a dummy array since we do not want to overwrite the
            #temperature array before the sending/receiving is done,
            #which is ensured by the MPI wait below
            #dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
            #if rank == 0: print("FENICS Step {} --- File Reading: {}".format(s, MPI.Wtime()-start_time))
            #store the temperature on the array with the right index if the point is found;
            #this creates comm.size temperature_arrays, each non-zero only on
            #the sub-mesh present on that rank
            #mesh and temperature are divided differently across communicators!! TO LOOK!
            loop_time = MPI.Wtime()
            #with np.nditer([bulk_local, dummy_array[0,0,:,:,:]], flags=['multi_index'], op_flags=[['readonly', 'arraymask'], ['writeonly', 'writemasked']]) as it:
            #    for iterable in it:
            #        if bulk_local[it.multi_index] == 1:
            #            for q in range(nb_quad):
            #                try:
            #                    var = temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
            #                    if var < 0:
            #                        pass
            #                    else:
            #                        dummy_array[tuple((0,q))+it.multi_index] = var
            #                except RuntimeError:
            #                    pass
            for lmi_index, lmi in enumerate(local_multi_index):
                val = temperature_field_store(quad_coordinates_array[lmi])
                if val <= 0:
                    dummy_array[lmi] = 0
                else:
                    dummy_array[lmi] = val
            #if rank == 0: print("FENICS Step {} --- Looping done: {}".format(s, MPI.Wtime()-loop_time))
            #make sure the previous temperature sends have completed before building
            #the new temperature array
            start_time = MPI.Wtime()
            if s != 0:
                for p, proc_rank in enumerate(send_ranks):
                    reqs[p].wait()
            #if rank == 0: print("FENICS Step {} --- Previous Requests Wait done: {}".format(s, MPI.Wtime()-start_time))
            start_time = MPI.Wtime()
            temperature_array = comm.allreduce(dummy_array, op=MPI.SUM)
            temperature_array = temperature_array + 293
            #if rank == 0: print("FENICS Step {} --- Temperature Reduce done: {}".format(s, MPI.Wtime()-start_time))
            #start the standing send request
            start_time = MPI.Wtime()
            reqs = [None] * len(send_ranks)
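            #slice the global temperature field down to each muSpectre rank's subdomain
            #(layout received during the handshake) and ship it with a non-blocking send,
            #so reading the next temperature file overlaps with the solve on the
            #muSpectre side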
            for p, proc_rank in enumerate(send_ranks):
                send_temp = np.array(temperature_array[:,:,list_subdomain_loc[proc_rank][0]:list_subdomain_loc[proc_rank][0]+list_nb_subdomain_grid_pts[proc_rank][0],list_subdomain_loc[proc_rank][1]:list_subdomain_loc[proc_rank][1]+list_nb_subdomain_grid_pts[proc_rank][1],list_subdomain_loc[proc_rank][2]:list_subdomain_loc[proc_rank][2]+list_nb_subdomain_grid_pts[proc_rank][2]])
                #print(p, proc_rank, list_subdomain_loc[proc_rank], list_nb_subdomain_grid_pts[proc_rank])
                #print('send_shape', np.shape(send_temp))
                reqs[p] = comm_py.Isend([send_temp, MPI.DOUBLE], proc_rank, tag=s)
            #if (s+1)%11816 == 0:
            #    print('=== STEP {}/{} ==='.format(s+1, nb_steps))
            #    break
            #if rank == 0: print("FENICS Step {} --- Request sending done: {}".format(s, MPI.Wtime()-start_time))
            #if rank == 0: print("FENICS Step {} COMPLETE --- in: {}".format(s, MPI.Wtime()- starting_step))
            #if rank == 0: print("FENICS Step {} --- Step done: {}".format(s, MPI.Wtime()-start_time))
        comm.barrier()
        end_time = MPI.Wtime()
        if rank == 0: print("Total FENICS time: {}".format(timer.time() - start))
    #make timestamp array global by broadcasting
    timestamp_arr = comm_py.bcast(timestamp_arr, root = 0)
    #if on the muSpectre communicator, update the global timestamp
    if color == 1:
        file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
        file_io_object_write.close()
    comm_py.barrier()
    if rank_py == 0: print("Total run time:", timer.time()-start)
if __name__ == '__main__':
    main()
