diff --git a/mechanical_model/netcdf2h5_hea_1q.py b/mechanical_model/netcdf2h5_hea_1q.py
index 7dbf159..d2dbd57 100644
--- a/mechanical_model/netcdf2h5_hea_1q.py
+++ b/mechanical_model/netcdf2h5_hea_1q.py
@@ -1,557 +1,569 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
 Created on Tue Aug  2 13:47:04 2022
 
 @author: ekinkubilay
 """
 
 #from netCDF4 import Dataset
 
 """
 nc_file_name = 'spectral_3D_output.nc'
 data = Dataset(nc_file_name, 'r')
 
 print(data)
 
 grad = data.variables['grad'][:,:,:,:,:,:]
 nx = data.dimensions['nx']
 
 print(type(grad))
 
 hf = h5py.File('data.h5', 'w')
 hf.create_dataset('dataset_1', data=grad)
 hf.close()
 
 """
 
 import os
 import sys
 import h5py
 
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmufft/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmugrid/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python/muSpectre"))
 
 print("current working directory: '{}'".format(os.getcwd()))
 
 import muFFT
 import muSpectre as msp
 
 import numpy as np
 import matplotlib.pyplot as plt
 import muGrid
 from muFFT import Stencils2D
 from muFFT import Stencils3D
 import cProfile, pstats
 import time as timer
 import fenics 
 
 from petsc4py import PETSc
 import argparse
 
 
 from muSpectre.gradient_integration import get_complemented_positions_class_solver
 
 
 def parse_args():
     """
     Function to handle arguments
     """
     parser = argparse.ArgumentParser(description='Run given layer')
     parser.add_argument("layer_no", type=int, help='layer_number')
     parser.add_argument("factor", type=float, help='factor for material parametric analysis')
     parser.add_argument("target_directory", help='target directory for output/input files')
     args = parser.parse_args()
 
     return args
 
 def read_data_of_step(file_io_object_read, cell, solver, read_netcdf, material, read_frame=-1):
 
     nb_quad = cell.nb_quad_pts
     nb_domain_grid_pts = cell.nb_domain_grid_pts
     dim = cell.dims
     alpha_thermal = material.alpha_thermal
     T_m = material.T_m
     epsilon_sat = 0.35
-    sigma_for = 1.200e9 #GPa
+    sigma_for = 1.240e9  # forest stress, 1.240 GPa (value stored in Pa)
     if read_netcdf:
         file_io_object_read.read(
             frame=read_frame,
             field_names=["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
 
     material_arr = cell.get_fields().get_field('material2').array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     flux_arr = cell.get_fields().get_field('flux').array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     flux_arr_2 = np.array(flux_arr, copy=True)
     grad_arr = cell.get_fields().get_field('grad').array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     epsilon_plastic = cell.get_globalised_current_real_field("epsilon_plastic").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     epsilon_bar = cell.get_globalised_current_real_field("epsilon_bar").array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     vacuum_strain = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     temperature_material = cell.get_globalised_internal_real_field("temperature").array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     melt_pool = cell.get_globalised_internal_int_field("melt_pool").array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     thermal_strain = np.zeros_like(grad_arr)
     thermal_strain_value = (temperature_material[:,0,0] - T_m)*alpha_thermal
     for i in range(3):
         thermal_strain[:,i,i] = np.array([thermal_strain_value])
     elastic_strain = grad_arr - epsilon_plastic - vacuum_strain - thermal_strain
     total_actual_strain = grad_arr - vacuum_strain
     sigma_forest = np.zeros_like(temperature_material)
     sigma_forest = sigma_for*(1-np.exp(-epsilon_bar[:,0,0]/epsilon_sat))*(1- temperature_material[:,0,0]/T_m)**0.5
     stress_deviatoric = np.zeros_like(flux_arr)
     trace_array = (flux_arr[:,0,0] +  flux_arr[:,1,1] +  flux_arr[:,2,2])/3
     stress_deviatoric += flux_arr
     for i in range(3):
         stress_deviatoric[:,i,i] -= trace_array 
     sigma_vonmises = np.sqrt((3/2)*np.sum(stress_deviatoric*stress_deviatoric, axis=(1,2)))
 
     #flux_integral = np.repeat(np.expand_dims(solver.projection.integrate(solver.flux.field).array(), 2), 6, axis=2).reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
     #flux_integral_nonaffine = np.repeat(np.expand_dims(solver.projection.integrate_nonaffine_displacements(solver.flux.field).array(), 2), 6, axis=2).reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
     
     #grad_integral = solver.projection.integrate(solver.grad.field).array().reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
 
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #vacuum_strain_field = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')
     #strain_field_alias -= vacuum_strain_field
 
     [x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     total_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,25:,:,0:4] = 0
     grad_integral_nonaffine = solver.projection.integrate_nonaffine_displacements(solver.grad.field).array().reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
 
     [x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     actual_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
 
     solver.projection.apply_projection(solver.flux.field)
     c_data = {
             "material": np.array([material_arr]),
             "stress": np.array([flux_arr_2]),
             "strain": np.array([grad_arr]),
             "epsilon_plastic": np.array([epsilon_plastic]),
             "epsilon_bar": np.array([epsilon_bar]),
             "vacuum_strain": np.array([vacuum_strain]),
             "temperature_material": np.array([temperature_material]),
             "melt_pool": np.array([melt_pool]),
             "epsilon_elastic": np.array([elastic_strain]),
             "sigma_forest": np.array([sigma_forest]),
             "sigma_vonmises": np.array([sigma_vonmises]),
             "stress_deviatoric": np.array([stress_deviatoric]),
             "total_actual_strain": np.array([total_actual_strain]),
             "projection": np.array([flux_arr]),
             #"flux_integral": np.array([flux_integral]),
             #"grad_integral": np.array([grad_integral]),
             #"flux_integral_nonaffine": np.array([flux_integral_nonaffine]),
             "grad_integral_nonaffine": np.array([grad_integral_nonaffine])
             }
 
     #file_io_object_read.read(frame=read_frame,field_names=["grad"])
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #strain_field_alias_copy = np.array(strain_field, copy=True)
     #strain_field_alias[material_field == 0] = 0
     #vacuum_strain_field = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')
     #strain_field_alias -= vacuum_strain_field
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,:,:,0] = 0
     #strain_field_alias[:,:,:,:,:,:] = np.roll(strain_field_alias, (nb_domain_grid_pts[0]//2, nb_domain_grid_pts[1]//2, nb_domain_grid_pts[2]//2), (3,4,5))
     #[x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     #x_displ = np.roll(x_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #y_displ = np.roll(y_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #z_displ = np.roll(z_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     
     #total_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
     #file_io_object_read.read(frame=read_frame,field_names=["grad"])
     #strain_field_alias[:,:,:,:,:,:] = strain_field_alias_copy[:,:,:,:,:,:]
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #vacuum_strain_field = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')
     #strain_field_alias -= vacuum_strain_field
 
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,:,:,:] = np.roll(strain_field_alias, (nb_domain_grid_pts[0]//2, nb_domain_grid_pts[1]//2, nb_domain_grid_pts[2]//2), (3,4,5))
     #[x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     #x_displ = np.roll(x_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #y_displ = np.roll(y_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #z_displ = np.roll(z_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     
     #actual_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
     #file_io_object_read.read(frame=read_frame,field_names=["grad"])
     #strain_field_alias = strain_field_alias_copy
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,:,:,:] = np.roll(strain_field_alias, (nb_domain_grid_pts[0]//2, nb_domain_grid_pts[1]//2, nb_domain_grid_pts[2]//2), (3,4,5))
     #strain_field_alias[:,:,:,:,:] = cell.get_globalised_current_real_field("epsilon_plastic").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')[:,:,:,:,:]
     #[x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     #x_displ = np.roll(x_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #y_displ = np.roll(y_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #z_displ = np.roll(z_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     
     #plastic_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
  
     point_data = {'actual_displacement' : actual_displacement, 'total_displacement': total_displacement}
     if read_netcdf:
         file_io_object_read.read(frame=read_frame,field_names=["grad"])
 
     return c_data, point_data
 
 def main():
     
 
     args = parse_args()
     layer_no = args.layer_no
     factor = args.factor
     target_directory = args.target_directory
     
     from mpi4py import MPI
     comm_py = MPI.COMM_WORLD
     comm = muGrid.Communicator(comm_py)
     #rank = comm.rank
     #procs = comm.size
     #os.chdir('/scratch/kubilay/mechanical_model/results/mu_layer')
     os.chdir(target_directory)
     
     #get the geometry via fenics
     mesh = fenics.Mesh()
     filename = "/work/lammm/kubilay/mesh/layer_"+str(layer_no).zfill(3)+".xdmf"
     temperature_filename = "/work/lammm/kubilay/results/temperature/temperature_layer_"+str(layer_no).zfill(3)+".h5"
     f = fenics.XDMFFile(filename)
     f.read(mesh)
     fenics.MeshTransformation.rescale(mesh, 1e-3, fenics.Point(0,0,0)) #in m
     f.close()
     bounding_box = mesh.bounding_box_tree()
     
     #nb_steps = 20
     dim = 3
     nb_quad = 1
     
     x_max = np.max(mesh.coordinates()[:,0])
     x_min = np.min(mesh.coordinates()[:,0])
     y_max = np.max(mesh.coordinates()[:,1])
     y_min = np.min(mesh.coordinates()[:,1])
     z_max = np.max(mesh.coordinates()[:,2])
     z_min = np.min(mesh.coordinates()[:,2])
     
     
     xmax = np.array([np.NAN])
     ymax = np.array([np.NAN])
     zmax = np.array([np.NAN])
     xmin = np.array([np.NAN])
     ymin = np.array([np.NAN])
     zmin = np.array([np.NAN])
     
     dx, dy, dz = 0.0002, 0.0003125, 0.00003
     z_lim = 0.00801
     x_lim = 0.055
     element_dimensions = [dx, dy, dz]
     #nb_domain_grid_pts = [11,11,11]
     domain_lengths = [x_lim-x_min,y_max-y_min,z_max-z_lim]
     nb_domain_grid_pts = list(np.array(domain_lengths)/np.array(element_dimensions))
     nb_domain_grid_pts = [round(i) for i in nb_domain_grid_pts]
 
     #domain_lengths = [x_max-x_min,y_max-y_min,z_max-z_min]
     #domain_lengths = [x_max-x_min,y_max-y_min,z_max-z_min]
     #element_dimensions = np.array(domain_lengths)/np.array(nb_domain_grid_pts-np.ones(dim))
     
     #define constant material properties
     young = 162e9 #GPa
     poisson = 0.277 #unitless
     alpha_s = 0.55 #unitless
     Delta_E_bs = 1.13*factor**(2/3) #eV
     epsilon_sat = 0.35 # unitless
     sigma_gb = 0.150e9 #GPa
-    sigma_s0 = 0.254*factor**(4/3)*1e9 #GPa
-    sigma_f = 1.200e9 #GPa
+    #sigma_s0 = 0.254*factor**(4/3)*1e9 #GPa
+    sigma_s0 = 0.254*1e9  # 0.254 GPa (value stored in Pa); factor scaling disabled above
+    sigma_f = 1.240e9  # 1.240 GPa (value stored in Pa)
     
     ## define the convergence tolerance for the Newton-Raphson increment
     newton_tol = 1e-6
     equil_tol = newton_tol
     ## tolerance for the solver of the linear cell
     cg_tol = -1e-2
     maxiter = 50000  # for linear cell solver
     
     
     #bulk = np.load("bulk.npy")  
     #coords = np.load("coords.npy")
     #nb_domain_grid_pts = np.load("nb_domain_grid_pts.npy")
     #domain_lengths = np.load("domain_lengths.npy")
     
     bulk = np.ones(nb_domain_grid_pts)
     #x_dim = np.linspace(x_min, x_lim, nb_domain_grid_pts[0])
     #y_dim = np.linspace(y_min, y_max, nb_domain_grid_pts[1])
     #z_dim = np.linspace(z_lim, z_max, nb_domain_grid_pts[2])
     
     #assign material
     #for i,x in enumerate(x_dim):
     #    for j,y in enumerate(y_dim):
     #        for k,z in enumerate(z_dim):
     #            if len(bounding_box.compute_collisions(fenics.Point(x+0.5*dx,y+0.5*dy,z+0.5*dz))) != 0:
     #                bulk[i,j,k] = 1
         
     #add base plate at the bottom and vacuum on top and sides
-    #vacuum on top layer (1)
+    #vacuum on top layer (2)
+    bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
+    nb_domain_grid_pts[2] += 1
+    domain_lengths[2] += element_dimensions[2]
     bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
     nb_domain_grid_pts[2] += 1
     domain_lengths[2] += element_dimensions[2]
-    #bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
-    #nb_domain_grid_pts[2] += 1
-    #domain_lengths[2] += element_dimensions[2]
 
     #vacuum on y plane (1)
     bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
     nb_domain_grid_pts[1] += 1
     domain_lengths[1] += element_dimensions[1]
     #bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
     #nb_domain_grid_pts[1] += 1
     #domain_lengths[1] += element_dimensions[1]
 
-    #vacuum on x plane (1)
+    #vacuum on x plane (2)
     bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     nb_domain_grid_pts[0] += 1
     domain_lengths[0] += element_dimensions[0]
     bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     nb_domain_grid_pts[0] += 1
     domain_lengths[0] += element_dimensions[0]
     #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     #nb_domain_grid_pts[0] += 1
     #domain_lengths[0] += element_dimensions[0]
     #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     #nb_domain_grid_pts[0] += 1
     #domain_lengths[0] += element_dimensions[0]
 
     #base layer on bottom layer (10)
     no_base_layer = 22
     for i in range(no_base_layer):
         bulk = np.insert(bulk, 0, 2, axis=2)
         nb_domain_grid_pts[2] += 1
         domain_lengths[2] += element_dimensions[2]
 
     #domain discretisation TO DO:muspectre has this array already
     x_dim = np.linspace(x_min, x_lim+1*element_dimensions[0], nb_domain_grid_pts[0])+0.5*dx
     y_dim = np.linspace(ymin, ymax+0*element_dimensions[1], nb_domain_grid_pts[1])+0.5*dy
-    z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+0*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
+    z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+1*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
     
     coords = np.zeros([nb_domain_grid_pts[0],nb_domain_grid_pts[1],nb_domain_grid_pts[2],3])
     for i in range(nb_domain_grid_pts[0]):
         coords[i,:,:,0] = x_dim[i]
     for i in range(nb_domain_grid_pts[1]):
         coords[:,i,:,1] = y_dim[i]
     for i in range(nb_domain_grid_pts[2]):
         coords[:,:,i,2] = z_dim[i]
             
     s_list = [(i,i+0.001) for i in np.linspace(0.005, 0.050, 26)]
     s_list2 = [(0.005, 0.55)]
     support_mask = np.ones_like(x_dim)
     support_mask2 = np.ones_like(x_dim)
     for i,x in enumerate(x_dim):
         loc = x
         for j,s in enumerate(s_list2):
             #print(loc, s[0], s[1])
             if loc>s[0] and loc<s[1]:
                 support_mask2[i] = 0
         for j,s in enumerate(s_list):
             #print(loc, s[0], s[1])
             if loc>s[0] and loc<s[1]:
                 support_mask[i] = 0
-    for l in range(no_base_layer):
-        for j,y in enumerate(y_dim):
+#    for l in range(no_base_layer):
+#        for j,y in enumerate(y_dim):
+#        #print(np.shape(bulk[:,j,0]), np.shape(support_mask))
+#        #print(support_mask[:,0])
+#            bulk[:,j,l] = bulk[:,j,l]*support_mask2
+
+#    for l in range(no_base_layer):
+    for j,y in enumerate(y_dim):
         #print(np.shape(bulk[:,j,0]), np.shape(support_mask))
         #print(support_mask[:,0])
-            bulk[:,j,l] = bulk[:,j,l]*support_mask2
-    bulk[:,-1,:] = 0
-    bulk[-1,:,:] = 0
-    bulk[-2,:,:] = 0
+        bulk[:,j,no_base_layer-1] = bulk[:,j,no_base_layer-1]*support_mask2
+    bulk[:,-1,no_base_layer-1] = 0
+    bulk[-1,:,no_base_layer-1] = 0
+    bulk[-2,:,no_base_layer-1] = 0
 
     cell = msp.cell.CellData.make(list(nb_domain_grid_pts), list(domain_lengths))
     cell.nb_quad_pts = nb_quad
     cell.get_fields().set_nb_sub_pts("quad", nb_quad)
      
     #define materials
     material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
     vacuum = msp.material.MaterialLinearElastic1_3d.make(cell, "vacuum", 0.0, poisson)
     #base = msp.material.MaterialLinearElastic1_3d.make(cell, "base", 20*young, poisson)
     transverse_aniso = [3e9*617,3e9*236, 3e9*236, 0, 0, 0, 3e9*617, 3e9*236, 0, 0, 0, 3e9*617, 0, 0, 0, 3e9*190.3, 0, 0, 3e9*190.3, 0, 3e9*190.3]
     base = msp.material.MaterialLinearAnisotropic_3d.make(cell, "base", transverse_aniso)
 
     for pixel_index, pixel in enumerate(cell.pixels):
         if bulk[tuple(pixel)]==1:
             material.add_pixel(pixel_index)
         elif bulk[tuple(pixel)]==0:
             vacuum.add_pixel(pixel_index)
         elif bulk[tuple(pixel)]==2:
             base.add_pixel(pixel_index)
 
     #gradient = Stencils3D.trilinear_1q_finite_element
     gradient = Stencils3D.averaged_upwind
     weights = nb_quad*[1]
     
     ## use solver 
     krylov_solver = msp.solvers.KrylovSolverCG(cg_tol, maxiter)  
       
     solver = msp.solvers.SolverNewtonCG(cell, krylov_solver, msp.Verbosity.Full, 
                                         newton_tol, equil_tol, maxiter, gradient, weights)
     
     solver.formulation = msp.Formulation.small_strain
     solver.initialise_cell()
 
     
     output_filename = "layer_"+str(layer_no).zfill(3)+".nc"
     
     
     file_io_object_read = muGrid.FileIONetCDF(output_filename, 
                                          muGrid.FileIONetCDF.OpenMode.Read, 
                                          comm)
 
     cell.get_fields().set_nb_sub_pts("quad", nb_quad)
     material_phase = cell.get_fields().register_int_field("material2", 1, 'quad')
     #coordinates = cell.get_fields().register_real_field("coordinates", 3, 'quad')
     #temperature = cell.get_fields().register_real_field("temperature2", 1 , 'quad')
     #vacuum_strain_field = cell.get_fields().register_real_field("vacuum_strain2", (3,3), "quad")
 
     #material.identify_temperature_loaded_quad_pts(cell, bulk.reshape(-1,1,order="F"))
 
     file_io_object_read.register_field_collection(field_collection=cell.get_fields(),field_names=cell.get_field_names())
     file_io_object_read.register_field_collection(field_collection=material.collection)
     # register global fields of the cell which you want to write
     # print("cell field names:\n", cell.get_field_names())
     #cell_field_names = cell.get_field_names()
     
     # register global fields of the cell which you want to write
     #print("cell field names:\n", cell.get_field_names())
     #cell_field_names = cell.get_field_names()
     #file_io_object_read.register_field_collection(field_collection=cell.get_fields(),field_names=["coordinates", "flux",
     #                     "grad",
     #                     "incrF",
     #                     "material2",
     #                     "rhs",
     #                     "tangent", "temperature2", "vacuum_strain2"])
     #file_io_object_read.register_field_collection(field_collection=material.collection, field_names=["epsilon_bar"])
     
     
     #mat_fields = material.list_fields()
     #file_io_object_read.register_field_collection(field_collection=material.collection)
     
     
       
     # sub_domain_loc = cell.subdomain_locations
     # #material_phase = cell.get_fields().register_int_field("material", 1, 'quad')
     
     # material_phase_alias = np.array(material_phase, copy=False)
     # coordinates_alias = np.array(coordinates, copy=False)
     
     # for pixel_id, pixel_coord in cell.pixels.enumerate():
     #     i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
     #     pix_mat = int(bulk[i, j, k])
     
     #     for q in range(nb_quad):
     #         # local coordinates on the processor
     #         a = i - sub_domain_loc[0]
     #         b = j - sub_domain_loc[1]
     #         c = k - sub_domain_loc[2]
     
     #         material_phase_alias[a, b, c] = pix_mat
     #         coordinates_alias[0, 0, a, b, c] = i
     #         coordinates_alias[1, 0, a, b, c] = j
     #         coordinates_alias[2, 0, a, b, c] = k
     
     timestamp_arr = file_io_object_read.read_global_attribute('timestamp')
     #print(timestamp_arr)
     nb_steps = len(timestamp_arr)
     start = timer.time()
 
     import meshio
     points = msp.linear_finite_elements.get_position_3d_helper(cell, solver=solver)
     nx, ny, nz = cell.nb_domain_grid_pts
     xc, yc, zc = np.mgrid[:nx, :ny, :nz]
     # Global node indices
     def c2i(xp, yp, zp):
         return xp + (nx+1) * (yp + (ny+1) * zp)
     cells = cells = np.swapaxes(
             [[c2i(xc, yc, zc), c2i(xc, yc+1, zc),
               c2i(xc+1, yc+1, zc), c2i(xc+1, yc, zc),c2i(xc, yc, zc+1), c2i(xc, yc+1, zc+1),
               c2i(xc+1, yc+1, zc+1), c2i(xc+1, yc, zc+1)]], 0, 1)
 
     cells = cells.reshape((8, -1), order='F').T
 
-    material.T_m = 1650
+    material.T_m = 1600
     material.alpha_thermal = 22e-6
 
     with meshio.xdmf.TimeSeriesWriter('mu_layer_'+str(layer_no).zfill(3)+'.xdmf') as writer:
         writer.write_points_cells(points, {"hexahedron": cells})
     
         c_data, point_data = read_data_of_step(file_io_object_read, cell, solver, True, material, read_frame=0)
         writer.write_data(timestamp_arr[0], cell_data=c_data, point_data=point_data)
 
         if layer_no == 333: 
 
             c_data, point_data = read_data_of_step(file_io_object_read, cell, solver, True, material, read_frame=1)
             writer.write_data(timestamp_arr[1], cell_data=c_data, point_data=point_data)
             strain_field = cell.get_fields().get_field("grad")
             strain_field_alias = np.array(strain_field, copy=False)
-            strain_field_alias[:,:,:,:,:,:] = 0
+            #strain_field_alias[:,:,:,:,:,:] = 0
+
+            material_phase_alias = np.array(material_phase, copy=False)
+            for pixel_id, pixel_coord in cell.pixels.enumerate():
+                i,j,k = pixel_coord[0], pixel_coord[1],  pixel_coord[2]
+                material_phase_alias[i,j,k] = int(bulk[i,j,k])
             #size_array = cell.get_fields().get_field("grad").array()
             #material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1,order="F"))
             material.dt = 3000
             #material.update_temperature_field(cell, 293*np.ones_like(material_phase).reshape(-1,1,order="F"))
             #strain_field = cell.get_fields().get_field("grad")
             #strain_field_alias = np.array(strain_field, copy=False)
             #strain_field_alias *= 0.0
             res = solver.solve_load_increment(np.zeros((3,3)))
 
             c_data, point_data = read_data_of_step(file_io_object_read, cell, solver, False, material)
             writer.write_data(timestamp_arr[1]+material.dt, cell_data=c_data, point_data=point_data)
 
            
         #for i in range(1):
         	
         #    print('=== STEP {}/{} ==='.format(i+1, nb_steps))
 
       
         #    if layer_no == 333:
                 #flux_arr = cell.get_fields().get_field('flux').array()
                 #grad_arr = cell.get_fields().get_field('grad').array()
                 #vacuum_strain_arr = cell.get_globalised_internal_real_field("vacuum_strain").array()
                 #flux_arr_alias = np.array(flux_arr, copy=False)
                 #grad_arr_alias = np.array(grad_arr, copy=False)
                 #vacuum_strain_arr_alias = np.array(vacuum_strain_arr, copy=False)
                 #flux_arr_alias[:,:,:,:,:] = 0
                 #grad_arr_alias[:,:,:,:,:] = 0
                 #vacuum_strain_arr_alias[:,:,:,:,:] = 0
                 #material.update_vacuum_strain_field(cell, vacuum_strain_arr_alias.reshape(-1,1, order="F"))
          #       material.update_temperature_field(cell, 293*np.ones_like(bulk_array.reshape(-1,1,order="F")))
          #       material.dt = 3000
                 #material.alpha_thermal = 0.0
          #       res = solver.solve_load_increment(np.zeros((3,3)))
 
             #msp.linear_finite_elements.write_3d_class(
             #"ekin_tutorial_3D_spectral_output_{}.xdmf".format(i), cell, solver, cell_data=c_data)
             comm_py.barrier()
             
             
     #stress = cell.get_fields().get_field('flux').array()
     #strain = cell.get_fields().get_field('grad').array()
     #temperature = cell.get_fields().get_field('temperature').array()
     #np.savez('previous_fields.npz', stress=stress, strain= strain, temperature=temperature)
     
     file_io_object_read.close()
     
     
 if __name__=='__main__':
 
 
     main()
diff --git a/mechanical_model/netcdf2h5_hea_1q.py b/mechanical_model/netcdf2h5_hea_1q_2.py
similarity index 94%
copy from mechanical_model/netcdf2h5_hea_1q.py
copy to mechanical_model/netcdf2h5_hea_1q_2.py
index 7dbf159..ee02cfb 100644
--- a/mechanical_model/netcdf2h5_hea_1q.py
+++ b/mechanical_model/netcdf2h5_hea_1q_2.py
@@ -1,557 +1,575 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
 Created on Tue Aug  2 13:47:04 2022
 
 @author: ekinkubilay
 """
 
 #from netCDF4 import Dataset
 
 """
 nc_file_name = 'spectral_3D_output.nc'
 data = Dataset(nc_file_name, 'r')
 
 print(data)
 
 grad = data.variables['grad'][:,:,:,:,:,:]
 nx = data.dimensions['nx']
 
 print(type(grad))
 
 hf = h5py.File('data.h5', 'w')
 hf.create_dataset('dataset_1', data=grad)
 hf.close()
 
 """
 
 import os
 import sys
 import h5py
 
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmufft/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmugrid/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python/muSpectre"))
 
 print("current working directory: '{}'".format(os.getcwd()))
 
 import muFFT
 import muSpectre as msp
 
 import numpy as np
 import matplotlib.pyplot as plt
 import muGrid
 from muFFT import Stencils2D
 from muFFT import Stencils3D
 import cProfile, pstats
 import time as timer
 import fenics 
 
 from petsc4py import PETSc
 import argparse
 
 
 from muSpectre.gradient_integration import get_complemented_positions_class_solver
 
 
 def parse_args():
     """
     Function to handle arguments
     """
     parser = argparse.ArgumentParser(description='Run given layer')
     parser.add_argument("layer_no", type=int, help='layer_number')
     parser.add_argument("factor", type=float, help='factor for material parametric analysis')
     parser.add_argument("target_directory", help='target directory for output/input files')
     args = parser.parse_args()
 
     return args
 
 def read_data_of_step(file_io_object_read, cell, solver, read_netcdf, material, read_frame=-1):
 
     nb_quad = cell.nb_quad_pts
     nb_domain_grid_pts = cell.nb_domain_grid_pts
     dim = cell.dims
     alpha_thermal = material.alpha_thermal
     T_m = material.T_m
     epsilon_sat = 0.35
-    sigma_for = 1.200e9 #GPa
+    sigma_for = 1.240e9 #GPa
     if read_netcdf:
         file_io_object_read.read(
             frame=read_frame,
             field_names=["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
 
     material_arr = cell.get_fields().get_field('material2').array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     flux_arr = cell.get_fields().get_field('flux').array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     flux_arr_2 = np.array(flux_arr, copy=True)
     grad_arr = cell.get_fields().get_field('grad').array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     epsilon_plastic = cell.get_globalised_current_real_field("epsilon_plastic").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     epsilon_bar = cell.get_globalised_current_real_field("epsilon_bar").array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     vacuum_strain = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, dim, -1), order='F').T.swapaxes(1, 2)
     temperature_material = cell.get_globalised_internal_real_field("temperature").array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     melt_pool = cell.get_globalised_internal_int_field("melt_pool").array().reshape((1, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((1, 1, -1), order='F').T.swapaxes(1, 2)
     thermal_strain = np.zeros_like(grad_arr)
     thermal_strain_value = (temperature_material[:,0,0] - T_m)*alpha_thermal
     for i in range(3):
         thermal_strain[:,i,i] = np.array([thermal_strain_value])
     elastic_strain = grad_arr - epsilon_plastic - vacuum_strain - thermal_strain
     total_actual_strain = grad_arr - vacuum_strain
     sigma_forest = np.zeros_like(temperature_material)
-    sigma_forest = sigma_for*(1-np.exp(-epsilon_bar[:,0,0]/epsilon_sat))*(1- temperature_material[:,0,0]/T_m)**0.5
+    sigma_forest = sigma_for*(1-np.exp(-epsilon_bar[:,0,0]/epsilon_sat))*(1- temperature_material[:,0,0]/T_m)**0.125
     stress_deviatoric = np.zeros_like(flux_arr)
     trace_array = (flux_arr[:,0,0] +  flux_arr[:,1,1] +  flux_arr[:,2,2])/3
     stress_deviatoric += flux_arr
     for i in range(3):
         stress_deviatoric[:,i,i] -= trace_array 
     sigma_vonmises = np.sqrt((3/2)*np.sum(stress_deviatoric*stress_deviatoric, axis=(1,2)))
 
     #flux_integral = np.repeat(np.expand_dims(solver.projection.integrate(solver.flux.field).array(), 2), 6, axis=2).reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
     #flux_integral_nonaffine = np.repeat(np.expand_dims(solver.projection.integrate_nonaffine_displacements(solver.flux.field).array(), 2), 6, axis=2).reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
     
     #grad_integral = solver.projection.integrate(solver.grad.field).array().reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
 
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #vacuum_strain_field = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')
     #strain_field_alias -= vacuum_strain_field
 
     [x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     total_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,25:,:,0:4] = 0
     grad_integral_nonaffine = solver.projection.integrate_nonaffine_displacements(solver.grad.field).array().reshape((dim, 1, nb_quad) + tuple(nb_domain_grid_pts),order='f').reshape((dim, 1, -1), order='F').T.swapaxes(1, 2)
 
     [x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     actual_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
 
     solver.projection.apply_projection(solver.flux.field)
     c_data = {
             "material": np.array([material_arr]),
             "stress": np.array([flux_arr_2]),
             "strain": np.array([grad_arr]),
             "epsilon_plastic": np.array([epsilon_plastic]),
             "epsilon_bar": np.array([epsilon_bar]),
             "vacuum_strain": np.array([vacuum_strain]),
             "temperature_material": np.array([temperature_material]),
             "melt_pool": np.array([melt_pool]),
             "epsilon_elastic": np.array([elastic_strain]),
             "sigma_forest": np.array([sigma_forest]),
             "sigma_vonmises": np.array([sigma_vonmises]),
             "stress_deviatoric": np.array([stress_deviatoric]),
             "total_actual_strain": np.array([total_actual_strain]),
             "projection": np.array([flux_arr]),
             #"flux_integral": np.array([flux_integral]),
             #"grad_integral": np.array([grad_integral]),
             #"flux_integral_nonaffine": np.array([flux_integral_nonaffine]),
             "grad_integral_nonaffine": np.array([grad_integral_nonaffine])
             }
 
     #file_io_object_read.read(frame=read_frame,field_names=["grad"])
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #strain_field_alias_copy = np.array(strain_field, copy=True)
     #strain_field_alias[material_field == 0] = 0
     #vacuum_strain_field = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')
     #strain_field_alias -= vacuum_strain_field
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,:,:,0] = 0
     #strain_field_alias[:,:,:,:,:,:] = np.roll(strain_field_alias, (nb_domain_grid_pts[0]//2, nb_domain_grid_pts[1]//2, nb_domain_grid_pts[2]//2), (3,4,5))
     #[x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     #x_displ = np.roll(x_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #y_displ = np.roll(y_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #z_displ = np.roll(z_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     
     #total_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
     #file_io_object_read.read(frame=read_frame,field_names=["grad"])
     #strain_field_alias[:,:,:,:,:,:] = strain_field_alias_copy[:,:,:,:,:,:]
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #vacuum_strain_field = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')
     #strain_field_alias -= vacuum_strain_field
 
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,:,:,:] = np.roll(strain_field_alias, (nb_domain_grid_pts[0]//2, nb_domain_grid_pts[1]//2, nb_domain_grid_pts[2]//2), (3,4,5))
     #[x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     #x_displ = np.roll(x_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #y_displ = np.roll(y_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #z_displ = np.roll(z_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     
     #actual_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
     #file_io_object_read.read(frame=read_frame,field_names=["grad"])
     #strain_field_alias = strain_field_alias_copy
     #strain_field = cell.get_fields().get_field("grad")
     #strain_field_alias = np.array(strain_field, copy=False)
     #strain_field_alias[:,:,:,-1,:,:] = 0
     #strain_field_alias[:,:,:,:,-1,:] = 0
     #strain_field_alias[:,:,:,:,:,-1] = 0
     #strain_field_alias[:,:,:,:,:,:] = np.roll(strain_field_alias, (nb_domain_grid_pts[0]//2, nb_domain_grid_pts[1]//2, nb_domain_grid_pts[2]//2), (3,4,5))
     #strain_field_alias[:,:,:,:,:] = cell.get_globalised_current_real_field("epsilon_plastic").array().reshape((dim, dim, nb_quad) + tuple(nb_domain_grid_pts),order='f')[:,:,:,:,:]
     #[x_0, y_0, z_0], [x_displ, y_displ, z_displ] = get_complemented_positions_class_solver("0n", cell, solver, np.eye(3), periodically_complemented=True)
     #x_displ = np.roll(x_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #y_displ = np.roll(y_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     #z_displ = np.roll(z_displ, (-(nb_domain_grid_pts[0]+1)//2, -(nb_domain_grid_pts[1]+1)//2, -(nb_domain_grid_pts[2]+1)//2), (0,1,2))
     
     #plastic_displacement = np.transpose([x_displ.ravel(order='F'), y_displ.ravel(order='F'), z_displ.ravel(order='F')])
 
  
     point_data = {'actual_displacement' : actual_displacement, 'total_displacement': total_displacement}
     if read_netcdf:
         file_io_object_read.read(frame=read_frame,field_names=["grad"])
 
     return c_data, point_data
 
 def main():
     
 
     args = parse_args()
     layer_no = args.layer_no
     factor = args.factor
     target_directory = args.target_directory
     
     from mpi4py import MPI
     comm_py = MPI.COMM_WORLD
     comm = muGrid.Communicator(comm_py)
     #rank = comm.rank
     #procs = comm.size
     #os.chdir('/scratch/kubilay/mechanical_model/results/mu_layer')
     os.chdir(target_directory)
     
     #get the geometry via fenics
     mesh = fenics.Mesh()
     filename = "/work/lammm/kubilay/mesh/layer_"+str(layer_no).zfill(3)+".xdmf"
     temperature_filename = "/work/lammm/kubilay/results/temperature/temperature_layer_"+str(layer_no).zfill(3)+".h5"
     f = fenics.XDMFFile(filename)
     f.read(mesh)
     fenics.MeshTransformation.rescale(mesh, 1e-3, fenics.Point(0,0,0)) #in m
     f.close()
     bounding_box = mesh.bounding_box_tree()
     
     #nb_steps = 20
     dim = 3
     nb_quad = 1
     
     x_max = np.max(mesh.coordinates()[:,0])
     x_min = np.min(mesh.coordinates()[:,0])
     y_max = np.max(mesh.coordinates()[:,1])
     y_min = np.min(mesh.coordinates()[:,1])
     z_max = np.max(mesh.coordinates()[:,2])
     z_min = np.min(mesh.coordinates()[:,2])
     
     
     xmax = np.array([np.NAN])
     ymax = np.array([np.NAN])
     zmax = np.array([np.NAN])
     xmin = np.array([np.NAN])
     ymin = np.array([np.NAN])
     zmin = np.array([np.NAN])
     
     dx, dy, dz = 0.0002, 0.0003125, 0.00003
     z_lim = 0.00801
     x_lim = 0.055
     element_dimensions = [dx, dy, dz]
     #nb_domain_grid_pts = [11,11,11]
     domain_lengths = [x_lim-x_min,y_max-y_min,z_max-z_lim]
     nb_domain_grid_pts = list(np.array(domain_lengths)/np.array(element_dimensions))
     nb_domain_grid_pts = [round(i) for i in nb_domain_grid_pts]
 
     #domain_lengths = [x_max-x_min,y_max-y_min,z_max-z_min]
     #domain_lengths = [x_max-x_min,y_max-y_min,z_max-z_min]
     #element_dimensions = np.array(domain_lengths)/np.array(nb_domain_grid_pts-np.ones(dim))
     
     #define constant material properties
     young = 162e9 #GPa
     poisson = 0.277 #unitless
     alpha_s = 0.55 #unitless
     Delta_E_bs = 1.13*factor**(2/3) #eV
     epsilon_sat = 0.35 # unitless
     sigma_gb = 0.150e9 #GPa
     sigma_s0 = 0.254*factor**(4/3)*1e9 #GPa
-    sigma_f = 1.200e9 #GPa
+    sigma_f = 1.240e9 #GPa
     
     ## define the convergence tolerance for the Newton-Raphson increment
     newton_tol = 1e-6
     equil_tol = newton_tol
     ## tolerance for the solver of the linear cell
     cg_tol = -1e-2
     maxiter = 50000  # for linear cell solver
     
     
     #bulk = np.load("bulk.npy")  
     #coords = np.load("coords.npy")
     #nb_domain_grid_pts = np.load("nb_domain_grid_pts.npy")
     #domain_lengths = np.load("domain_lengths.npy")
     
     bulk = np.ones(nb_domain_grid_pts)
     #x_dim = np.linspace(x_min, x_lim, nb_domain_grid_pts[0])
     #y_dim = np.linspace(y_min, y_max, nb_domain_grid_pts[1])
     #z_dim = np.linspace(z_lim, z_max, nb_domain_grid_pts[2])
     
     #assign material
     #for i,x in enumerate(x_dim):
     #    for j,y in enumerate(y_dim):
     #        for k,z in enumerate(z_dim):
     #            if len(bounding_box.compute_collisions(fenics.Point(x+0.5*dx,y+0.5*dy,z+0.5*dz))) != 0:
     #                bulk[i,j,k] = 1
         
     #add base plate at the bottom and vacuum on top and sides
-    #vacuum on top layer (1)
+    #vacuum on top layer (2)
+    bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
+    nb_domain_grid_pts[2] += 1
+    domain_lengths[2] += element_dimensions[2]
     bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
     nb_domain_grid_pts[2] += 1
     domain_lengths[2] += element_dimensions[2]
-    #bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
-    #nb_domain_grid_pts[2] += 1
-    #domain_lengths[2] += element_dimensions[2]
 
     #vacuum on y plane (1)
     bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
     nb_domain_grid_pts[1] += 1
     domain_lengths[1] += element_dimensions[1]
     #bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
     #nb_domain_grid_pts[1] += 1
     #domain_lengths[1] += element_dimensions[1]
 
-    #vacuum on x plane (1)
+    #vacuum on x plane (2)
     bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     nb_domain_grid_pts[0] += 1
     domain_lengths[0] += element_dimensions[0]
     bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     nb_domain_grid_pts[0] += 1
     domain_lengths[0] += element_dimensions[0]
     #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     #nb_domain_grid_pts[0] += 1
     #domain_lengths[0] += element_dimensions[0]
     #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
     #nb_domain_grid_pts[0] += 1
     #domain_lengths[0] += element_dimensions[0]
 
     #base layer on bottom layer (10)
     no_base_layer = 22
     for i in range(no_base_layer):
         bulk = np.insert(bulk, 0, 2, axis=2)
         nb_domain_grid_pts[2] += 1
         domain_lengths[2] += element_dimensions[2]
 
     #domain discretisation TO DO:muspectre has this array already
     x_dim = np.linspace(x_min, x_lim+1*element_dimensions[0], nb_domain_grid_pts[0])+0.5*dx
     y_dim = np.linspace(ymin, ymax+0*element_dimensions[1], nb_domain_grid_pts[1])+0.5*dy
-    z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+0*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
+    z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+1*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
     
     coords = np.zeros([nb_domain_grid_pts[0],nb_domain_grid_pts[1],nb_domain_grid_pts[2],3])
     for i in range(nb_domain_grid_pts[0]):
         coords[i,:,:,0] = x_dim[i]
     for i in range(nb_domain_grid_pts[1]):
         coords[:,i,:,1] = y_dim[i]
     for i in range(nb_domain_grid_pts[2]):
         coords[:,:,i,2] = z_dim[i]
             
     s_list = [(i,i+0.001) for i in np.linspace(0.005, 0.050, 26)]
     s_list2 = [(0.005, 0.55)]
     support_mask = np.ones_like(x_dim)
     support_mask2 = np.ones_like(x_dim)
     for i,x in enumerate(x_dim):
         loc = x
         for j,s in enumerate(s_list2):
             #print(loc, s[0], s[1])
             if loc>s[0] and loc<s[1]:
                 support_mask2[i] = 0
         for j,s in enumerate(s_list):
             #print(loc, s[0], s[1])
             if loc>s[0] and loc<s[1]:
                 support_mask[i] = 0
-    for l in range(no_base_layer):
-        for j,y in enumerate(y_dim):
+#    for l in range(no_base_layer):
+#        for j,y in enumerate(y_dim):
+#        #print(np.shape(bulk[:,j,0]), np.shape(support_mask))
+#        #print(support_mask[:,0])
+#            bulk[:,j,l] = bulk[:,j,l]*support_mask2
+
+#    for l in range(no_base_layer):
+    for j,y in enumerate(y_dim):
         #print(np.shape(bulk[:,j,0]), np.shape(support_mask))
         #print(support_mask[:,0])
-            bulk[:,j,l] = bulk[:,j,l]*support_mask2
+        bulk[:,j,no_base_layer-1] = bulk[:,j,no_base_layer-1]*support_mask2
+    
+    bulk[:,-1,no_base_layer-1] = 0
+    bulk[-1,:,no_base_layer-1] = 0
+    bulk[-2,:,no_base_layer-1] = 0
+    """
     bulk[:,-1,:] = 0
     bulk[-1,:,:] = 0
     bulk[-2,:,:] = 0
-
+    """
     cell = msp.cell.CellData.make(list(nb_domain_grid_pts), list(domain_lengths))
     cell.nb_quad_pts = nb_quad
     cell.get_fields().set_nb_sub_pts("quad", nb_quad)
      
     #define materials
-    material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
+    alpha_f = 0.55
+    Delta_E_bf = 8
+    material = msp.material.MaterialHEA_2_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, alpha_f, Delta_E_bf, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
     vacuum = msp.material.MaterialLinearElastic1_3d.make(cell, "vacuum", 0.0, poisson)
     #base = msp.material.MaterialLinearElastic1_3d.make(cell, "base", 20*young, poisson)
     transverse_aniso = [3e9*617,3e9*236, 3e9*236, 0, 0, 0, 3e9*617, 3e9*236, 0, 0, 0, 3e9*617, 0, 0, 0, 3e9*190.3, 0, 0, 3e9*190.3, 0, 3e9*190.3]
     base = msp.material.MaterialLinearAnisotropic_3d.make(cell, "base", transverse_aniso)
 
     for pixel_index, pixel in enumerate(cell.pixels):
         if bulk[tuple(pixel)]==1:
             material.add_pixel(pixel_index)
         elif bulk[tuple(pixel)]==0:
             vacuum.add_pixel(pixel_index)
         elif bulk[tuple(pixel)]==2:
             base.add_pixel(pixel_index)
 
     #gradient = Stencils3D.trilinear_1q_finite_element
     gradient = Stencils3D.averaged_upwind
     weights = nb_quad*[1]
     
     ## use solver 
     krylov_solver = msp.solvers.KrylovSolverCG(cg_tol, maxiter)  
       
     solver = msp.solvers.SolverNewtonCG(cell, krylov_solver, msp.Verbosity.Full, 
                                         newton_tol, equil_tol, maxiter, gradient, weights)
     
     solver.formulation = msp.Formulation.small_strain
     solver.initialise_cell()
 
     
     output_filename = "layer_"+str(layer_no).zfill(3)+".nc"
     
     
     file_io_object_read = muGrid.FileIONetCDF(output_filename, 
                                          muGrid.FileIONetCDF.OpenMode.Read, 
                                          comm)
 
     cell.get_fields().set_nb_sub_pts("quad", nb_quad)
     material_phase = cell.get_fields().register_int_field("material2", 1, 'quad')
     #coordinates = cell.get_fields().register_real_field("coordinates", 3, 'quad')
     #temperature = cell.get_fields().register_real_field("temperature2", 1 , 'quad')
     #vacuum_strain_field = cell.get_fields().register_real_field("vacuum_strain2", (3,3), "quad")
 
     #material.identify_temperature_loaded_quad_pts(cell, bulk.reshape(-1,1,order="F"))
 
     file_io_object_read.register_field_collection(field_collection=cell.get_fields(),field_names=cell.get_field_names())
     file_io_object_read.register_field_collection(field_collection=material.collection)
     # register global fields of the cell which you want to write
     # print("cell field names:\n", cell.get_field_names())
     #cell_field_names = cell.get_field_names()
     
     # register global fields of the cell which you want to write
     #print("cell field names:\n", cell.get_field_names())
     #cell_field_names = cell.get_field_names()
     #file_io_object_read.register_field_collection(field_collection=cell.get_fields(),field_names=["coordinates", "flux",
     #                     "grad",
     #                     "incrF",
     #                     "material2",
     #                     "rhs",
     #                     "tangent", "temperature2", "vacuum_strain2"])
     #file_io_object_read.register_field_collection(field_collection=material.collection, field_names=["epsilon_bar"])
     
     
     #mat_fields = material.list_fields()
     #file_io_object_read.register_field_collection(field_collection=material.collection)
     
     
       
     # sub_domain_loc = cell.subdomain_locations
     # #material_phase = cell.get_fields().register_int_field("material", 1, 'quad')
     
     # material_phase_alias = np.array(material_phase, copy=False)
     # coordinates_alias = np.array(coordinates, copy=False)
     
     # for pixel_id, pixel_coord in cell.pixels.enumerate():
     #     i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
     #     pix_mat = int(bulk[i, j, k])
     
     #     for q in range(nb_quad):
     #         # local coordinates on the processor
     #         a = i - sub_domain_loc[0]
     #         b = j - sub_domain_loc[1]
     #         c = k - sub_domain_loc[2]
     
     #         material_phase_alias[a, b, c] = pix_mat
     #         coordinates_alias[0, 0, a, b, c] = i
     #         coordinates_alias[1, 0, a, b, c] = j
     #         coordinates_alias[2, 0, a, b, c] = k
     
     timestamp_arr = file_io_object_read.read_global_attribute('timestamp')
     #print(timestamp_arr)
     nb_steps = len(timestamp_arr)
     start = timer.time()
 
     import meshio
     points = msp.linear_finite_elements.get_position_3d_helper(cell, solver=solver)
     nx, ny, nz = cell.nb_domain_grid_pts
     xc, yc, zc = np.mgrid[:nx, :ny, :nz]
     # Global node indices
     def c2i(xp, yp, zp):
         return xp + (nx+1) * (yp + (ny+1) * zp)
     cells = cells = np.swapaxes(
             [[c2i(xc, yc, zc), c2i(xc, yc+1, zc),
               c2i(xc+1, yc+1, zc), c2i(xc+1, yc, zc),c2i(xc, yc, zc+1), c2i(xc, yc+1, zc+1),
               c2i(xc+1, yc+1, zc+1), c2i(xc+1, yc, zc+1)]], 0, 1)
 
     cells = cells.reshape((8, -1), order='F').T
 
-    material.T_m = 1650
+    material.T_m = 1600
     material.alpha_thermal = 22e-6
 
     with meshio.xdmf.TimeSeriesWriter('mu_layer_'+str(layer_no).zfill(3)+'.xdmf') as writer:
         writer.write_points_cells(points, {"hexahedron": cells})
     
         c_data, point_data = read_data_of_step(file_io_object_read, cell, solver, True, material, read_frame=0)
         writer.write_data(timestamp_arr[0], cell_data=c_data, point_data=point_data)
 
         if layer_no == 333: 
 
             c_data, point_data = read_data_of_step(file_io_object_read, cell, solver, True, material, read_frame=1)
             writer.write_data(timestamp_arr[1], cell_data=c_data, point_data=point_data)
             strain_field = cell.get_fields().get_field("grad")
             strain_field_alias = np.array(strain_field, copy=False)
-            strain_field_alias[:,:,:,:,:,:] = 0
+            #strain_field_alias[:,:,:,:,:,:] = 0
+
+            material_phase_alias = np.array(material_phase, copy=False)
+            for pixel_id, pixel_coord in cell.pixels.enumerate():
+                i,j,k = pixel_coord[0], pixel_coord[1],  pixel_coord[2]
+                material_phase_alias[i,j,k] = int(bulk[i,j,k])
             #size_array = cell.get_fields().get_field("grad").array()
             #material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1,order="F"))
             material.dt = 3000
             #material.update_temperature_field(cell, 293*np.ones_like(material_phase).reshape(-1,1,order="F"))
             #strain_field = cell.get_fields().get_field("grad")
             #strain_field_alias = np.array(strain_field, copy=False)
             #strain_field_alias *= 0.0
             res = solver.solve_load_increment(np.zeros((3,3)))
 
             c_data, point_data = read_data_of_step(file_io_object_read, cell, solver, False, material)
             writer.write_data(timestamp_arr[1]+material.dt, cell_data=c_data, point_data=point_data)
 
            
         #for i in range(1):
         	
         #    print('=== STEP {}/{} ==='.format(i+1, nb_steps))
 
       
         #    if layer_no == 333:
                 #flux_arr = cell.get_fields().get_field('flux').array()
                 #grad_arr = cell.get_fields().get_field('grad').array()
                 #vacuum_strain_arr = cell.get_globalised_internal_real_field("vacuum_strain").array()
                 #flux_arr_alias = np.array(flux_arr, copy=False)
                 #grad_arr_alias = np.array(grad_arr, copy=False)
                 #vacuum_strain_arr_alias = np.array(vacuum_strain_arr, copy=False)
                 #flux_arr_alias[:,:,:,:,:] = 0
                 #grad_arr_alias[:,:,:,:,:] = 0
                 #vacuum_strain_arr_alias[:,:,:,:,:] = 0
                 #material.update_vacuum_strain_field(cell, vacuum_strain_arr_alias.reshape(-1,1, order="F"))
          #       material.update_temperature_field(cell, 293*np.ones_like(bulk_array.reshape(-1,1,order="F")))
          #       material.dt = 3000
                 #material.alpha_thermal = 0.0
          #       res = solver.solve_load_increment(np.zeros((3,3)))
 
             #msp.linear_finite_elements.write_3d_class(
             #"ekin_tutorial_3D_spectral_output_{}.xdmf".format(i), cell, solver, cell_data=c_data)
             comm_py.barrier()
             
             
     #stress = cell.get_fields().get_field('flux').array()
     #strain = cell.get_fields().get_field('grad').array()
     #temperature = cell.get_fields().get_field('temperature').array()
     #np.savez('previous_fields.npz', stress=stress, strain= strain, temperature=temperature)
     
     file_io_object_read.close()
     
     
 if __name__=='__main__':
 
 
     main()
diff --git a/mechanical_model/residual_stress_analysis_hea_1q.py b/mechanical_model/residual_stress_analysis_hea_1q.py
index 4484649..7a4841c 100644
--- a/mechanical_model/residual_stress_analysis_hea_1q.py
+++ b/mechanical_model/residual_stress_analysis_hea_1q.py
@@ -1,806 +1,811 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
 Created on Mon Jul 25 14:38:47 2022
 
 @author: ekinkubilay
 """
 import os
 import sys
 #import h5py
 
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmufft/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmugrid/python"))
 
 import muFFT
 import muSpectre as msp
 
 import numpy as np
 import muGrid
 from muFFT import Stencils2D
 from muFFT import Stencils3D
 import cProfile, pstats
 import time as timer
 
 from mpi4py import MPI
 from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
 
 from petsc4py import PETSc
 import argparse
 
 import gc
 #from transfer_previous_layer import transfer_previous_layer
 
 def update_temperature_field(cell, temperature_array):
     
     sub_domain_loc = cell.subdomain_locations
     
     #temperature = np.zeros((cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
     temperature = np.zeros((1,cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
 
     for pixel_id, pixel_coord in cell.pixels.enumerate():
         i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
         a = i - sub_domain_loc[0]
         b = j - sub_domain_loc[1]
         c = k - sub_domain_loc[2] 
         for q in range(cell.nb_quad_pts):
             temperature[0,q,a,b,c] = np.max([temperature_array[0,q,i,j,k],0]) + 293
 
     return temperature
 
     
 def parse_args():
     """
     Function to handle arguments
     """
     parser = argparse.ArgumentParser(description='Run given layer')
     parser.add_argument("layer_no", type=int, help='layer_number')
     parser.add_argument("mu_rank", type=int, help='number of processes for muSpectre')
     parser.add_argument("factor", type=float, help='factor for material parameter analysis')
     parser.add_argument("target_directory",  help='target directory for all output/input files')
     args = parser.parse_args()
 
     return args
     
 def main():
     
     args = parse_args()
     layer_no = args.layer_no
     factor = args.factor
     target_directory = args.target_directory
     #os.chdir('/scratch/kubilay/mechanical_model/results/mu_layer')
     os.chdir(target_directory)
 
     #comm_py is WORLD communicator (with even size), it is split into two
     #set muspectre communicator
     from mpi4py import MPI
     comm_py = MPI.COMM_WORLD
     rank_py = comm_py.rank
     size_py = comm_py.size
 
     #print("Hello! I'm rank %d from %d running in total..." % (comm_py.rank, comm_py.size))
     #size must be even, color is tags for the split ranks
     mu_rank = args.mu_rank
     color = np.sign(int(rank_py/(size_py-mu_rank)))
     
     
     #split communicator
     comm = comm_py.Split(color, rank_py)
     rank = comm.rank
     size = comm.size
         
     #print(rank_py, size_py, rank, size, color)
     #print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
 
     #define empty variable for broadcast buffer
     nb_steps = None
     dx, dy, dz = 0.0002, 0.0003125, 0.00003
     z_lim = 0.00801 
     x_lim = 0.055   
     element_dimensions = [dx, dy, dz]
     #define global simulation parameters
     dim = 3
     nb_quad = 1
     #nb_domain_grid_pts = [11,11,11]
             
     #define constant material properties
     young = 162e9 #GPa
     poisson = 0.277 #unitless
     alpha_s = 0.55 #unitless
     Delta_E_bs = 1.13*factor**(2/3) #eV
     epsilon_sat = 0.35 # unitless
     sigma_gb = 0.150e9 #GPa
-    sigma_s0 = 0.254*factor**(4/3)*1e9 #GPa
-    sigma_f = 1.200e9 #GPa
+    #sigma_s0 = 0.254*factor**(4/3)*1e9 #GPa
+    sigma_s0 = 0.254*1e9 #GPa
+    sigma_f = 1.240e9 #GPa
     
     ## define the convergence tolerance for the Newton-Raphson increment
     newton_tol = 1e-3
     equil_tol = newton_tol
     ## tolerance for the solver of the linear cell
     cg_tol = -1e-2
        
     ## macroscopic strain
     strain_step = np.zeros((3,3)) #no applied external strain
     maxiter = 500000  # for linear cell solver
        
     #exact coordinates of each quadrature point relative to the local element coordinates
     quad_coords = [np.array([0.75,0.5,0.25]), np.array([0.75,0.25,0.5]), np.array([0.5,0.75,0.25]), np.array([0.25,0.75,0.5]), np.array([0.5,0.25,0.75]), np.array([0.25,0.5,0.75])]
     quad_coords = quad_coords*np.array([dx,dy,dz])
     
     if color == 0:
         #local size and rank on the split communicator, this communicator is called comm
         rank = comm.rank
         procs = comm.size
         other_size = size_py - size
         if rank == 0: print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
         start_time = MPI.Wtime()
         
         send_ranks = np.array_split(np.arange(0,other_size, 1)+size, size)[rank]
         #print(send_ranks)
         from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
 
         #get the geometry and temperature via fenics
         mesh = Mesh(comm)
 
         #read the mesh and the temperature function at initial timestep
         #using Fenics within the FEM communicator
 
         #filename = "/work/lammm/kubilay/mesh/layer_"+str(layer_no).zfill(3)+".xdmf"
         #temperature_filename = "/work/lammm/kubilay/results/temperature/temperature_layer_"+str(layer_no).zfill(3)+".h5"
         filename = "/scratch/kubilay/mechanical_model/results/coarse/temperature_coarse_"+str(layer_no).zfill(3)+".xdmf"
         temperature_filename = "/scratch/kubilay/mechanical_model/results/coarse/relevant_temperature_"+str(layer_no).zfill(3)+".h5"
         f = XDMFFile(comm, filename)
         f.read(mesh)
         #MeshTransformation.rescale(mesh, 1e-3, Point(0,0,0)) #in m
         f.close()
         del(f)
         gc.collect()
         bounding_box = mesh.bounding_box_tree()
         hdf5_file = HDF5File(comm, temperature_filename, "r")
         V = FunctionSpace(mesh, "CG", 1) 
         temperature_field_store = Function(V)
         nb_steps = hdf5_file.attributes('Temperature')['count']-1
         timestamp_arr = np.zeros(nb_steps+1)
         for t in range(nb_steps+1):
             vec_name = "/Temperature/vector_%d"%t
             time = hdf5_file.attributes(vec_name)['timestamp']
             timestamp_arr[t] = time
         no = 0
         vec_name = "/Temperature/vector_%d"%no
         hdf5_file.read(temperature_field_store, vec_name)
         #obtain local domain dimensions
         xmax = np.max(mesh.coordinates()[:,0])
         xmin = np.min(mesh.coordinates()[:,0])
         ymax = np.max(mesh.coordinates()[:,1])
         ymin = np.min(mesh.coordinates()[:,1])
         zmax = np.max(mesh.coordinates()[:,2])
         zmin = np.min(mesh.coordinates()[:,2])
         #communicate max/min domain values within FEM communicator
         xmax = comm.allreduce(xmax, op=MPI.MAX)
         ymax = comm.allreduce(ymax, op=MPI.MAX)
         zmax = comm.allreduce(zmax, op=MPI.MAX)
         xmin = comm.allreduce(xmin, op=MPI.MIN)
         ymin = comm.allreduce(ymin, op=MPI.MIN)
         zmin = comm.allreduce(zmin, op=MPI.MIN)
         #z_lim = zmin #DELETE TO CONTROL LAYERS
         #calculate domain lengths and element dimensions
         domain_lengths = [x_lim-xmin,ymax-ymin,zmax-z_lim]
 
         nb_domain_grid_pts = list(np.array(domain_lengths)/np.array(element_dimensions))
         nb_domain_grid_pts = [round(i) for i in nb_domain_grid_pts]
 
         #array to store material (and not vacuum or base plate)
         bulk = np.ones(nb_domain_grid_pts)
         #domain discretisation TO DO:muspectre has this array already
         #x_dim = np.linspace(xmin, x_lim, nb_domain_grid_pts[0])
         #y_dim = np.linspace(ymin, ymax, nb_domain_grid_pts[1])
         #z_dim = np.linspace(z_lim, zmax, nb_domain_grid_pts[2])
     
         #obtain the material (and not internal vacuum) by observing if the point
         #is in or out the submesh. This creates comm.size number of a partially
         #filled array where it is non-zero only on the sub-mesh present at that rank
         #obtain the temperature by searching every point at every sub-mesh domain
         #pass if fenics raises an error (due to point not being present on that domain)
         #for i,x in enumerate(x_dim):
         #    for j,y in enumerate(y_dim):
         #        for k,z in enumerate(z_dim):
         #            if len(bounding_box.compute_collisions(Point(x+0.5*dx,y+0.5*dy,z+0.5*dz))) != 0:
         #                bulk[i,j,k] = 1
 
         del(bounding_box)
         gc.collect()
 
         #gather the material across all FEM ranks and broadcast to all FEM ranks
         #bulk and temperature_array are global (size corresponds to complete domain)
         #bulk = comm.allreduce(bulk, op=MPI.SUM)
         #bulk[bulk > 1] = 1
 
 
         #add base plate at the bottom and vacuum on top and sides
         #increase the number of grid points and total domain length in each dimension
         #bulk = np.insert(bulk, 0, 2, axis=2)
         #nb_domain_grid_pts[2] += 1
         #domain_lengths[2] += element_dimensions[2]
         #bulk = np.insert(bulk, 0, 2, axis=2)
         #nb_domain_grid_pts[2] += 1
         #domain_lengths[2] += element_dimensions[2]
         
-        #vacuum on top layer (1)
+        #vacuum on top layer (2)
+        bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
+        nb_domain_grid_pts[2] += 1
+        domain_lengths[2] += element_dimensions[2]
         bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
         nb_domain_grid_pts[2] += 1
         domain_lengths[2] += element_dimensions[2]
-        #bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
-        #nb_domain_grid_pts[2] += 1
-        #domain_lengths[2] += element_dimensions[2]
 
-        #vacuum on y plane (2)
+        #vacuum on y plane (1)
         bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
         nb_domain_grid_pts[1] += 1
         domain_lengths[1] += element_dimensions[1]
         #bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
         #nb_domain_grid_pts[1] += 1
         #domain_lengths[1] += element_dimensions[1]
 
-        #vacuum on x plane (1)
+        #vacuum on x plane (2)
         bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         nb_domain_grid_pts[0] += 1
         domain_lengths[0] += element_dimensions[0]
         bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         nb_domain_grid_pts[0] += 1
         domain_lengths[0] += element_dimensions[0]
         #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         #nb_domain_grid_pts[0] += 1
         #domain_lengths[0] += element_dimensions[0]
         #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         #nb_domain_grid_pts[0] += 1
         #domain_lengths[0] += element_dimensions[0]
 
         #base layer on bottom layer (10)
         no_base_layer = 22
         for i in range(no_base_layer):
             bulk = np.insert(bulk, 0, 2, axis=2)
             nb_domain_grid_pts[2] += 1
             domain_lengths[2] += element_dimensions[2]
         
         #domain discretisation TO DO:muspectre has this array already
         x_dim = np.linspace(xmin, x_lim+1*element_dimensions[0], nb_domain_grid_pts[0])+0.5*dx
         y_dim = np.linspace(ymin, ymax+0*element_dimensions[1], nb_domain_grid_pts[1])+0.5*dy
-        z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+0*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
+        z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+1*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
         
         coords = np.zeros([nb_domain_grid_pts[0],nb_domain_grid_pts[1],nb_domain_grid_pts[2],3])
         temperature_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
 
         for i in range(nb_domain_grid_pts[0]):
             coords[i,:,:,0] = x_dim[i]
         for i in range(nb_domain_grid_pts[1]):
             coords[:,i,:,1] = y_dim[i]
         for i in range(nb_domain_grid_pts[2]):
             coords[:,:,i,2] = z_dim[i]
 
         
         s_list = [(i,i+0.001) for i in np.linspace(0.005, 0.050, 26)]
         #s_list = [(0.005, 0.05)]
         support_mask = np.ones_like(x_dim)
         for i,x in enumerate(x_dim):
             loc = x
             for j,s in enumerate(s_list):
                 if loc>s[0] and loc<s[1]:
                     support_mask[i] = 0
         for j,y in enumerate(y_dim):
             bulk[:,j,no_base_layer-1] = bulk[:,j,no_base_layer-1]*support_mask
-        #bulk[:,-1,no_base_layer-1] = 0
-        #bulk[-1,:,no_base_layer-1] = 0
-        bulk[:,-1,:] = 0
-        bulk[-1,:,:] = 0
-        bulk[-2,:,:] = 0
+        bulk[:,-1,no_base_layer-1] = 0
+        bulk[-1,:,no_base_layer-1] = 0
+        bulk[-2,:,no_base_layer-1] = 0
+        #bulk[:,-1,:] = 0
+        #bulk[-1,:,:] = 0
+        #bulk[-2,:,:] = 0
         #quadrature coordinates relative to the part 
         #local bulk information on each partition
         quad_coordinates_array = np.zeros(tuple(np.shape(temperature_array))+(3,))
         bulk_local = np.zeros_like(bulk)
         for i,x in enumerate(x_dim):
             for j,y in enumerate(y_dim):
                 for k,z in enumerate(z_dim):
                     if bulk[i,j,k] == 1:
                         for q in range(nb_quad):
                             try:
                                 #quad_coordinates_array[0,q,i,j,k] = Point(x+quad_coords[q,0],y+quad_coords[q,1],z+quad_coords[q,2])[:]
                                 quad_coordinates_array[0,q,i,j,k] = Point(x,y,z)[:]
                             except RuntimeError:
                                 pass         
         local_multi_index = []   
         local_multi_index_coordinates = []
         with np.nditer([bulk], flags=['multi_index'], op_flags=['readwrite']) as it:
             for iterable in it:
                 if bulk[it.multi_index] == 1:
                     for q in range(nb_quad):
                         final_index = tuple((0,q))+it.multi_index
                         try:
                             temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
                             bulk_local[it.multi_index] = bulk[it.multi_index]
                             local_multi_index.append(final_index)
                             local_multi_index_coordinates.append(quad_coordinates_array[final_index])
                             #break
                         except RuntimeError:
                             pass
         # print("element_dimensions", element_dimensions)
         # print("element_dimensions", nb_domain_grid_pts)
         # print(x_dim)
         
         #store the temperature on the array with the right index if point is found
         #this creates comm.size number of temperature_arrays each non-zero only on 
         #the sub-mesh present in that rank
         #mesh and temperature are divided differently accross communicators!! TO LOOK!
         # for i,x in enumerate(x_dim):
         #     for j,y in enumerate(y_dim):
         #         for k,z in enumerate(z_dim):
         #             if bulk[i,j,k] == 1:
         #                 for q in range(nb_quad):
         #                     try:
         #                         temperature_array[0,q,i,j,k] = temperature_field_store(Point(x,y,z))
         #                     except RuntimeError:
         #                         pass
 
         #send global arrays (material/temperature/coordinates) from FEM communicator
         #to muSpectre communicator
         for proc_rank in send_ranks: 
             
             comm_py.send(rank, proc_rank, tag=0)
             
             comm_py.send(bulk, proc_rank, tag=1)
             
             comm_py.send(nb_domain_grid_pts, proc_rank, tag=3)
     
             comm_py.send(domain_lengths, proc_rank, tag=4)
             
             comm_py.send(coords, proc_rank, tag=5)
             
             comm_py.send(timestamp_arr, proc_rank, tag=6)
             
             comm_py.send(temperature_array, proc_rank, tag=7)
         
 
         list_subdomain_loc = [None] * size_py
         list_nb_subdomain_grid_pts = [None] * size_py
         
         #print(rank_py, send_ranks)
         for f, proc_rank in enumerate(send_ranks):
             list_subdomain_loc[proc_rank] = comm_py.recv(source = proc_rank, tag=proc_rank)
         
         # for i,j in enumerate(list_subdomain_loc):
         #     print(rank, i,j)
         
         for f, proc_rank in enumerate(send_ranks):
             list_nb_subdomain_grid_pts[proc_rank] = comm_py.recv(source = proc_rank, tag=proc_rank)
         
         # for i,j in enumerate(list_nb_subdomain_grid_pts):
         #     print(rank, i,j)
     
         end_time = MPI.Wtime()
         #print("FENICS INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
     
     #for muSepctre processes
     if color == 1:
         
         #define muSpectre communicator, this communicator is called comm_mu
         comm_mu = muGrid.Communicator(comm)
         #local size and rank on the split communicator
         rank = comm.rank
         procs = comm.size
         
         start_time = MPI.Wtime()
 
         #recieve from FEM communicator material arrays and system size parameters
         recieve_rank = comm_py.recv(source = MPI.ANY_SOURCE, tag=0)
         
         bulk = comm_py.recv(source = recieve_rank, tag=1)
                 
         nb_domain_grid_pts = comm_py.recv(source = recieve_rank, tag=3)
 
         domain_lengths = comm_py.recv(source = recieve_rank, tag=4)
         
         coords = comm_py.recv(source = recieve_rank, tag=5)
         
         #recieve empty timestamp array from FEM communicator
         timestamp_arr = comm_py.recv(source = recieve_rank, tag=6)
         delta_t_vector =  timestamp_arr[1:]-timestamp_arr[:-1]
         delta_t_vector = np.insert(delta_t_vector, 0,0)
         temperature_array = comm_py.recv(source= recieve_rank, tag=7)
         
         #create cell using muSpectre communicator
         cell = msp.cell.CellData.make_parallel(list(nb_domain_grid_pts), list(domain_lengths), comm_mu)
         cell.nb_quad_pts = nb_quad
         cell.get_fields().set_nb_sub_pts("quad", nb_quad)
         
         #define materials
         material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
         vacuum = msp.material.MaterialLinearElastic1_3d.make(cell, "vacuum", 0.0, 0.3)
         #base = msp.material.MaterialLinearElastic1_3d.make(cell, "base", 3*young, 0.3)
         #transverse_aniso = [178, 51, 63.4, 0, 0, 0, 178, 63.4, 0, 0, 0, 1655, 0, 0, 0, 634, 0, 0, 634, 0, 63.4]
         transverse_aniso = [3*617e9, 3*236e9, 3*236e9, 0, 0, 0, 3*617e9, 3*236e9, 0, 0, 0, 3*617e9, 0, 0, 0, 3*190.3e9, 0, 0, 3*190.3e9, 0, 3*190.3e9]
         base = msp.material.MaterialLinearAnisotropic_3d.make(cell, "base", transverse_aniso)
 
         #register fields and field collection
         material_phase = cell.get_fields().register_int_field("material2", 1, 'quad')
         bulk_array = np.zeros(np.shape(material_phase.array()), np.dtype(float))
         melt_bool = -1*np.ones(np.shape(bulk_array), np.dtype(np.int32))
         sub_domain_loc = cell.subdomain_locations
         nb_subdomain_grid_pts = cell.nb_subdomain_grid_pts
         #assign materials
         for pixel_index, pixel in enumerate(cell.pixels):
             i, j, k = pixel[0], pixel[1], pixel[2]
             a = i - sub_domain_loc[0]
             b = j - sub_domain_loc[1]
             c = k - sub_domain_loc[2]
             if bulk[tuple(pixel)]==1:
                 material.add_pixel(pixel_index)
                 bulk_array[a,b,c] = 1
             elif bulk[tuple(pixel)]==0:
                 vacuum.add_pixel(pixel_index)
             elif bulk[tuple(pixel)]==2:
                 base.add_pixel(pixel_index)
           
         #define solver parameters 
         #gradient = Stencils3D.trilinear_1q_finite_element
         gradient = Stencils3D.averaged_upwind
         weights = nb_quad * [1]
 
         material.identify_temperature_loaded_quad_pts(cell, bulk_array.reshape(-1,1, order="F"))
         #material.dt = 2.5e-5
-        material.T_m = 1650
+        material.T_m = 1600
         material.alpha_thermal = 22e-6
-        
+        material.dt = 2.5e-4 #s, fixed time step (per-step delta_t_vector assignment disabled)
 
         #local_bulk_array = bulk[sub_domain_loc[0]:sub_domain_loc[0]+nb_subdomain_grid_pts[0], sub_domain_loc[1]:sub_domain_loc[1]+nb_subdomain_grid_pts[1], sub_domain_loc[2]:sub_domain_loc[2]+nb_subdomain_grid_pts[2]]
 
         ## use solver 
         krylov_solver = msp.solvers.KrylovSolverCG(cg_tol, maxiter, msp.Verbosity.Silent)
         solver = msp.solvers.SolverNewtonCG(cell, krylov_solver, msp.Verbosity.Silent, newton_tol, equil_tol, maxiter, gradient, weights)    
         solver.formulation = msp.Formulation.small_strain
         solver.initialise_cell()
 
     
         #define file input/output parameters for muSpectre output
         # output_filename = "layer_"+str(layer_no-1).zfill(3)+".nc"
         # if comm_mu.rank == 0 and os.path.exists(output_filename):
         #     os.remove(output_filename)
         output_filename = "layer_"+str(layer_no).zfill(3)+".nc"
         if comm_mu.rank == 0 and os.path.exists(output_filename):
             os.remove(output_filename)        
         comm_mu.barrier()
         
         file_io_object_write = muGrid.FileIONetCDF(output_filename, 
                                       muGrid.FileIONetCDF.OpenMode.Write, 
                                       comm_mu)
                                               
         #coordinates = cell.get_fields().register_real_field("coordinates", 3, 'quad')
         #temperature_field = cell.get_fields().register_real_field("temperature2", 1, 'quad')
         #vacuum_strain_field = cell.get_fields().register_real_field("vacuum_strain2", (3,3), "quad")
 
         file_io_object_write.register_field_collection(field_collection=cell.fields)
         file_io_object_write.register_field_collection(field_collection=material.collection)
         #print("cell field names:\n", cell.get_field_names())
         #initialize global time attribute (to be updated once filled)
         
         file_io_object_write.write_global_attribute('timestamp', timestamp_arr)
         
 
         #create pointer-like copies for fields
         material_phase_alias = np.array(material_phase, copy=False)
         #coordinates_alias = np.array(coordinates, copy=False)
         #temperature_alias = np.array(temperature_field, copy=False)
         #vacuum_strain_alias = np.array(vacuum_strain_field, copy=False)
         #melt_bool = -1*np.ones(np.shape(material_phase), np.dtype(np.int32))
         #assign values to the above created aliases
         for pixel_id, pixel_coord in cell.pixels.enumerate():
             i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
             for q in range(nb_quad):
                 # local coordinates on the processor
                 a = i - sub_domain_loc[0]
                 b = j - sub_domain_loc[1]
                 c = k - sub_domain_loc[2]
         
                 material_phase_alias[a, b, c] = int(bulk[i, j, k])
                 #coordinates_alias[0, q, a, b, c] = coords[i,j,k][0]+quad_coords[q,0]
                 #coordinates_alias[1, q, a, b, c] = coords[i,j,k][1]+quad_coords[q,1]
                 #coordinates_alias[2, q, a, b, c] = coords[i,j,k][2]+quad_coords[q,2]
                 #if k == nb_domain_grid_pts[2]-2:
                     #melt_bool[0,q,a,b,c] = 0
         #use the temperature array to change the values using temperature
         #array pointer (alias)
         #temperature_alias[:,:,:,:,:] = update_temperature_field(cell, temperature_array)[:,:,:,:,:]         
 
         comm_mu.barrier()
         if layer_no not in [10,30,50,70,60,90,130,170,210,250,268]:
 
             from transfer_previous_layer_1q import transfer_previous_layer
 
             strain_field = cell.get_fields().get_field("grad")
             strain_field_alias = np.array(strain_field, copy=False)
             stress_field = cell.get_fields().get_field("flux")
             stress_field_alias = np.array(stress_field, copy=False)
             size_array = cell.get_fields().get_field("flux").array()
 
             if rank == 0: 
 
                 previous_data = transfer_previous_layer(target_directory, layer_no-1, bulk, nb_domain_grid_pts, domain_lengths)
                 #, np.max(coords[:,:,:,0]), np.max(coords[:,:,:,1]), np.max(coords[:,:,:,2]), np.min(coords[:,:,:,0]), np.min(coords[:,:,:,1]), np.min(coords[:,:,:,2])) 
             else:
                 previous_data = None
             comm_mu.barrier()
 
             previous_data = comm.bcast(previous_data, root = 0)
             [previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool, previous_material_temperature, previous_epsilon_plastic, previous_material_vacuum_strain] = previous_data
             sub_dx, sub_dy, sub_dz = sub_domain_loc[0], sub_domain_loc[1], sub_domain_loc[2]
             sub_gx, sub_gy, sub_gz = nb_subdomain_grid_pts[0], nb_subdomain_grid_pts[1], nb_subdomain_grid_pts[2]
             previous_epsilon_bar = previous_epsilon_bar[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_melt_pool = previous_melt_pool[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_material_temperature = previous_material_temperature[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_epsilon_plastic = previous_epsilon_plastic[:,:,sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_material_vacuum_strain = 0*previous_material_vacuum_strain[:,:,sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
 
             #assign values to the above created aliases
             for pixel_id, pixel_coord in cell.pixels.enumerate():
                 i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
                 a = i - sub_domain_loc[0]
                 b = j - sub_domain_loc[1]
                 c = k - sub_domain_loc[2]
                 if k < nb_domain_grid_pts[2]-1:
                     for q in range(nb_quad):
                         # local coordinates on the processor
 
                         #strain_field_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
                         stress_field_alias[:, :, q, a, b, c] = previous_stress[:, :, q, i, j, k]
                         #temperature_alias[0,q,a,b,c] = previous_temperature[0,q,i,j,k]
                         #vacuum_strain_alias[:, :, q, a, b, c] = previous_vacuum_strain[:, :, q, i, j, k]
                 #if k == nb_domain_grid_pts[2]-2:
                         #vacuum_strain_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
 
             material.update_temperature_field(cell, previous_material_temperature.reshape(-1,1, order="F"))
             material.update_epsilon_bar_field(cell, previous_epsilon_bar.reshape(-1,1, order="F"))
             material.update_melt_bool_field(cell, previous_melt_pool.reshape(-1,1, order="F"))
             material.update_epsilon_plastic_field(cell, previous_epsilon_plastic.reshape(-1,1, order="F"))
             material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
             del(previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool, previous_material_temperature, previous_epsilon_plastic)
             gc.collect()
         
         else:
             #use the temperature array to change the values using temperature
             #array pointer (alias)
             size_array = cell.get_fields().get_field("flux").array()
             material.update_temperature_field(cell, update_temperature_field(cell, temperature_array).reshape(-1,1, order="F"))
             material.update_epsilon_bar_field(cell, np.zeros_like(bulk_array).reshape(-1,1, order="F"))
             material.update_epsilon_plastic_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
             material.update_melt_bool_field(cell, melt_bool.reshape(-1,1, order="F"))
             material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
             strain_field = cell.get_fields().get_field("grad")
             strain_field_alias = np.array(strain_field, copy=False)
 
         comm_mu.barrier()
         
         comm_py.send([cell.subdomain_locations[0],cell.subdomain_locations[1],cell.subdomain_locations[2]], recieve_rank, tag=rank+size_py-procs)
         
         comm_py.send([cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]], recieve_rank, tag=rank+size_py-procs)
 
         end_time = MPI.Wtime()
         #print("MUSPECTRE INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
 
     #make nb_Steps global by broadcasting
     nb_steps = comm_py.bcast(nb_steps, root = 0)
     start = timer.time()
     comm_py.barrier()
     #seperate operations for FEM and muSpectre communicators depending on ranks
     if color == 1:
         #temperature_array = np.zeros(nb_domain_grid_pts)
         
         start_time = MPI.Wtime()
         
         dummy_temperature = np.zeros((1,cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
         if layer_no%2 == 0: 
             release_limit = 11910
         else:
             release_limit = 11870
         #for all steps
         for s in range(nb_steps):
             step_start = MPI.Wtime()
             starting_step = MPI.Wtime()
             #if rank == 0:
             #    print('=== STEP {}/{} ==='.format(s+1, nb_steps))
             #    print(delta_t_vector[s], timestamp_arr[s])
             
             #recieve temp_array from constant sender request
             #temperature_array = np.zeros(nb_domain_grid_pts)
             #if rank == 0: print("MUSPECTRE Step {} --- Recieve request created: {}".format(s, MPI.Wtime()- step_start))
-            material.dt = delta_t_vector[s]
+            #material.dt = delta_t_vector[s]
             req  = comm_py.Irecv(dummy_temperature, source = recieve_rank, tag=s)
             #if rank == 0: print("MUSPECTRE Step {} --- Recieve request done: {}".format(s, MPI.Wtime()- step_start))
             #if (s+1)%500 == 0:
                 #field_1 = cell.get_fields().get_field("grad")
                 #field_1_alias = np.array(field_1, copy = False)
                 #field_3 = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order="f")
                 #print(rank, nb_domain_grid_pts[0]-nb_subdomain_grid_pts[0], sub_domain_loc[0], np.shape(strain_field_alias))
                 #print(rank, nb_domain_grid_pts[1]-nb_subdomain_grid_pts[1], sub_domain_loc[1], np.shape(strain_field_alias))
                 #print(rank, nb_domain_grid_pts[2]-nb_subdomain_grid_pts[2], sub_domain_loc[2], np.shape(strain_field_alias))
                 #strain_field_alias -= field_3
                 #field_3_alias = np.array(field_3, copy=False)
                 #field_3_alias[:,:,:,:,:] = 0
                 #strain_field_alias[:,:,:,-1,:,:] = 0
                 #strain_field_alias[:,:,:,:,-1,:] = 0
                 #strain_field_alias[:,:,:,:,:,-1] = 0
             if s == release_limit: 
-            #    strain_field_alias[:,:,:,:,:,:] = 0
-                if layer_no == 268: material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
-                else: material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
+                if layer_no == 268:
+                    strain_field_alias -= cell.get_globalised_internal_real_field('vacuum_strain').array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order='f')
+                    material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
+                else: 
+                    strain_field_alias -= cell.get_globalised_internal_real_field('vacuum_strain').array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order='f')
+                    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
                 #print(rank, np.shape(field_1), np.shape(strain_field_alias), np.shape(field_3), nb_subdomain_grid_pts[0], nb_subdomain_grid_pts[1], nb_subdomain_grid_pts[2])
             solve_start = MPI.Wtime()
             #try:
                 #solve machanics problem
             res = solver.solve_load_increment(strain_step)
             #if rank ==0 : print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
                 #comm_mu.barrier()
             #except RuntimeError:
             #    if rank == 0: 
             #        print("failed to converge at step", s)
             #    strain_field_alias[:,:,:,:,:,-2:] *= 1.05
             #    strain_field_alias[:,:,:,:,-2:,:] *= 1.05
             #    strain_field_alias[:,:,:,-2:,:,:] *= 1.05
             #    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
             #    res = solver.solve_load_increment(strain_step)
                 #file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
                 #file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
                 #file_io_object_write.close()
                 #break
             comm_mu.barrier()
             #if rank == 0: print("newton_norm: ", res.newton_norm, "   equil norm: ", res.equil_norm)
             #if rank == 0: print("MUSPECTRE Rank {} - Step {} --- Total Solve time: {}".format(rank, s, MPI.Wtime()- solve_start) )
 
             #write all fields in .nc format TO DO:not all fields
             #file_io_object_write.append_frame().write(["flux","grad", "epsilon_bar", "epsilon_plastic","temperature", "vacuum_strain", "melt_pool", "material2"])
             #if (s+1)%5 == 0:
                 #print('=== STEP {}/{} ==='.format(s+1, nb_steps))
             	#write all fields in .nc format TO DO:not all fields
                 #file_io_object_write.append_frame().write(["coordinates", "flux", "grad", "material2","temperature2", "epsilon_bar", "temperature", "vacuum_strain", "epsilon_plastic"])
                  #if (s+1) == 100: break
             #print("MUSPECTRE Step {} Rank {} --- Step done: {}".format(s, rank, MPI.Wtime()- step_start))
                         
             #make sure temperature array is recieved       
             step_start = MPI.Wtime() 
             req.wait()
             #if rank == 0: print("MUSPECTRE Step {} --- Recieve request Wait done: {}".format(s, MPI.Wtime()- step_start))
             
             #use the temperature array to change the values using temperature
             #array pointer (alias)
             #temperature_alias[:,:,:,:,:] = dummy_temperature
             #dummy_temperature = np.ones_like(dummy_temperature)*2005 - 4*s
             step_start = MPI.Wtime() 
             material.update_temperature_field(cell, dummy_temperature.reshape(-1,1, order="F"))
             #if rank == 0: print("MUSPECTRE Step {} --- Temperature Update done: {}".format(s, MPI.Wtime()- step_start))
             #if (s+1) <= 1910 and (s+1)>1810:
             #file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
             #if (s+1)%11816 == 0:
                 #if rank == 0: print('=== STEP {}/{} ==='.format(s, nb_steps))
                 #file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
                 #break
             #if rank == 0: print("MUSPECTRE Step {} COMPLETE --- in: {}".format(s, MPI.Wtime()- starting_step))
         #try:
         #material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
         res = solver.solve_load_increment(strain_step)
-        if rank ==0 : print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
+        #if rank ==0 : print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
         #except RuntimeError:
         #    print("failed to converge at last step")
         #    strain_field_alias[:,:,:,:,:,:] = 0
         #    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
         #    res = solver.solve_load_increment(strain_step)
         #write all fields in .nc format TO DO:not all fields
         file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
         if layer_no == 333:
             dummy_temperature[:] = 293.0
             material.update_temperature_field(cell, dummy_temperature.reshape(-1,1, order="F"))
             material.dt = 3000
             res = solver.solve_load_increment(strain_step)
             file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
 
         end_time = MPI.Wtime()
         if rank == 0: print("Total MUSPECTRE time: {}".format(end_time-start))
 
         comm_mu.barrier()   
 
     if color == 0:
         
 
         dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
         for s in range(nb_steps):
             start_time = MPI.Wtime()
             starting_step = MPI.Wtime()
             #start constant sending request
             #print("FENICS Step {} --- Send Requests created: {}".format(s, MPI.Wtime()-start_time))
             #reqs = [None] * len(send_ranks)
             #for p, proc_rank in enumerate(send_ranks):
             #    reqs[p] = comm_py.Isend([temperature_array, MPI.DOUBLE], proc_rank, tag=s)
             #print("FENICS Step {} --- Send Requests done: {}".format(s, MPI.Wtime()-start_time))
             #re-read the temperature for the next step
             no += 1
             vec_name = "/Temperature/vector_%d"%no
             timestamp = hdf5_file.attributes(vec_name)['timestamp']
             timestamp_arr[s] = timestamp
             hdf5_file.read(temperature_field_store, vec_name)
             #store it on a dummy array since we don't want to change 
             #temperature array before the sending/recieving is done
             #which will be ensured by MPI.wait
             #dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
             #if rank == 0: print("FENICS Step {} --- File Reading: {}".format(s, MPI.Wtime()-start_time))
             #store the temperature on the array with the right index if point is found
             #this creates comm.size number of temperature_arrays each non-zero only on 
             #the sub-mesh present in that rank
             #mesh and temperature are divided differently accross communicators!! TO LOOK!
             loop_time = MPI.Wtime()
             #with np.nditer([bulk_local, dummy_array[0,0,:,:,:]], flags=['multi_index'], op_flags=[['readonly', 'arraymask'], ['writeonly', 'writemasked']]) as it:
             #    for iterable in it:
             #        if bulk_local[it.multi_index] == 1:
             #            for q in range(nb_quad):
             #                try:
             #                    var = temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
             #                    if var < 0:
             #                        pass
             #                    else:
             #                        dummy_array[tuple((0,q))+it.multi_index] = var
             #                except RuntimeError:
             #                    pass
 
             for lmi_index, lmi in enumerate(local_multi_index):
                 val = temperature_field_store(quad_coordinates_array[lmi])
                 if val <= 0:
                     dummy_array[lmi] = 0
                 else:
                     dummy_array[lmi] = val
             #if rank == 0: print("FENICS Step {} --- Looping done: {}".format(s, MPI.Wtime()-loop_time))
             #make sure temperature sending is complete and create the new 
             #temperature array
             start_time = MPI.Wtime()
             if s != 0:
                 for p, proc_rank in enumerate(send_ranks):
                     reqs[p].wait()
             #if rank == 0: print("FENICS Step {} --- Previous Requests Wait done: {}".format(s, MPI.Wtime()-start_time))
 
             start_time = MPI.Wtime()
             temperature_array = comm.allreduce(dummy_array, op=MPI.SUM)
             temperature_array = temperature_array + 293
             #if rank == 0: print("FENICS Step {} --- Temperature Reduce done: {}".format(s, MPI.Wtime()-start_time))
             #start constant sending request
             start_time = MPI.Wtime()
             reqs = [None] * len(send_ranks)
             for p, proc_rank in enumerate(send_ranks):
                 send_temp = np.array(temperature_array[:,:,list_subdomain_loc[proc_rank][0]:list_subdomain_loc[proc_rank][0]+list_nb_subdomain_grid_pts[proc_rank][0],list_subdomain_loc[proc_rank][1]:list_subdomain_loc[proc_rank][1]+list_nb_subdomain_grid_pts[proc_rank][1],list_subdomain_loc[proc_rank][2]:list_subdomain_loc[proc_rank][2]+list_nb_subdomain_grid_pts[proc_rank][2]])
                 #print(p,proc_rank, list_subdomain_loc[proc_rank], list_nb_subdomain_grid_pts[proc_rank])
                 #print('send_shape', np.shape(send_temp))
                 reqs[p] = comm_py.Isend([send_temp, MPI.DOUBLE], proc_rank, tag=s)
 
             #if (s+1)%11816 == 0:
                 #print('=== STEP {}/{} ==='.format(s+1, nb_steps))
                 #break
             #if rank == 0: print("FENICS Step {} --- Request sending done: {}".format(s, MPI.Wtime()-start_time))
             #if rank == 0: print("FENICS Step {} COMPLETE --- in: {}".format(s, MPI.Wtime()- starting_step))
             #if rank == 0: print("FENICS Step {} --- Step done: {}".format(s, MPI.Wtime()-start_time))
         
         comm.barrier()
 
         end_time = MPI.Wtime()
         if rank == 0: print("Total FENICS time: {}".format(end_time-start))
      
     #make timestamp array global by broadcasting
     timestamp_arr = comm_py.bcast(timestamp_arr, root = 0)
 
     #if on the muSpectre communicator, update the global timestamp
     if color == 1:
         file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
         file_io_object_write.close()
     comm_py.barrier()    
     if rank_py == 0: print("Total run time:", timer.time()-start)
 
 
     
     
 if __name__=='__main__':
 
 
     main()
diff --git a/mechanical_model/residual_stress_analysis_hea_1q.py b/mechanical_model/residual_stress_analysis_hea_1q_2.py
similarity index 89%
copy from mechanical_model/residual_stress_analysis_hea_1q.py
copy to mechanical_model/residual_stress_analysis_hea_1q_2.py
index 4484649..7a05bdf 100644
--- a/mechanical_model/residual_stress_analysis_hea_1q.py
+++ b/mechanical_model/residual_stress_analysis_hea_1q_2.py
@@ -1,806 +1,855 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
 Created on Mon Jul 25 14:38:47 2022
 
 @author: ekinkubilay
 """
 import os
 import sys
 #import h5py
 
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmufft/python"))
 sys.path.insert(0, os.path.join(os.getcwd(), "/home/kubilay/software/muspectre/build/language_bindings/libmugrid/python"))
 
 import muFFT
 import muSpectre as msp
 
 import numpy as np
 import muGrid
 from muFFT import Stencils2D
 from muFFT import Stencils3D
 import cProfile, pstats
 import time as timer
 
 from mpi4py import MPI
 from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
 
 from petsc4py import PETSc
 import argparse
 
 import gc
 #from transfer_previous_layer import transfer_previous_layer
 
 def update_temperature_field(cell, temperature_array):
     
     sub_domain_loc = cell.subdomain_locations
     
     #temperature = np.zeros((cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
     temperature = np.zeros((1,cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
 
     for pixel_id, pixel_coord in cell.pixels.enumerate():
         i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
         a = i - sub_domain_loc[0]
         b = j - sub_domain_loc[1]
         c = k - sub_domain_loc[2] 
         for q in range(cell.nb_quad_pts):
             temperature[0,q,a,b,c] = np.max([temperature_array[0,q,i,j,k],0]) + 293
 
     return temperature
 
     
 def parse_args():
     """
     Function to handle arguments
     """
     parser = argparse.ArgumentParser(description='Run given layer')
     parser.add_argument("layer_no", type=int, help='layer_number')
     parser.add_argument("mu_rank", type=int, help='number of processes for muSpectre')
     parser.add_argument("factor", type=float, help='factor for material parameter analysis')
     parser.add_argument("target_directory",  help='target directory for all output/input files')
     args = parser.parse_args()
 
     return args
     
 def main():
     
     args = parse_args()
     layer_no = args.layer_no
     factor = args.factor
     target_directory = args.target_directory
     #os.chdir('/scratch/kubilay/mechanical_model/results/mu_layer')
     os.chdir(target_directory)
 
     #comm_py is WORLD communicator (with even size), it is split into two
     #set muspectre communicator
     from mpi4py import MPI
     comm_py = MPI.COMM_WORLD
     rank_py = comm_py.rank
     size_py = comm_py.size
 
     #print("Hello! I'm rank %d from %d running in total..." % (comm_py.rank, comm_py.size))
     #size must be even, color is tags for the split ranks
     mu_rank = args.mu_rank
     color = np.sign(int(rank_py/(size_py-mu_rank)))
     
     
     #split communicator
     comm = comm_py.Split(color, rank_py)
     rank = comm.rank
     size = comm.size
         
     #print(rank_py, size_py, rank, size, color)
     #print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
 
     #define empty variable for broadcast buffer
     nb_steps = None
     dx, dy, dz = 0.0002, 0.0003125, 0.00003
     z_lim = 0.00801 
     x_lim = 0.055   
     element_dimensions = [dx, dy, dz]
     #define global simulation parameters
     dim = 3
     nb_quad = 1
     #nb_domain_grid_pts = [11,11,11]
             
     #define constant material properties
     young = 162e9 #GPa
     poisson = 0.277 #unitless
     alpha_s = 0.55 #unitless
     Delta_E_bs = 1.13*factor**(2/3) #eV
     epsilon_sat = 0.35 # unitless
     sigma_gb = 0.150e9 #GPa
     sigma_s0 = 0.254*factor**(4/3)*1e9 #GPa
-    sigma_f = 1.200e9 #GPa
+    sigma_f = 1.240e9 #GPa
     
     ## define the convergence tolerance for the Newton-Raphson increment
     newton_tol = 1e-3
     equil_tol = newton_tol
     ## tolerance for the solver of the linear cell
     cg_tol = -1e-2
        
     ## macroscopic strain
     strain_step = np.zeros((3,3)) #no applied external strain
     maxiter = 500000  # for linear cell solver
        
     #exact coordinates of each quadrature point relative to the local element coordinates
     quad_coords = [np.array([0.75,0.5,0.25]), np.array([0.75,0.25,0.5]), np.array([0.5,0.75,0.25]), np.array([0.25,0.75,0.5]), np.array([0.5,0.25,0.75]), np.array([0.25,0.5,0.75])]
     quad_coords = quad_coords*np.array([dx,dy,dz])
     
     if color == 0:
         #local size and rank on the split communicator, this communicator is called comm
         rank = comm.rank
         procs = comm.size
         other_size = size_py - size
         if rank == 0: print("Total number of muSpectre tasks: %d \t Total number of fenics tasks: %d" % (comm_py.size-size, size))
         start_time = MPI.Wtime()
         
         send_ranks = np.array_split(np.arange(0,other_size, 1)+size, size)[rank]
         #print(send_ranks)
         from dolfin import Mesh, XDMFFile, MeshTransformation, Point, HDF5File, FunctionSpace, Function
 
         #get the geometry and temperature via fenics
         mesh = Mesh(comm)
 
         #read the mesh and the temperature function at initial timestep
         #using Fenics within the FEM communicator
 
         #filename = "/work/lammm/kubilay/mesh/layer_"+str(layer_no).zfill(3)+".xdmf"
         #temperature_filename = "/work/lammm/kubilay/results/temperature/temperature_layer_"+str(layer_no).zfill(3)+".h5"
         filename = "/scratch/kubilay/mechanical_model/results/coarse/temperature_coarse_"+str(layer_no).zfill(3)+".xdmf"
         temperature_filename = "/scratch/kubilay/mechanical_model/results/coarse/relevant_temperature_"+str(layer_no).zfill(3)+".h5"
         f = XDMFFile(comm, filename)
         f.read(mesh)
         #MeshTransformation.rescale(mesh, 1e-3, Point(0,0,0)) #in m
         f.close()
         del(f)
         gc.collect()
         bounding_box = mesh.bounding_box_tree()
         hdf5_file = HDF5File(comm, temperature_filename, "r")
         V = FunctionSpace(mesh, "CG", 1) 
         temperature_field_store = Function(V)
         nb_steps = hdf5_file.attributes('Temperature')['count']-1
         timestamp_arr = np.zeros(nb_steps+1)
         for t in range(nb_steps+1):
             vec_name = "/Temperature/vector_%d"%t
             time = hdf5_file.attributes(vec_name)['timestamp']
             timestamp_arr[t] = time
         no = 0
         vec_name = "/Temperature/vector_%d"%no
         hdf5_file.read(temperature_field_store, vec_name)
         #obtain local domain dimensions
         xmax = np.max(mesh.coordinates()[:,0])
         xmin = np.min(mesh.coordinates()[:,0])
         ymax = np.max(mesh.coordinates()[:,1])
         ymin = np.min(mesh.coordinates()[:,1])
         zmax = np.max(mesh.coordinates()[:,2])
         zmin = np.min(mesh.coordinates()[:,2])
         #communicate max/min domain values within FEM communicator
         xmax = comm.allreduce(xmax, op=MPI.MAX)
         ymax = comm.allreduce(ymax, op=MPI.MAX)
         zmax = comm.allreduce(zmax, op=MPI.MAX)
         xmin = comm.allreduce(xmin, op=MPI.MIN)
         ymin = comm.allreduce(ymin, op=MPI.MIN)
         zmin = comm.allreduce(zmin, op=MPI.MIN)
         #z_lim = zmin #DELETE TO CONTROL LAYERS
         #calculate domain lengths and element dimensions
         domain_lengths = [x_lim-xmin,ymax-ymin,zmax-z_lim]
 
         nb_domain_grid_pts = list(np.array(domain_lengths)/np.array(element_dimensions))
         nb_domain_grid_pts = [round(i) for i in nb_domain_grid_pts]
 
         #array to store material (and not vacuum or base plate)
         bulk = np.ones(nb_domain_grid_pts)
         #domain discretisation TO DO:muspectre has this array already
         #x_dim = np.linspace(xmin, x_lim, nb_domain_grid_pts[0])
         #y_dim = np.linspace(ymin, ymax, nb_domain_grid_pts[1])
         #z_dim = np.linspace(z_lim, zmax, nb_domain_grid_pts[2])
     
         #obtain the material (and not internal vacuum) by observing if the point
         #is in or out the submesh. This creates comm.size number of a partially
         #filled array where it is non-zero only on the sub-mesh present at that rank
         #obtain the temperature by searching every point at every sub-mesh domain
         #pass if fenics raises an error (due to point not being present on that domain)
         #for i,x in enumerate(x_dim):
         #    for j,y in enumerate(y_dim):
         #        for k,z in enumerate(z_dim):
         #            if len(bounding_box.compute_collisions(Point(x+0.5*dx,y+0.5*dy,z+0.5*dz))) != 0:
         #                bulk[i,j,k] = 1
 
         del(bounding_box)
         gc.collect()
 
         #gather the material across all FEM ranks and broadcast to all FEM ranks
         #bulk and temperature_array are global (size corresponds to complete domain)
         #bulk = comm.allreduce(bulk, op=MPI.SUM)
         #bulk[bulk > 1] = 1
 
 
         #add base plate at the bottom and vacuum on top and sides
         #increase the number of grid points and total domain length in each dimension
         #bulk = np.insert(bulk, 0, 2, axis=2)
         #nb_domain_grid_pts[2] += 1
         #domain_lengths[2] += element_dimensions[2]
         #bulk = np.insert(bulk, 0, 2, axis=2)
         #nb_domain_grid_pts[2] += 1
         #domain_lengths[2] += element_dimensions[2]
         
-        #vacuum on top layer (1)
+        #vacuum on top layer (2)
+        bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
+        nb_domain_grid_pts[2] += 1
+        domain_lengths[2] += element_dimensions[2]
         bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
         nb_domain_grid_pts[2] += 1
         domain_lengths[2] += element_dimensions[2]
-        #bulk = np.insert(bulk, nb_domain_grid_pts[2], 0, axis=2)
-        #nb_domain_grid_pts[2] += 1
-        #domain_lengths[2] += element_dimensions[2]
 
-        #vacuum on y plane (2)
+        #vacuum on y plane (1)
         bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
         nb_domain_grid_pts[1] += 1
         domain_lengths[1] += element_dimensions[1]
         #bulk = np.insert(bulk, nb_domain_grid_pts[1], 0, axis=1)
         #nb_domain_grid_pts[1] += 1
         #domain_lengths[1] += element_dimensions[1]
 
-        #vacuum on x plane (1)
+        #vacuum on x plane (2)
         bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         nb_domain_grid_pts[0] += 1
         domain_lengths[0] += element_dimensions[0]
         bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         nb_domain_grid_pts[0] += 1
         domain_lengths[0] += element_dimensions[0]
         #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         #nb_domain_grid_pts[0] += 1
         #domain_lengths[0] += element_dimensions[0]
         #bulk = np.insert(bulk, nb_domain_grid_pts[0], 0, axis=0)
         #nb_domain_grid_pts[0] += 1
         #domain_lengths[0] += element_dimensions[0]
 
         #base layer on bottom layer (10)
         no_base_layer = 22
         for i in range(no_base_layer):
             bulk = np.insert(bulk, 0, 2, axis=2)
             nb_domain_grid_pts[2] += 1
             domain_lengths[2] += element_dimensions[2]
         
         #domain discretisation TO DO:muspectre has this array already
         x_dim = np.linspace(xmin, x_lim+1*element_dimensions[0], nb_domain_grid_pts[0])+0.5*dx
         y_dim = np.linspace(ymin, ymax+0*element_dimensions[1], nb_domain_grid_pts[1])+0.5*dy
-        z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+0*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
+        z_dim = np.linspace(z_lim-no_base_layer*element_dimensions[2], zmax+1*element_dimensions[2], nb_domain_grid_pts[2])+0.5*dz
         
         coords = np.zeros([nb_domain_grid_pts[0],nb_domain_grid_pts[1],nb_domain_grid_pts[2],3])
         temperature_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
 
         for i in range(nb_domain_grid_pts[0]):
             coords[i,:,:,0] = x_dim[i]
         for i in range(nb_domain_grid_pts[1]):
             coords[:,i,:,1] = y_dim[i]
         for i in range(nb_domain_grid_pts[2]):
             coords[:,:,i,2] = z_dim[i]
 
         
         s_list = [(i,i+0.001) for i in np.linspace(0.005, 0.050, 26)]
         #s_list = [(0.005, 0.05)]
         support_mask = np.ones_like(x_dim)
         for i,x in enumerate(x_dim):
             loc = x
             for j,s in enumerate(s_list):
                 if loc>s[0] and loc<s[1]:
                     support_mask[i] = 0
         for j,y in enumerate(y_dim):
             bulk[:,j,no_base_layer-1] = bulk[:,j,no_base_layer-1]*support_mask
-        #bulk[:,-1,no_base_layer-1] = 0
-        #bulk[-1,:,no_base_layer-1] = 0
+        
+        bulk[:,-1,no_base_layer-1] = 0
+        bulk[-1,:,no_base_layer-1] = 0
+        bulk[-2,:,no_base_layer-1] = 0
+        """
         bulk[:,-1,:] = 0
         bulk[-1,:,:] = 0
         bulk[-2,:,:] = 0
+        """
+        #bulk[:,-1,:] = 0
+        #bulk[-1,:,:] = 0
+        #bulk[-2,:,:] = 0
         #quadrature coordinates relative to the part 
         #local bulk information on each partition
         quad_coordinates_array = np.zeros(tuple(np.shape(temperature_array))+(3,))
         bulk_local = np.zeros_like(bulk)
         for i,x in enumerate(x_dim):
             for j,y in enumerate(y_dim):
                 for k,z in enumerate(z_dim):
                     if bulk[i,j,k] == 1:
                         for q in range(nb_quad):
                             try:
                                 #quad_coordinates_array[0,q,i,j,k] = Point(x+quad_coords[q,0],y+quad_coords[q,1],z+quad_coords[q,2])[:]
                                 quad_coordinates_array[0,q,i,j,k] = Point(x,y,z)[:]
                             except RuntimeError:
                                 pass         
         local_multi_index = []   
         local_multi_index_coordinates = []
         with np.nditer([bulk], flags=['multi_index'], op_flags=['readwrite']) as it:
             for iterable in it:
                 if bulk[it.multi_index] == 1:
                     for q in range(nb_quad):
                         final_index = tuple((0,q))+it.multi_index
                         try:
                             temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
                             bulk_local[it.multi_index] = bulk[it.multi_index]
                             local_multi_index.append(final_index)
                             local_multi_index_coordinates.append(quad_coordinates_array[final_index])
                             #break
                         except RuntimeError:
                             pass
         # print("element_dimensions", element_dimensions)
         # print("element_dimensions", nb_domain_grid_pts)
         # print(x_dim)
         
         #store the temperature on the array with the right index if point is found
         #this creates comm.size number of temperature_arrays each non-zero only on 
         #the sub-mesh present in that rank
         #mesh and temperature are divided differently accross communicators!! TO LOOK!
         # for i,x in enumerate(x_dim):
         #     for j,y in enumerate(y_dim):
         #         for k,z in enumerate(z_dim):
         #             if bulk[i,j,k] == 1:
         #                 for q in range(nb_quad):
         #                     try:
         #                         temperature_array[0,q,i,j,k] = temperature_field_store(Point(x,y,z))
         #                     except RuntimeError:
         #                         pass
 
         #send global arrays (material/temperature/coordinates) from FEM communicator
         #to muSpectre communicator
         for proc_rank in send_ranks: 
             
             comm_py.send(rank, proc_rank, tag=0)
             
             comm_py.send(bulk, proc_rank, tag=1)
             
             comm_py.send(nb_domain_grid_pts, proc_rank, tag=3)
     
             comm_py.send(domain_lengths, proc_rank, tag=4)
             
             comm_py.send(coords, proc_rank, tag=5)
             
             comm_py.send(timestamp_arr, proc_rank, tag=6)
             
             comm_py.send(temperature_array, proc_rank, tag=7)
         
 
         list_subdomain_loc = [None] * size_py
         list_nb_subdomain_grid_pts = [None] * size_py
         
         #print(rank_py, send_ranks)
         for f, proc_rank in enumerate(send_ranks):
             list_subdomain_loc[proc_rank] = comm_py.recv(source = proc_rank, tag=proc_rank)
         
         # for i,j in enumerate(list_subdomain_loc):
         #     print(rank, i,j)
         
         for f, proc_rank in enumerate(send_ranks):
             list_nb_subdomain_grid_pts[proc_rank] = comm_py.recv(source = proc_rank, tag=proc_rank)
         
         # for i,j in enumerate(list_nb_subdomain_grid_pts):
         #     print(rank, i,j)
     
         end_time = MPI.Wtime()
         #print("FENICS INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
     
     #for muSepctre processes
     if color == 1:
         
         #define muSpectre communicator, this communicator is called comm_mu
         comm_mu = muGrid.Communicator(comm)
         #local size and rank on the split communicator
         rank = comm.rank
         procs = comm.size
         
         start_time = MPI.Wtime()
 
         #recieve from FEM communicator material arrays and system size parameters
         recieve_rank = comm_py.recv(source = MPI.ANY_SOURCE, tag=0)
         
         bulk = comm_py.recv(source = recieve_rank, tag=1)
                 
         nb_domain_grid_pts = comm_py.recv(source = recieve_rank, tag=3)
 
         domain_lengths = comm_py.recv(source = recieve_rank, tag=4)
         
         coords = comm_py.recv(source = recieve_rank, tag=5)
         
         #recieve empty timestamp array from FEM communicator
         timestamp_arr = comm_py.recv(source = recieve_rank, tag=6)
         delta_t_vector =  timestamp_arr[1:]-timestamp_arr[:-1]
         delta_t_vector = np.insert(delta_t_vector, 0,0)
         temperature_array = comm_py.recv(source= recieve_rank, tag=7)
         
         #create cell using muSpectre communicator
         cell = msp.cell.CellData.make_parallel(list(nb_domain_grid_pts), list(domain_lengths), comm_mu)
         cell.nb_quad_pts = nb_quad
         cell.get_fields().set_nb_sub_pts("quad", nb_quad)
         
         #define materials
-        material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
+        alpha_f = 0.55
+        Delta_E_bf = 8
+        #material = msp.material.MaterialHEA_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
+        material = msp.material.MaterialHEA_2_3d.make(cell, "material", young, poisson, alpha_s, Delta_E_bs, alpha_f, Delta_E_bf, epsilon_sat, sigma_gb, sigma_s0, sigma_f)
         vacuum = msp.material.MaterialLinearElastic1_3d.make(cell, "vacuum", 0.0, 0.3)
         #base = msp.material.MaterialLinearElastic1_3d.make(cell, "base", 3*young, 0.3)
         #transverse_aniso = [178, 51, 63.4, 0, 0, 0, 178, 63.4, 0, 0, 0, 1655, 0, 0, 0, 634, 0, 0, 634, 0, 63.4]
         transverse_aniso = [3*617e9, 3*236e9, 3*236e9, 0, 0, 0, 3*617e9, 3*236e9, 0, 0, 0, 3*617e9, 0, 0, 0, 3*190.3e9, 0, 0, 3*190.3e9, 0, 3*190.3e9]
         base = msp.material.MaterialLinearAnisotropic_3d.make(cell, "base", transverse_aniso)
 
         #register fields and field collection
         material_phase = cell.get_fields().register_int_field("material2", 1, 'quad')
         bulk_array = np.zeros(np.shape(material_phase.array()), np.dtype(float))
         melt_bool = -1*np.ones(np.shape(bulk_array), np.dtype(np.int32))
         sub_domain_loc = cell.subdomain_locations
         nb_subdomain_grid_pts = cell.nb_subdomain_grid_pts
         #assign materials
         for pixel_index, pixel in enumerate(cell.pixels):
             i, j, k = pixel[0], pixel[1], pixel[2]
             a = i - sub_domain_loc[0]
             b = j - sub_domain_loc[1]
             c = k - sub_domain_loc[2]
             if bulk[tuple(pixel)]==1:
                 material.add_pixel(pixel_index)
                 bulk_array[a,b,c] = 1
             elif bulk[tuple(pixel)]==0:
                 vacuum.add_pixel(pixel_index)
             elif bulk[tuple(pixel)]==2:
                 base.add_pixel(pixel_index)
           
         #define solver parameters 
         #gradient = Stencils3D.trilinear_1q_finite_element
         gradient = Stencils3D.averaged_upwind
         weights = nb_quad * [1]
 
         material.identify_temperature_loaded_quad_pts(cell, bulk_array.reshape(-1,1, order="F"))
         #material.dt = 2.5e-5
-        material.T_m = 1650
+        material.T_m = 1600
         material.alpha_thermal = 22e-6
-        
+        material.dt = 2.5e-4
 
         #local_bulk_array = bulk[sub_domain_loc[0]:sub_domain_loc[0]+nb_subdomain_grid_pts[0], sub_domain_loc[1]:sub_domain_loc[1]+nb_subdomain_grid_pts[1], sub_domain_loc[2]:sub_domain_loc[2]+nb_subdomain_grid_pts[2]]
 
         ## use solver 
         krylov_solver = msp.solvers.KrylovSolverCG(cg_tol, maxiter, msp.Verbosity.Silent)
         solver = msp.solvers.SolverNewtonCG(cell, krylov_solver, msp.Verbosity.Silent, newton_tol, equil_tol, maxiter, gradient, weights)    
         solver.formulation = msp.Formulation.small_strain
         solver.initialise_cell()
 
     
         #define file input/output parameters for muSpectre output
         # output_filename = "layer_"+str(layer_no-1).zfill(3)+".nc"
         # if comm_mu.rank == 0 and os.path.exists(output_filename):
         #     os.remove(output_filename)
         output_filename = "layer_"+str(layer_no).zfill(3)+".nc"
         if comm_mu.rank == 0 and os.path.exists(output_filename):
             os.remove(output_filename)        
         comm_mu.barrier()
         
         file_io_object_write = muGrid.FileIONetCDF(output_filename, 
                                       muGrid.FileIONetCDF.OpenMode.Write, 
                                       comm_mu)
                                               
         #coordinates = cell.get_fields().register_real_field("coordinates", 3, 'quad')
         #temperature_field = cell.get_fields().register_real_field("temperature2", 1, 'quad')
         #vacuum_strain_field = cell.get_fields().register_real_field("vacuum_strain2", (3,3), "quad")
 
         file_io_object_write.register_field_collection(field_collection=cell.fields)
         file_io_object_write.register_field_collection(field_collection=material.collection)
         #print("cell field names:\n", cell.get_field_names())
         #initialize global time attribute (to be updated once filled)
         
         file_io_object_write.write_global_attribute('timestamp', timestamp_arr)
         
 
         #create pointer-like copies for fields
         material_phase_alias = np.array(material_phase, copy=False)
         #coordinates_alias = np.array(coordinates, copy=False)
         #temperature_alias = np.array(temperature_field, copy=False)
         #vacuum_strain_alias = np.array(vacuum_strain_field, copy=False)
         #melt_bool = -1*np.ones(np.shape(material_phase), np.dtype(np.int32))
         #assign values to the above created aliases
         for pixel_id, pixel_coord in cell.pixels.enumerate():
             i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
             for q in range(nb_quad):
                 # local coordinates on the processor
                 a = i - sub_domain_loc[0]
                 b = j - sub_domain_loc[1]
                 c = k - sub_domain_loc[2]
         
                 material_phase_alias[a, b, c] = int(bulk[i, j, k])
                 #coordinates_alias[0, q, a, b, c] = coords[i,j,k][0]+quad_coords[q,0]
                 #coordinates_alias[1, q, a, b, c] = coords[i,j,k][1]+quad_coords[q,1]
                 #coordinates_alias[2, q, a, b, c] = coords[i,j,k][2]+quad_coords[q,2]
                 #if k == nb_domain_grid_pts[2]-2:
                     #melt_bool[0,q,a,b,c] = 0
         #use the temperature array to change the values using temperature
         #array pointer (alias)
         #temperature_alias[:,:,:,:,:] = update_temperature_field(cell, temperature_array)[:,:,:,:,:]         
 
         comm_mu.barrier()
         if layer_no not in [10,30,50,70,60,90,130,170,210,250,268]:
 
             from transfer_previous_layer_1q import transfer_previous_layer
 
             strain_field = cell.get_fields().get_field("grad")
             strain_field_alias = np.array(strain_field, copy=False)
             stress_field = cell.get_fields().get_field("flux")
             stress_field_alias = np.array(stress_field, copy=False)
             size_array = cell.get_fields().get_field("flux").array()
 
             if rank == 0: 
 
                 previous_data = transfer_previous_layer(target_directory, layer_no-1, bulk, nb_domain_grid_pts, domain_lengths)
                 #, np.max(coords[:,:,:,0]), np.max(coords[:,:,:,1]), np.max(coords[:,:,:,2]), np.min(coords[:,:,:,0]), np.min(coords[:,:,:,1]), np.min(coords[:,:,:,2])) 
             else:
                 previous_data = None
             comm_mu.barrier()
 
             previous_data = comm.bcast(previous_data, root = 0)
             [previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool, previous_material_temperature, previous_epsilon_plastic, previous_material_vacuum_strain] = previous_data
             sub_dx, sub_dy, sub_dz = sub_domain_loc[0], sub_domain_loc[1], sub_domain_loc[2]
             sub_gx, sub_gy, sub_gz = nb_subdomain_grid_pts[0], nb_subdomain_grid_pts[1], nb_subdomain_grid_pts[2]
             previous_epsilon_bar = previous_epsilon_bar[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_melt_pool = previous_melt_pool[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_material_temperature = previous_material_temperature[sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_epsilon_plastic = previous_epsilon_plastic[:,:,sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
             previous_material_vacuum_strain = 0*previous_material_vacuum_strain[:,:,sub_dx:sub_dx+sub_gx,sub_dy:sub_dy+sub_gy,sub_dz:sub_dz+sub_gz]
 
             #assign values to the above created aliases
             for pixel_id, pixel_coord in cell.pixels.enumerate():
                 i, j, k = pixel_coord[0], pixel_coord[1], pixel_coord[2]
                 a = i - sub_domain_loc[0]
                 b = j - sub_domain_loc[1]
                 c = k - sub_domain_loc[2]
                 if k < nb_domain_grid_pts[2]-1:
                     for q in range(nb_quad):
                         # local coordinates on the processor
 
                         #strain_field_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
                         stress_field_alias[:, :, q, a, b, c] = previous_stress[:, :, q, i, j, k]
                         #temperature_alias[0,q,a,b,c] = previous_temperature[0,q,i,j,k]
                         #vacuum_strain_alias[:, :, q, a, b, c] = previous_vacuum_strain[:, :, q, i, j, k]
                 #if k == nb_domain_grid_pts[2]-2:
                         #vacuum_strain_alias[:, :, q, a, b, c] = previous_strain[:, :, q, i, j, k]
 
             material.update_temperature_field(cell, previous_material_temperature.reshape(-1,1, order="F"))
             material.update_epsilon_bar_field(cell, previous_epsilon_bar.reshape(-1,1, order="F"))
             material.update_melt_bool_field(cell, previous_melt_pool.reshape(-1,1, order="F"))
             material.update_epsilon_plastic_field(cell, previous_epsilon_plastic.reshape(-1,1, order="F"))
             material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
             del(previous_stress, previous_strain, previous_epsilon_bar, previous_melt_pool, previous_material_temperature, previous_epsilon_plastic)
             gc.collect()
         
         else:
             #use the temperature array to change the values using temperature
             #array pointer (alias)
             size_array = cell.get_fields().get_field("flux").array()
             material.update_temperature_field(cell, update_temperature_field(cell, temperature_array).reshape(-1,1, order="F"))
             material.update_epsilon_bar_field(cell, np.zeros_like(bulk_array).reshape(-1,1, order="F"))
             material.update_epsilon_plastic_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
             material.update_melt_bool_field(cell, melt_bool.reshape(-1,1, order="F"))
             material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
             strain_field = cell.get_fields().get_field("grad")
             strain_field_alias = np.array(strain_field, copy=False)
 
         comm_mu.barrier()
         
         comm_py.send([cell.subdomain_locations[0],cell.subdomain_locations[1],cell.subdomain_locations[2]], recieve_rank, tag=rank+size_py-procs)
         
         comm_py.send([cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]], recieve_rank, tag=rank+size_py-procs)
 
         end_time = MPI.Wtime()
         #print("MUSPECTRE INIT \n Local rank = {}, took {} seconds".format(rank, end_time-start_time))
 
     #make nb_Steps global by broadcasting
     nb_steps = comm_py.bcast(nb_steps, root = 0)
+
     start = timer.time()
     comm_py.barrier()
     #seperate operations for FEM and muSpectre communicators depending on ranks
     if color == 1:
         #temperature_array = np.zeros(nb_domain_grid_pts)
         
         start_time = MPI.Wtime()
         
         dummy_temperature = np.zeros((1,cell.nb_quad_pts, cell.nb_subdomain_grid_pts[0],cell.nb_subdomain_grid_pts[1],cell.nb_subdomain_grid_pts[2]))
         if layer_no%2 == 0: 
             release_limit = 11910
         else:
             release_limit = 11870
+
+                
+        loc1, loc2, loc3 = cell.nb_subdomain_grid_pts[0]//2, cell.nb_subdomain_grid_pts[1]//2, cell.nb_subdomain_grid_pts[2]//2
+        local_time = 0
+        """
+        if loc3 != 0:
+            print(rank, np.size(cell.get_fields().get_field('flux').array()[:,:,0,loc1, loc2, loc3]))
+            store = np.zeros((nb_steps, np.size(cell.get_fields().get_field('flux').array()[:,:,0,loc1, loc2, loc3]) + 
+                    np.size(cell.get_fields().get_field('grad').array()[:,:,0,loc1, loc2, loc3]) + 
+                    np.size(cell.get_globalised_current_real_field("epsilon_plastic").array()[:,0,loc1, loc2, loc3]) + 
+                    np.size(cell.get_globalised_current_real_field("epsilon_bar").array()[loc1, loc2, loc3]) + 
+                    np.size(cell.get_globalised_internal_real_field("temperature").array()[loc1, loc2, loc3]) + 1))
+            local_step = np.array([local_time])
+            local_step = np.append(local_step, cell.get_fields().get_field('flux').array()[:,:,0,loc1, loc2, loc3].flatten())
+            local_step = np.append(local_step, cell.get_fields().get_field('grad').array()[:,:,0,loc1, loc2, loc3].flatten())
+            local_step = np.append(local_step, cell.get_globalised_current_real_field("epsilon_plastic").array()[:,0,loc1, loc2, loc3].flatten())
+            local_step = np.append(local_step, cell.get_globalised_current_real_field("epsilon_bar").array()[loc1, loc2, loc3].flatten())
+            local_step = np.append(local_step, cell.get_globalised_internal_real_field("temperature").array()[loc1, loc2, loc3].flatten())
+            store[0,:] = local_step
+        """
         #for all steps
         for s in range(nb_steps):
             step_start = MPI.Wtime()
             starting_step = MPI.Wtime()
             #if rank == 0:
             #    print('=== STEP {}/{} ==='.format(s+1, nb_steps))
             #    print(delta_t_vector[s], timestamp_arr[s])
             
             #recieve temp_array from constant sender request
             #temperature_array = np.zeros(nb_domain_grid_pts)
             #if rank == 0: print("MUSPECTRE Step {} --- Recieve request created: {}".format(s, MPI.Wtime()- step_start))
-            material.dt = delta_t_vector[s]
+            #material.dt = delta_t_vector[s]
             req  = comm_py.Irecv(dummy_temperature, source = recieve_rank, tag=s)
             #if rank == 0: print("MUSPECTRE Step {} --- Recieve request done: {}".format(s, MPI.Wtime()- step_start))
             #if (s+1)%500 == 0:
                 #field_1 = cell.get_fields().get_field("grad")
                 #field_1_alias = np.array(field_1, copy = False)
                 #field_3 = cell.get_globalised_internal_real_field("vacuum_strain").array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order="f")
                 #print(rank, nb_domain_grid_pts[0]-nb_subdomain_grid_pts[0], sub_domain_loc[0], np.shape(strain_field_alias))
                 #print(rank, nb_domain_grid_pts[1]-nb_subdomain_grid_pts[1], sub_domain_loc[1], np.shape(strain_field_alias))
                 #print(rank, nb_domain_grid_pts[2]-nb_subdomain_grid_pts[2], sub_domain_loc[2], np.shape(strain_field_alias))
                 #strain_field_alias -= field_3
                 #field_3_alias = np.array(field_3, copy=False)
                 #field_3_alias[:,:,:,:,:] = 0
                 #strain_field_alias[:,:,:,-1,:,:] = 0
                 #strain_field_alias[:,:,:,:,-1,:] = 0
                 #strain_field_alias[:,:,:,:,:,-1] = 0
             if s == release_limit: 
-            #    strain_field_alias[:,:,:,:,:,:] = 0
-                if layer_no == 268: material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
-                else: material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
+                if layer_no == 268:
+                    strain_field_alias -= cell.get_globalised_internal_real_field('vacuum_strain').array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order='f')
+                    material.update_vacuum_strain_field(cell, np.zeros_like(size_array).reshape(-1,1, order="F"))
+                else: 
+                    strain_field_alias -= cell.get_globalised_internal_real_field('vacuum_strain').array().reshape((dim, dim, nb_quad)+tuple(nb_subdomain_grid_pts), order='f')
+                    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
                 #print(rank, np.shape(field_1), np.shape(strain_field_alias), np.shape(field_3), nb_subdomain_grid_pts[0], nb_subdomain_grid_pts[1], nb_subdomain_grid_pts[2])
             solve_start = MPI.Wtime()
             #try:
                 #solve machanics problem
             res = solver.solve_load_increment(strain_step)
             #if rank ==0 : print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
                 #comm_mu.barrier()
             #except RuntimeError:
             #    if rank == 0: 
             #        print("failed to converge at step", s)
             #    strain_field_alias[:,:,:,:,:,-2:] *= 1.05
             #    strain_field_alias[:,:,:,:,-2:,:] *= 1.05
             #    strain_field_alias[:,:,:,-2:,:,:] *= 1.05
             #    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
             #    res = solver.solve_load_increment(strain_step)
                 #file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
                 #file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
                 #file_io_object_write.close()
                 #break
             comm_mu.barrier()
+            """
+            if loc3 != 0:
+                local_time += delta_t_vector[s]
+                local_step = np.array([local_time])
+                local_step = np.append(local_step, cell.get_fields().get_field('flux').array()[:,:,0,loc1, loc2, loc3].flatten())
+                local_step = np.append(local_step, cell.get_fields().get_field('grad').array()[:,:,0,loc1, loc2, loc3].flatten())
+                local_step = np.append(local_step, cell.get_globalised_current_real_field("epsilon_plastic").array()[:,0,loc1, loc2, loc3].flatten())
+                local_step = np.append(local_step, cell.get_globalised_current_real_field("epsilon_bar").array()[loc1, loc2, loc3].flatten())
+                local_step = np.append(local_step, cell.get_globalised_internal_real_field("temperature").array()[loc1, loc2, loc3].flatten())
+                store[s,:] = local_step
+            """
             #if rank == 0: print("newton_norm: ", res.newton_norm, "   equil norm: ", res.equil_norm)
             #if rank == 0: print("MUSPECTRE Rank {} - Step {} --- Total Solve time: {}".format(rank, s, MPI.Wtime()- solve_start) )
 
             #write all fields in .nc format TO DO:not all fields
             #file_io_object_write.append_frame().write(["flux","grad", "epsilon_bar", "epsilon_plastic","temperature", "vacuum_strain", "melt_pool", "material2"])
             #if (s+1)%5 == 0:
                 #print('=== STEP {}/{} ==='.format(s+1, nb_steps))
             	#write all fields in .nc format TO DO:not all fields
                 #file_io_object_write.append_frame().write(["coordinates", "flux", "grad", "material2","temperature2", "epsilon_bar", "temperature", "vacuum_strain", "epsilon_plastic"])
                  #if (s+1) == 100: break
             #print("MUSPECTRE Step {} Rank {} --- Step done: {}".format(s, rank, MPI.Wtime()- step_start))
                         
             #make sure temperature array is recieved       
             step_start = MPI.Wtime() 
             req.wait()
             #if rank == 0: print("MUSPECTRE Step {} --- Recieve request Wait done: {}".format(s, MPI.Wtime()- step_start))
             
             #use the temperature array to change the values using temperature
             #array pointer (alias)
             #temperature_alias[:,:,:,:,:] = dummy_temperature
             #dummy_temperature = np.ones_like(dummy_temperature)*2005 - 4*s
             step_start = MPI.Wtime() 
             material.update_temperature_field(cell, dummy_temperature.reshape(-1,1, order="F"))
             #if rank == 0: print("MUSPECTRE Step {} --- Temperature Update done: {}".format(s, MPI.Wtime()- step_start))
             #if (s+1) <= 1910 and (s+1)>1810:
             #file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
             #if (s+1)%11816 == 0:
                 #if rank == 0: print('=== STEP {}/{} ==='.format(s, nb_steps))
                 #file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
                 #break
             #if rank == 0: print("MUSPECTRE Step {} COMPLETE --- in: {}".format(s, MPI.Wtime()- starting_step))
         #try:
         #material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
         res = solver.solve_load_increment(strain_step)
-        if rank ==0 : print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
+        #if rank ==0 : print(s, delta_t_vector[s], res.newton_norm, res.equil_norm)
         #except RuntimeError:
         #    print("failed to converge at last step")
         #    strain_field_alias[:,:,:,:,:,:] = 0
         #    material.update_vacuum_strain_field(cell, previous_material_vacuum_strain.reshape(-1,1, order="F"))
         #    res = solver.solve_load_increment(strain_step)
         #write all fields in .nc format TO DO:not all fields
         file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
         if layer_no == 333:
             dummy_temperature[:] = 293.0
             material.update_temperature_field(cell, dummy_temperature.reshape(-1,1, order="F"))
             material.dt = 3000
             res = solver.solve_load_increment(strain_step)
             file_io_object_write.append_frame().write(["flux", "grad", "epsilon_bar", "epsilon_plastic", "temperature", "vacuum_strain", "melt_pool", "material2"])
 
         end_time = MPI.Wtime()
         if rank == 0: print("Total MUSPECTRE time: {}".format(end_time-start))
-
+        """        
+        if loc3 != 0:        
+            local_name = target_directory+'/store_'+str(int(rank)).zfill(2)+'.npz'
+            np.savez(local_name, store)
+        """        
         comm_mu.barrier()   
 
     if color == 0:
         
 
         dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
         for s in range(nb_steps):
             start_time = MPI.Wtime()
             starting_step = MPI.Wtime()
             #start constant sending request
             #print("FENICS Step {} --- Send Requests created: {}".format(s, MPI.Wtime()-start_time))
             #reqs = [None] * len(send_ranks)
             #for p, proc_rank in enumerate(send_ranks):
             #    reqs[p] = comm_py.Isend([temperature_array, MPI.DOUBLE], proc_rank, tag=s)
             #print("FENICS Step {} --- Send Requests done: {}".format(s, MPI.Wtime()-start_time))
             #re-read the temperature for the next step
             no += 1
             vec_name = "/Temperature/vector_%d"%no
             timestamp = hdf5_file.attributes(vec_name)['timestamp']
             timestamp_arr[s] = timestamp
             hdf5_file.read(temperature_field_store, vec_name)
             #store it on a dummy array since we don't want to change 
             #temperature array before the sending/recieving is done
             #which will be ensured by MPI.wait
             #dummy_array = np.zeros((1,nb_quad)+tuple(nb_domain_grid_pts))
             #if rank == 0: print("FENICS Step {} --- File Reading: {}".format(s, MPI.Wtime()-start_time))
             #store the temperature on the array with the right index if point is found
             #this creates comm.size number of temperature_arrays each non-zero only on 
             #the sub-mesh present in that rank
             #mesh and temperature are divided differently accross communicators!! TO LOOK!
             loop_time = MPI.Wtime()
             #with np.nditer([bulk_local, dummy_array[0,0,:,:,:]], flags=['multi_index'], op_flags=[['readonly', 'arraymask'], ['writeonly', 'writemasked']]) as it:
             #    for iterable in it:
             #        if bulk_local[it.multi_index] == 1:
             #            for q in range(nb_quad):
             #                try:
             #                    var = temperature_field_store(quad_coordinates_array[tuple((0,q))+it.multi_index])
             #                    if var < 0:
             #                        pass
             #                    else:
             #                        dummy_array[tuple((0,q))+it.multi_index] = var
             #                except RuntimeError:
             #                    pass
 
             for lmi_index, lmi in enumerate(local_multi_index):
                 val = temperature_field_store(quad_coordinates_array[lmi])
                 if val <= 0:
                     dummy_array[lmi] = 0
                 else:
                     dummy_array[lmi] = val
             #if rank == 0: print("FENICS Step {} --- Looping done: {}".format(s, MPI.Wtime()-loop_time))
             #make sure temperature sending is complete and create the new 
             #temperature array
             start_time = MPI.Wtime()
             if s != 0:
                 for p, proc_rank in enumerate(send_ranks):
                     reqs[p].wait()
             #if rank == 0: print("FENICS Step {} --- Previous Requests Wait done: {}".format(s, MPI.Wtime()-start_time))
 
             start_time = MPI.Wtime()
             temperature_array = comm.allreduce(dummy_array, op=MPI.SUM)
             temperature_array = temperature_array + 293
             #if rank == 0: print("FENICS Step {} --- Temperature Reduce done: {}".format(s, MPI.Wtime()-start_time))
             #start constant sending request
             start_time = MPI.Wtime()
             reqs = [None] * len(send_ranks)
             for p, proc_rank in enumerate(send_ranks):
                 send_temp = np.array(temperature_array[:,:,list_subdomain_loc[proc_rank][0]:list_subdomain_loc[proc_rank][0]+list_nb_subdomain_grid_pts[proc_rank][0],list_subdomain_loc[proc_rank][1]:list_subdomain_loc[proc_rank][1]+list_nb_subdomain_grid_pts[proc_rank][1],list_subdomain_loc[proc_rank][2]:list_subdomain_loc[proc_rank][2]+list_nb_subdomain_grid_pts[proc_rank][2]])
                 #print(p,proc_rank, list_subdomain_loc[proc_rank], list_nb_subdomain_grid_pts[proc_rank])
                 #print('send_shape', np.shape(send_temp))
                 reqs[p] = comm_py.Isend([send_temp, MPI.DOUBLE], proc_rank, tag=s)
 
             #if (s+1)%11816 == 0:
                 #print('=== STEP {}/{} ==='.format(s+1, nb_steps))
                 #break
             #if rank == 0: print("FENICS Step {} --- Request sending done: {}".format(s, MPI.Wtime()-start_time))
             #if rank == 0: print("FENICS Step {} COMPLETE --- in: {}".format(s, MPI.Wtime()- starting_step))
             #if rank == 0: print("FENICS Step {} --- Step done: {}".format(s, MPI.Wtime()-start_time))
         
         comm.barrier()
 
         end_time = MPI.Wtime()
         if rank == 0: print("Total FENICS time: {}".format(end_time-start))
      
     #make timestamp array global by broadcasting
     timestamp_arr = comm_py.bcast(timestamp_arr, root = 0)
 
     #if on the muSpectre communicator, update the global timestamp
     if color == 1:
         file_io_object_write.update_global_attribute('timestamp', 'timestamp', timestamp_arr)
         file_io_object_write.close()
     comm_py.barrier()    
     if rank_py == 0: print("Total run time:", timer.time()-start)
 
 
     
     
 if __name__=='__main__':
 
 
     main()
diff --git a/mechanical_model/run_hea_1q.sh b/mechanical_model/run_hea_1q.sh
index 0b0d6f6..62cb603 100644
--- a/mechanical_model/run_hea_1q.sh
+++ b/mechanical_model/run_hea_1q.sh
@@ -1,43 +1,43 @@
 #!/bin/bash
 
 #SBATCH --qos serial
 #SBATCH --nodes 1
 #SBATCH --ntasks 1
 #SBATCH --cpus-per-task 36
 #SBATCH --mem 252000
-#SBATCH --time 71:59:59
+#SBATCH --time 15:59:59
 #SBATCH --error=slurm-%j.stderr
 #SBATCH --output=slurm-%j.stdout
 #SBATCH --job-name=mech_model
 #SBATCH --mail-user=recep.kubilay@epfl.ch
 #SBATCH --mail-type=BEGIN,END,FAIL
 
 set -e
 
 module purge
 source /home/kubilay/anaconda3/bin/activate
 conda activate muspectre_env2
 export LD_LIBRARY_PATH=/home/kubilay/pnetcdf/lib/:$LD_LIBRARY_PATH
 
-LOG_FILE="mechanical_model_hea13.log"
+LOG_FILE="mechanical_model_hea14.log"
 echo "*** STARTING JOB ***" >$LOG_FILE
 
-START_LAYER=268
-LAYERS=269
+START_LAYER=333
+LAYERS=333
 PROCS=27
 export OPENBLAS_NUM_THREADS=1
 
-FACTOR=1.3
+FACTOR=1.4
 TARGET_DIRECTORY="/scratch/kubilay/mechanical_model/results/factor"_$FACTOR
 mkdir -p $TARGET_DIRECTORY
 
 for z in $(seq $START_LAYER 1 $LAYERS)
 do
 echo $z >>$LOG_FILE
-mpirun -np 36 python3 -u residual_stress_analysis_hea_1q.py $z $PROCS $FACTOR $TARGET_DIRECTORY >>$LOG_FILE 
+#mpirun -np 36 python3 -u residual_stress_analysis_hea_1q.py $z $PROCS $FACTOR $TARGET_DIRECTORY >>$LOG_FILE 
 python3 -u netcdf2h5_hea_1q.py $z $FACTOR $TARGET_DIRECTORY >> $LOG_FILE
 done
 
 conda deactivate
 
 echo "*** JOB FINISHED ***" >>$LOG_FILE
diff --git a/mechanical_model/run_hea_1q.sh b/mechanical_model/run_hea_1q_2.sh
similarity index 68%
copy from mechanical_model/run_hea_1q.sh
copy to mechanical_model/run_hea_1q_2.sh
index 0b0d6f6..a4924c6 100644
--- a/mechanical_model/run_hea_1q.sh
+++ b/mechanical_model/run_hea_1q_2.sh
@@ -1,43 +1,43 @@
 #!/bin/bash
 
 #SBATCH --qos serial
 #SBATCH --nodes 1
 #SBATCH --ntasks 1
 #SBATCH --cpus-per-task 36
 #SBATCH --mem 252000
 #SBATCH --time 71:59:59
 #SBATCH --error=slurm-%j.stderr
 #SBATCH --output=slurm-%j.stdout
 #SBATCH --job-name=mech_model
 #SBATCH --mail-user=recep.kubilay@epfl.ch
 #SBATCH --mail-type=BEGIN,END,FAIL
 
 set -e
 
 module purge
 source /home/kubilay/anaconda3/bin/activate
 conda activate muspectre_env2
 export LD_LIBRARY_PATH=/home/kubilay/pnetcdf/lib/:$LD_LIBRARY_PATH
 
-LOG_FILE="mechanical_model_hea13.log"
+LOG_FILE="mechanical_model_hea10.log"
 echo "*** STARTING JOB ***" >$LOG_FILE
 
 START_LAYER=268
-LAYERS=269
+LAYERS=333
 PROCS=27
 export OPENBLAS_NUM_THREADS=1
 
-FACTOR=1.3
-TARGET_DIRECTORY="/scratch/kubilay/mechanical_model/results/factor"_$FACTOR
+FACTOR=1.0
+TARGET_DIRECTORY="/scratch/kubilay/mechanical_model/results/rate_dependent/factor"_$FACTOR
 mkdir -p $TARGET_DIRECTORY
 
 for z in $(seq $START_LAYER 1 $LAYERS)
 do
 echo $z >>$LOG_FILE
-mpirun -np 36 python3 -u residual_stress_analysis_hea_1q.py $z $PROCS $FACTOR $TARGET_DIRECTORY >>$LOG_FILE 
-python3 -u netcdf2h5_hea_1q.py $z $FACTOR $TARGET_DIRECTORY >> $LOG_FILE
+mpirun -np 36 python3 -u residual_stress_analysis_hea_1q_2.py $z $PROCS $FACTOR $TARGET_DIRECTORY >>$LOG_FILE 
+python3 -u netcdf2h5_hea_1q_2.py $z $FACTOR $TARGET_DIRECTORY >> $LOG_FILE
 done
 
 conda deactivate
 
 echo "*** JOB FINISHED ***" >>$LOG_FILE