Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F64468261
dumper_restart.cc
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Mon, May 27, 02:32
Size
10 KB
Mime Type
text/x-c++
Expires
Wed, May 29, 02:32 (1 d, 23 h)
Engine
blob
Format
Raw Data
Handle
17908985
Attached To
rLIBMULTISCALE LibMultiScale
dumper_restart.cc
View Options
/**
* @file dumper_restart.cc
*
* @author Guillaume Anciaux <guillaume.anciaux@epfl.ch>
*
* @date Wed Aug 20 16:58:08 2014
*
* @brief This dumper saves the state of the simulation
*
* @section LICENSE
*
* Copyright INRIA and CEA
*
* The LibMultiScale is a C++ parallel framework for the multiscale
* coupling methods dedicated to material simulations. This framework
* provides an API which makes it possible to program coupled simulations
* and integration of already existing codes.
*
* This Project was initiated in a collaboration between INRIA Futurs Bordeaux
* within ScAlApplix team and CEA/DPTA Ile de France.
* The project is now continued at the Ecole Polytechnique Fédérale de Lausanne
* within the LSMS/ENAC laboratory.
*
* This software is governed by the CeCILL-C license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/ or redistribute the software under the terms of the CeCILL-C
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* As a counterpart to the access to the source code and rights to copy,
* modify and redistribute granted by the license, users are provided only
* with a limited warranty and the software's author, the holder of the
* economic rights, and the successive licensors have only limited
* liability.
*
* In this respect, the user's attention is drawn to the risks associated
* with loading, using, modifying and/or developing or reproducing the
* software by the user in light of its specific status of free software,
* that may mean that it is complicated to manipulate, and that also
* therefore means that it is reserved for developers and experienced
* professionals having in-depth computer knowledge. Users are therefore
* encouraged to load and test the software's suitability as regards their
* requirements in conditions enabling the security of their systems and/or
* data to be ensured and, more generally, to use and operate it in the
* same conditions as regards security.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL-C license and that you accept its terms.
*
*/
/* -------------------------------------------------------------------------- */
#include "lm_common.hh"
#include "dumper_restart.hh"
#include "lib_md.hh"
#include "lib_dd.hh"
#include "lib_continuum.hh"
#include "filter.hh"
#include "ref_point_data.hh"
#include <iomanip>
#include <mpi.h>
/* -------------------------------------------------------------------------- */
__BEGIN_LIBMULTISCALE__
/* -------------------------------------------------------------------------- */
//! Requests for asynchronous comms. Only valid for root (rank = 0) processor.
static std::vector<MPI_Request> requests;
//! Local number of DOFs per proc. Only valid for root (rank = 0) processor.
static std::vector<UInt> nb_local_per_proc;
//! Buffer data used to gather information on the processor that will write to
//! disk. Only valid for root (rank = 0) processor.
// NOTE(review): these are raw malloc/realloc-managed buffers (allocated in
// dump(), released in the destructor) — ownership lives with this static.
static std::vector<Real *> data_per_proc;
/* -------------------------------------------------------------------------- */
/**
 * Constructor: forwards name and component to the generic Dumper and
 * zero-initializes the local gather state. On the root processor (rank 0)
 * the per-processor bookkeeping vectors are sized to the world size.
 *
 * @param name dumper instance name (forwarded to Dumper)
 * @param d    component to be dumped (forwarded to Dumper)
 */
template <typename _Input>
DumperRestart<_Input>::DumperRestart(const std::string & name,
                                     ComponentLMInterface & d)
    : Dumper<_Input>(name, d) {

  nb_local_dofs = 0;
  allocated_size = 0;
  data = NULL;
  text_flag = false;

  if (lm_my_proc_id == 0) {
    // assign() sets both the size and the initial value in one step: the
    // original resize() + assign() + explicit NULL loop was redundant.
    nb_local_per_proc.assign(lm_world_size, 0);
    data_per_proc.assign(lm_world_size, NULL);
    // Value-initialize the requests: MPI_Request may be a struct type in
    // some MPI implementations, so assigning the literal 0 is not portable.
    requests.assign(lm_world_size, MPI_Request());
  }
}
/* -------------------------------------------------------------------------- */
/**
 * Destructor: releases the gather buffers held by the root processor and
 * the local send buffer.
 *
 * Note: data_per_proc[0] aliases the local `data` pointer (set in dump()),
 * which is why the loop below starts at index 1 — slot 0 is released once,
 * through `data`.
 */
template <typename Cont>
DumperRestart<Cont>::~DumperRestart() {
  if (lm_my_proc_id == 0) {
    // Free the memory we do not want to keep around in parallel (root only).
    DUMP("freeing data_per_proc", DBG_INFO);
    for (UInt i = 1; i < lm_world_size; ++i) {
      DUMP("freeing ptr[" << i << "] " << data_per_proc[i], DBG_INFO);
      if (data_per_proc[i] != NULL) {
        // BUGFIX: these buffers are allocated with malloc/realloc in dump(),
        // so they must be released with free() — deleting malloc'd memory
        // is undefined behavior.
        free(data_per_proc[i]);
        data_per_proc[i] = NULL;
      }
    }
    nb_local_per_proc.clear();
    data_per_proc.clear();
    requests.clear();
  }
  // The local buffer is also malloc/realloc-allocated: free(), not delete.
  if (data)
    free(data);
}
/* -------------------------------------------------------------------------- */
/**
 * Dump the restart state of the container `cont` to an XML file.
 *
 * Root (rank 0) opens "<basename>_restart-NNNN.xml", writes the header,
 * then each field (positions P0, displacements U, velocities V) is built
 * locally on every processor and funneled to root by DumpField().
 *
 * @param cont container whose degrees of freedom are dumped
 */
template <typename Cont>
void DumperRestart<Cont>::dump(Cont & cont){
  static const UInt Dim = Cont::Dim;
  LMFile file;
  UInt nbTotalDofs = 0;

  // Root only: open the output file, zero-padded by step number, and plug
  // the base64 encoder onto it for binary output.
  if (lm_my_proc_id == 0){
    std::stringstream temp;
    temp << this->getBaseName() << "_restart-"
         << std::setfill('0') << std::setw(4) << this->action_step << ".xml";
    file.open(temp.str(), "wb", true);
    b64.setOutputFile(file);
  }

  // Decide whether the local buffer must grow before refreshing the count.
  bool need_reallocate = false;
  if (allocated_size < cont.nbElem() && cont.nbElem() != 0)
    need_reallocate = true;
  nb_local_dofs = cont.nbElem();

  // Gather every processor's element count on root.
  // NOTE(review): nb_local_dofs is a UInt sent as MPI_INT — works while
  // sizeof(UInt) == sizeof(int) and counts stay below INT_MAX; confirm.
  MPI_Gather(&nb_local_dofs, 1, MPI_INT,
             &nb_local_per_proc[0], 1, MPI_INT,
             0, MPI_COMM_WORLD);
  DUMP("Gather done", DBG_INFO);

  // Root: total the DOF counts and emit the XML header.
  if (lm_my_proc_id == 0){
    for (UInt i = 0; i < lm_world_size; ++i){
      DUMP("nb_local[" << i << "]=" << nb_local_per_proc[i]
           << " " << nb_local_dofs, DBG_INFO);
      nbTotalDofs += nb_local_per_proc[i];
    }
    printHeaders(nbTotalDofs, Dim, file);
  }

  // Allocation of the local storage array (grow-only: allocated_size caches
  // the current capacity so steady-state dumps skip this).
  if (need_reallocate){
    allocated_size = nb_local_dofs;
    DUMP("(re)allocating for size = " << nb_local_dofs, DBG_INFO);
    if (data != NULL)
      data = (Real *) realloc(data, nb_local_dofs * sizeof(Real) * Dim);
    else {
      data = (Real *) malloc(nb_local_dofs * sizeof(Real) * Dim);
      memset(data, 0, sizeof(Real) * nb_local_dofs * Dim);
    }
  }

  if (lm_my_proc_id == 0){
    // Switch the pointer for the local data: slot 0 aliases root's own
    // buffer, no copy needed for rank 0.
    data_per_proc[0] = data;
    // Size the receive buffers for every other rank's contribution.
    for (UInt i = 1; i < lm_world_size; ++i){
      if (nb_local_per_proc[i] == 0)
        continue;
      if (data_per_proc[i] != NULL)
        data_per_proc[i] = (Real *) realloc(data_per_proc[i],
                                            sizeof(Real) * nb_local_per_proc[i] * Dim);
      else {
        data_per_proc[i] = (Real *) malloc(sizeof(Real) * nb_local_per_proc[i] * Dim);
        memset(data_per_proc[i], 0, sizeof(Real) * nb_local_per_proc[i] * Dim);
      }
    }
  }

  // Each processor builds, in parallel, the data it wants to send.
  // Field 1: initial positions P0.
  UInt cpt = 0;
  typedef typename Cont::Ref RefPoint;
  typename Cont::iterator it = cont.getIterator();
  for (RefPoint at = it.getFirst(); !it.end(); at = it.getNext(), ++cpt){
    Real X[Dim];
    at.getPositions0(X);
    for (UInt i = 0; i < Dim; ++i)
      data[cpt * Dim + i] = X[i];
  }
  DUMP("a la fin cpt = " << cpt, DBG_INFO);
  DUMP("local P0 construction done", DBG_INFO);
  DumpField<Dim>("P0", file);

  // Field 2: displacements U (same buffer reused).
  cpt = 0;
  for (RefPoint at = it.getFirst(); !it.end(); at = it.getNext(), ++cpt){
    Real U[Dim];
    at.getDisplacements(U);
    for (UInt i = 0; i < Dim; ++i)
      data[cpt * Dim + i] = U[i];
  }
  DUMP("local U construction done", DBG_INFO);
  DumpField<Dim>("U", file);

  // Field 3: velocities V.
  cpt = 0;
  for (RefPoint at = it.getFirst(); !it.end(); at = it.getNext(), ++cpt){
    Real V[Dim];
    at.getVelocities(V);
    for (UInt i = 0; i < Dim; ++i)
      data[cpt * Dim + i] = V[i];
  }
  DUMP("local V construction done", DBG_INFO);
  DumpField<Dim>("V", file);

  // Root closes the document; everyone synchronizes before returning.
  if (lm_my_proc_id == 0){
    printTail(file);
    file.close();
  }
  MPI_Barrier(MPI_COMM_WORLD);
}
/* -------------------------------------------------------------------------- */
/**
 * Funnel one field (already staged in `data` on every processor) to root
 * and write it between <fieldName> ... </fieldName> tags.
 *
 * Protocol: root posts a nonblocking receive per nonempty rank (tag = rank),
 * every other rank does a blocking send, then root waits for each receive
 * just before writing that rank's values — overlapping communication with
 * disk output.
 *
 * NOTE(review): root writes ranks in DESCENDING order (lm_world_size-1 down
 * to 0). All three fields use the same order so the file is self-consistent,
 * but confirm the restart reader expects this ordering.
 * NOTE(review): values are transferred as MPI_DOUBLE — assumes Real is
 * double; confirm.
 *
 * @param fieldName XML tag name for the field (e.g. "P0", "U", "V")
 * @param file      output file (only used on root)
 */
template <typename _Input>
template <UInt Dim>
inline void DumperRestart<_Input>::DumpField(const std::string & fieldName,
                                             LMFile & file){
  MPI_Status status;

  if (lm_my_proc_id == 0){
    // Root: post one nonblocking receive per nonempty remote rank.
    for (UInt i = 1; i < lm_world_size; ++i){
      if (nb_local_per_proc[i] == 0)
        continue;
      DUMP("pending recv from " << i << " of size "
           << nb_local_per_proc[i] << " ...", DBG_INFO);
      MPI_Irecv(data_per_proc[i], nb_local_per_proc[i] * Dim, MPI_DOUBLE,
                i, i, MPI_COMM_WORLD, &requests[i]);
      DUMP("pending recv from " << i << " of size "
           << nb_local_per_proc[i] << " ... done", DBG_INFO);
    }
  }
  else if (nb_local_dofs != 0){
    // Non-root with data: blocking send to root, tagged with own rank.
    DUMP("sending to root " << nb_local_dofs << " ...", DBG_INFO);
    MPI_Send(data, nb_local_dofs * Dim, MPI_DOUBLE,
             0, lm_my_proc_id, MPI_COMM_WORLD);
    DUMP("sending to root " << nb_local_dofs << " ... done", DBG_INFO);
  }

  if (lm_my_proc_id == 0){
    if (!text_flag)
      b64.clearBuffer();
    file.printf("<%s>\n", fieldName.c_str());
    for (int i = lm_world_size - 1; i > -1; --i){
      DUMP("waiting com from " << i, DBG_INFO);
      // Wait for the comms before writing to disk (rank 0's data is local,
      // empty ranks never posted a request — skip the wait for both).
      if (i > 0 && nb_local_per_proc[i] > 0)
        MPI_Wait(&requests[i], &status);
      DUMP("reception from " << i << " complete", DBG_INFO);
      for (UInt j = 0; j < nb_local_per_proc[i]; ++j){
        if (text_flag){
          // Human-readable output: tab-separated components, one DOF per line.
          for (UInt k = 0; k < Dim; ++k){
            file.printf("%.15e", data_per_proc[i][j * Dim + k]);
            if (k != Dim - 1)
              file.printf("\t");
          }
          file.printf("\n");
        }
        else {
          // Binary output: stream every component through the base64 encoder.
          for (UInt k = 0; k < Dim; ++k){
            b64.pushRealInBase64(data_per_proc[i][j * Dim + k]);
          }
        }
      }
    }
    if (!text_flag){
      // Flush any partial base64 triplet and write the encoded block.
      b64.finish();
      b64.dumpToFile();
    }
    file.printf("</%s>\n", fieldName.c_str());
  }
}
/* -------------------------------------------------------------------------- */
/**
 * Write the XML prologue and the opening <SimulationData> tag.
 *
 * @param nbDofs total number of degrees of freedom across all processors
 * @param Dim    spatial dimension of the dumped fields
 * @param file   output file (root only)
 */
template <typename _Input>
void DumperRestart<_Input>::printHeaders(UInt nbDofs, const UInt Dim,
                                         LMFile & file){
  // XML declaration.
  file.printf("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
  // Root element carrying the global metadata.
  // NOTE(review): %d used with UInt arguments — fine while values fit in
  // int, but %u would be the exact specifier; kept for format stability.
  file.printf("<SimulationData nbDofs=\"%d\" dim=\"%d\" timestep=\"%d\" ",
              nbDofs, Dim, current_step);
  // Announce the payload encoding selected by the TEXT keyword.
  file.printf(text_flag ? " dataType=\"TEXT\" >\n"
                        : " dataType=\"BINARY\" >\n");
}
/* -------------------------------------------------------------------------- */
/**
 * Close the XML document by writing the </SimulationData> end tag.
 *
 * @param file output file (root only)
 */
template <typename _Input>
void DumperRestart<_Input>::printTail(LMFile & file){
  file.printf("</SimulationData>\n");
}
/* -------------------------------------------------------------------------- */
/* LMDESC RESTART
   This dumper saves the state of the simulation in order to allow restarting
   simulations from the saved state.
*/
/* LMEXAMPLE DUMPER res RESTART INPUT md PREFIX /home/titeuf */
/* LMHERITANCE dumper */
/**
 * Register the keywords this dumper accepts in the input file, on top of
 * those inherited from the generic Dumper.
 */
template <typename _Input>
void DumperRestart<_Input>::declareParams(){
  // Inherit every keyword of the base Dumper first.
  Dumper<_Input>::declareParams();

  /* LMKEYWORD TEXT
     Flag to request human-readable text output instead of binary.
  */
  // Optional flag (default false): when present, fields are written as
  // tab-separated text instead of base64-encoded binary.
  this->parseTag("TEXT", text_flag, false);
}
/* -------------------------------------------------------------------------- */
// Instantiate/register DumperRestart for each supported model family.
// NOTE(review): DECLARE_DUMPER_* are project macros whose definitions are not
// visible here; presumably they perform the explicit template instantiation
// and factory registration — confirm in the dumper factory headers.
DECLARE_DUMPER_REF(DumperRestart, LIST_ATOM_MODEL);
DECLARE_DUMPER_REF(DumperRestart, LIST_CONTINUUM_MODEL);
DECLARE_DUMPER_REF(DumperRestart, LIST_DD_MODEL);
DECLARE_DUMPER_REFPOINT(DumperRestart);
DECLARE_DUMPER_GENERIC_MESH(DumperRestart);
__END_LIBMULTISCALE__
Event Timeline
Log In to Comment