/**
* @file communicator_mpi_linear.cc
*
* @author Guillaume Anciaux <guillaume.anciaux@epfl.ch>
*
* @date Mon Jul 28 15:36:04 2014
*
* @brief This is the implementation of the LM communicator using MPI where models are distributed over processors
*
* @section LICENSE
*
* Copyright (©) 2010-2011 EPFL (Ecole Polytechnique Fédérale de Lausanne)
* Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
*
* LibMultiScale is free software: you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option) any
* later version.
*
* LibMultiScale is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with LibMultiScale. If not, see <http://www.gnu.org/licenses/>.
*
*/
#define TIMER
#include "lm_common.hh"
#include "communicator_mpi_linear.hh"
#include "geometry.hh"
#include "ball.hh"
#include "cube.hh"
#include <cstring>
/* -------------------------------------------------------------------------- */
__BEGIN_LIBMULTISCALE__
/* -------------------------------------------------------------------------- */
LinearMPI::~LinearMPI() {
  //  MPI_Type_free(&geom_type);
}
/* -------------------------------------------------------------------------- */
LinearMPI::LinearMPI() {
  DUMP("creating linear MPI communicator", DBG_INFO);

  MPI_Comm_size(MPI_COMM_WORLD, &n_procs);
  free_procs = (UInt)n_procs;
  nb_groups = 0;
  goffsets[0] = 0;

  // create the MPI datatypes needed to transport my objects
  int blocks[3];
  MPI_Aint disp[3];
  MPI_Datatype types[3];

  blocks[0] = 2;
  types[0] = MPI_UNSIGNED;
  blocks[1] = 11;
  types[1] = MPI_DOUBLE;
  blocks[2] = 1;
  types[2] = MPI_UB;

  mpi_geom MpiGeom;
  void *add = &MpiGeom.type;
  MPI_Get_address(add, disp);
  add = MpiGeom.center;
  MPI_Get_address(add, disp + 1);
  add = &MpiGeom + 1;
  MPI_Get_address(add, disp + 2);

  MPI_Aint base = disp[0];
  for (UInt i = 0; i < 3; ++i)
    disp[i] -= base;

  // for (UInt i = 0; i < 3; ++i) {
  //   DUMP("computed displacement, for checking: " << disp[i]
  //        << " and base = " << base);
  // }

  MPI_Type_struct(3, blocks, disp, types, &geom_type);
  MPI_Type_commit(&geom_type);

  sequence_number.resize(lm_world_size);
  for (UInt i = 0; i < lm_world_size; ++i)
    sequence_number[i] = 0;
}
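/* Note on the derived datatype built above: it describes the mpi_geom struct
 * as two unsigned integers (type, Dim) followed by eleven doubles (presumably
 * center[3] plus the ball radii and the cube bounds), with an MPI_UB marker
 * setting the extent. MPI_Type_struct and MPI_UB were deprecated in MPI-2 and
 * removed from MPI-3. A minimal sketch of an equivalent MPI-3 construction,
 * reusing the same blocks/displacements as above (this is not what the code
 * currently does), could be:
 *
 *   MPI_Datatype tmp_type;
 *   int blocks3[2] = {2, 11};
 *   MPI_Datatype types3[2] = {MPI_UNSIGNED, MPI_DOUBLE};
 *   MPI_Type_create_struct(2, blocks3, disp, types3, &tmp_type);
 *   // pad the extent to the full struct size instead of using MPI_UB
 *   MPI_Type_create_resized(tmp_type, 0, sizeof(mpi_geom), &geom_type);
 *   MPI_Type_free(&tmp_type);
 *   MPI_Type_commit(&geom_type);
 */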
/* -------------------------------------------------------------------------- */
UInt LinearMPI::addGroup(UInt nb_procs) {
  /* TODO: write the parser for the global domain. In the meantime I create
     two subgroups, with a group of 1 for linear elasticity. */
  int color;

  if (free_procs < nb_procs) {
    LM_FATAL("While trying to add " << nb_procs
             << " procs: There are not enough processors for the required topology!"
             << " (only " << free_procs << " are available)");
  }

  DUMP("Building processor group number " << nb_groups, DBG_INFO);

  if (lm_my_proc_id < free_procs && lm_my_proc_id >= free_procs - nb_procs)
    color = 0;
  else
    color = MPI_UNDEFINED;

  DUMP("free_procs " << free_procs << " nb_procs " << nb_procs
       << " diff " << free_procs - nb_procs << " color " << color, DBG_ALL);

  MPI_Comm_split(MPI_COMM_WORLD, color, lm_my_proc_id, &groups[nb_groups]);
  DUMP("group pointer (" << nb_groups << ")=" << groups[nb_groups], DBG_DETAIL);

  MPI_Barrier(MPI_COMM_WORLD);
  DUMP("processor " << lm_my_proc_id << " group " << nb_groups << " built",
       DBG_INFO);

  taille_groups[nb_groups] = nb_procs;
  free_procs -= nb_procs;
  goffsets[nb_groups] = free_procs;
  ++nb_groups;
  return (nb_groups - 1);
}
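/* Rank-allocation scheme (as implemented above): groups are carved from the
 * top of the MPI_COMM_WORLD rank range downwards. free_procs starts at
 * n_procs; each addGroup(nb_procs) assigns the ranks
 * [free_procs - nb_procs, free_procs) to the new group and records the lower
 * bound in goffsets. For example, with 8 processors, addGroup(3) takes ranks
 * 5-7 (goffset 5) and a following addGroup(5) takes ranks 0-4 (goffset 0). */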
/* -------------------------------------------------------------------------- */
void LinearMPI::sendLocalGeometriesToGroup(Geometry &geom, CommGroup group) {
  mpi_geom s;
  s.type = geom.getType();
  DUMP("sending geometry of type " << s.type << " " << geom, DBG_INFO);
  s.Dim = geom.getDim();
  s.center[0] = geom.getCenter(0);
  s.center[1] = geom.getCenter(1);
  s.center[2] = geom.getCenter(2);

  switch (s.type) {
  case Geometry::BALL: {
    Ball &b = static_cast<Ball &>(geom);
    s.rmin = b.rMin();
    s.rmax = b.rMax();
  } break;
  case Geometry::CUBE: {
    Cube &c = dynamic_cast<Cube &>(geom);
    s.xmax = c.getXmax()[0];
    s.xmin = c.getXmin()[0];
    s.ymax = c.getXmax()[1];
    s.ymin = c.getXmin()[1];
    s.zmax = c.getXmax()[2];
    s.zmin = c.getXmin()[2];
  } break;
  default:
    LM_FATAL("geometry " << s.type
             << " cannot be sent through network (to be implemented?)");
    break;
  }

  for (UInt i = 0; i < taille_groups[group.getID()]; ++i) {
    DUMP("sending geometry to " << i + goffsets[group.getID()]
         << " - seq number " << sequence_number[i + goffsets[group.getID()]],
         DBG_INFO);
    MPI_Send(&s, 1, geom_type, i + goffsets[group.getID()],
             sequence_number[i + goffsets[group.getID()]], MPI_COMM_WORLD);
    DUMP("geometry Sent to " << i + goffsets[group.getID()]
         << " - seq number " << sequence_number[i + goffsets[group.getID()]],
         DBG_INFO);
    ++sequence_number[i + goffsets[group.getID()]];
  }
}
/* -------------------------------------------------------------------------- */
void LinearMPI::receiveLocalGeometriesFromGroup(Geometry **geom_tab,
                                                CommGroup group) {
  if (!geom_tab) {
    LM_FATAL("invalid geometry pointer");
  }
  if (group == group_all || group == group_none)
    LM_FATAL("invalid communicator");
  if (group.getID() >= (int)nb_groups) {
    LM_FATAL("the requested group does not exist: " << group);
  }

  mpi_geom *s = new mpi_geom[taille_groups[group.getID()]];
  memset(s, 0, sizeof(mpi_geom) * taille_groups[group.getID()]);

  DUMP("about to receive " << taille_groups[group.getID()]
       << " mpi_geom structures", DBG_DETAIL);

  //  MPI_Barrier(MPI_COMM_WORLD);
  // receive the messages coming from the other group
  MPI_Status status;
  for (UInt i = 0; i < taille_groups[group.getID()]; ++i) {
    DUMP("receiving geometry from " << i + goffsets[group.getID()]
         << " - seq number " << sequence_number[i + goffsets[group.getID()]],
         DBG_INFO);
    MPI_Recv(&s[i], 1, geom_type, goffsets[group.getID()] + i,
             sequence_number[i + goffsets[group.getID()]], MPI_COMM_WORLD,
             &status);
    DUMP("geometry Received from " << i + goffsets[group.getID()]
         << " - seq number " << sequence_number[i + goffsets[group.getID()]],
         DBG_INFO);
    ++sequence_number[i + goffsets[group.getID()]];
  }

  // convert the received MPI structures into concrete geometry objects
  for (UInt i = 0; i < taille_groups[group.getID()]; ++i) {
    DUMP("recv geometry of type " << s[i].type, DBG_INFO);
    switch (s[i].type) {
    case Geometry::BALL:
      geom_tab[i] = new Ball(s[i].Dim);
      geom_tab[i]->setCenter(s[i].center[0], s[i].center[1], s[i].center[2]);
      ((Ball *)geom_tab[i])->setRayons(s[i].rmin, s[i].rmax);
      break;
    case Geometry::CUBE:
      geom_tab[i] = new Cube(s[i].Dim);
      // geom_tab[i]->SetCenter(s[i].center[0],s[i].center[1],s[i].center[2]);
      dynamic_cast<Cube *>(geom_tab[i])
          ->setDimensions(s[i].xmin, s[i].xmax, s[i].ymin, s[i].ymax,
                          s[i].zmin, s[i].zmax);
      break;
    default:
      LM_FATAL("undecodable MPI structure -> type = " << s[i].type
               << " received from " << i + goffsets[group.getID()]);
    }
  }
  delete[] s;
}
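/* Typical pairing of the two geometry-exchange calls (a sketch only: `comm`,
 * the groups `gA`/`gB` and the geometry `geom` are hypothetical names, while
 * the member functions are the ones defined in this file):
 *
 *   // every processor of group A sends its local geometry to all of group B
 *   if (comm.amIinGroup(gA))
 *     comm.sendLocalGeometriesToGroup(geom, gB);
 *   // every processor of group B receives one geometry per processor of A
 *   if (comm.amIinGroup(gB)) {
 *     Geometry **geoms = new Geometry *[comm.getNBprocsOnGroup(gA)];
 *     comm.receiveLocalGeometriesFromGroup(geoms, gA);
 *   }
 */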
/* -------------------------------------------------------------------------- */
void LinearMPI::sendCommunicationTable(std::vector<UInt> &com_with,
                                       CommGroup destgroup) {
  // here the arrays to send have the size of destgroup
  UInt desttaille = taille_groups[destgroup.getID()];

  for (UInt i = 0; i < desttaille; ++i) {
    //    UInt size = requests.size();
    //    requests.resize(size+1);
    //    MPI_Isend(&com_with[i],1,MPI_INT,i+goffsets[destgroup],
    //              sequence_number[i+goffsets[destgroup]],MPI_COMM_WORLD,&requests[size]);
    DUMP("Sending " << com_with[i] << " to " << i + goffsets[destgroup.getID()]
         << " - seq number "
         << sequence_number[i + goffsets[destgroup.getID()]], DBG_INFO);
    MPI_Send(&com_with[i], 1, MPI_INT, i + goffsets[destgroup.getID()],
             sequence_number[i + goffsets[destgroup.getID()]], MPI_COMM_WORLD);
    DUMP("Sent " << com_with[i] << " to " << i + goffsets[destgroup.getID()]
         << " - seq number "
         << sequence_number[i + goffsets[destgroup.getID()]], DBG_INFO);
    ++sequence_number[i + goffsets[destgroup.getID()]];
  }
}
/* -------------------------------------------------------------------------- */
void LinearMPI::receiveCommunicationTable(std::vector<UInt> &com_with,
                                          CommGroup fromgroup) {
  // here the arrays to fill have the size of fromgroup
  UInt fromtaille = taille_groups[fromgroup.getID()];
  MPI_Status status;

  for (UInt i = 0; i < fromtaille; ++i) {
    DUMP("Receiving from " << i + goffsets[fromgroup.getID()]
         << " - seq number "
         << sequence_number[i + goffsets[fromgroup.getID()]], DBG_INFO);
    MPI_Recv(&com_with[i], 1, MPI_INT, i + goffsets[fromgroup.getID()],
             sequence_number[i + goffsets[fromgroup.getID()]], MPI_COMM_WORLD,
             &status);
    DUMP("Received " << com_with[i] << " from "
         << i + goffsets[fromgroup.getID()] << " - seq number "
         << sequence_number[i + goffsets[fromgroup.getID()]], DBG_INFO);
    ++sequence_number[i + goffsets[fromgroup.getID()]];
  }
}
/* -------------------------------------------------------------------------- */
void LinearMPI::sendReals(CommBuffer<Real> &d, UInt nb, UInt dest,
                          CommGroup group, const std::string &buf) {
  DUMP("sending " << nb << " Reals to " << dest + goffsets[group.getID()]
       << " - seq number " << sequence_number[dest + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);
  MPI_Send(d.buffer(), nb, MPI_DOUBLE, dest + goffsets[group.getID()],
           sequence_number[dest + goffsets[group.getID()]], MPI_COMM_WORLD);
  DUMP("sent " << nb << " Reals to " << dest + goffsets[group.getID()]
       << " - seq number " << sequence_number[dest + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);
  ++sequence_number[dest + goffsets[group.getID()]];
}
/* -------------------------------------------------------------------------- */
void LinearMPI::receiveReals(CommBuffer<Real> &d, UInt nb, UInt from,
                             CommGroup group, const std::string &buf) {
  MPI_Status status;

  DUMP("probe reception of reals from " << from + goffsets[group.getID()]
       << " - seq number " << sequence_number[from + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);

  if (nb == UINT_MAX) {
    MPI_Probe(from + goffsets[group.getID()],
              sequence_number[from + goffsets[group.getID()]],
              MPI_COMM_WORLD, &status);
    int nb_tmp;
    MPI_Get_count(&status, MPI_DOUBLE, &nb_tmp);
    nb = nb_tmp;
  }

  DUMP("receiving " << nb << " Reals from " << from + goffsets[group.getID()]
       << " - seq number " << sequence_number[from + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);

  d.append(nb);
  MPI_Recv(*d, nb, MPI_DOUBLE, from + goffsets[group.getID()],
           sequence_number[from + goffsets[group.getID()]], MPI_COMM_WORLD,
           &status);

  DUMP("received " << nb << " Reals from " << from + goffsets[group.getID()]
       << " - seq number " << sequence_number[from + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);

  ++sequence_number[from + goffsets[group.getID()]];
}
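/* Note: receiveReals (and receiveUInts below) treat nb == UINT_MAX as "size
 * unknown": the incoming message is first probed with MPI_Probe and nb is
 * taken from MPI_Get_count, so the CommBuffer is grown to the actual size of
 * the message. */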
/* -------------------------------------------------------------------------- */
void LinearMPI::sendUInts(CommBuffer<UInt> &i, UInt nb, UInt dest,
                          CommGroup group, const std::string &buf) {
  DUMP("sending " << nb << " integers to " << dest + goffsets[group.getID()]
       << " - seq number " << sequence_number[dest + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);
  MPI_Send(i.buffer(), nb, MPI_INT, dest + goffsets[group.getID()],
           sequence_number[dest + goffsets[group.getID()]], MPI_COMM_WORLD);
  DUMP("sent " << nb << " integers to " << dest + goffsets[group.getID()]
       << " - seq number " << sequence_number[dest + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);
  ++sequence_number[dest + goffsets[group.getID()]];
}
/* -------------------------------------------------------------------------- */
void LinearMPI::receiveUInts(CommBuffer<UInt> &i, UInt nb, UInt from,
                             CommGroup group, const std::string &buf) {
  MPI_Status status;

  STARTTIMER("MPI_probe");
  DUMP("probe reception of integers from " << from + goffsets[group.getID()]
       << " - seq number " << sequence_number[from + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);
  if (nb == UINT_MAX) {
    MPI_Probe(from + goffsets[group.getID()],
              sequence_number[from + goffsets[group.getID()]],
              MPI_COMM_WORLD, &status);
    int nb_tmp = 0;
    MPI_Get_count(&status, MPI_INT, &nb_tmp);
    nb = nb_tmp;
  }
  STOPTIMER("MPI_probe");

  DUMP("receiving " << nb << " integers from " << from + goffsets[group.getID()]
       << " - seq number " << sequence_number[from + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);

  STARTTIMER("MPI_append");
  i.append(nb);
  STOPTIMER("MPI_append");

  STARTTIMER("MPI_receive");
  MPI_Recv(*i, nb, MPI_INT, from + goffsets[group.getID()],
           sequence_number[from + goffsets[group.getID()]], MPI_COMM_WORLD,
           &status);
  STOPTIMER("MPI_receive");

  DUMP("received " << nb << " integers from " << from + goffsets[group.getID()]
       << " - seq number " << sequence_number[from + goffsets[group.getID()]]
       << " for " << buf, DBG_INFO);

  ++sequence_number[from + goffsets[group.getID()]];
}
/* -------------------------------------------------------------------------- */
void LinearMPI::waitForPendingComs() {
  MPI_Status status;
  for (UInt i = 0; i < requests.size(); ++i)
    MPI_Wait(&requests[i], &status);
  requests.clear();
}
/* -------------------------------------------------------------------------- */
void LinearMPI::reduceUInt(CommBuffer<UInt> &contrib, UInt nb, CommGroup group,
                           const std::string &comment, Operator op) {
  MPI_Op mpi_op;
  switch (op) {
  case OP_SUM:
    mpi_op = MPI_SUM;
    break;
  default:
    LM_FATAL("unknown operator " << op);
  }

  if (!amIinGroup(group)) {
    LM_FATAL("reduction can't be made if not a member of the group: "
             << comment);
  }

  CommBuffer<UInt> result;
  result.resize(nb);
  MPI_Reduce(contrib.buffer(), result.buffer(), nb, MPI_INT, mpi_op, 0,
             groups[group.getID()]);
  contrib = result;
}
/* -------------------------------------------------------------------------- */
void LinearMPI::reduceReal(CommBuffer<Real> &contrib, UInt nb, CommGroup group,
                           const std::string &comment, Operator op) {
  MPI_Op mpi_op;
  switch (op) {
  case OP_SUM:
    mpi_op = MPI_SUM;
    break;
  default:
    LM_FATAL("unknown operator " << op);
  }

  if (!amIinGroup(group)) {
    LM_FATAL("reduction can't be made if not a member of the group: "
             << comment);
  }

  CommBuffer<Real> result;
  result.resize(nb);
  MPI_Reduce(contrib.buffer(), result.buffer(), nb, MPI_DOUBLE, mpi_op, 0,
             groups[group.getID()]);
  contrib = result;
}
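/* Note on the two reduce* methods above: MPI_Reduce delivers the reduced
 * values only on the root (rank 0 of the group communicator), so after the
 * `contrib = result` copy the contents of contrib are meaningful only on that
 * root. The allReduce* variants below use MPI_Allreduce and return the result
 * on every processor of the group. */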
/* -------------------------------------------------------------------------- */
void LinearMPI::allReduceUInt(CommBuffer<UInt> &contrib, UInt nb,
                              CommGroup group, const std::string &comment,
                              Operator op) {
  MPI_Op mpi_op;
  switch (op) {
  case OP_SUM:
    mpi_op = MPI_SUM;
    break;
  default:
    LM_FATAL("unknown operator " << op);
  }

  if (!amIinGroup(group)) {
    LM_FATAL("reduction can't be made if not a member of the group: "
             << comment);
  }

  CommBuffer<UInt> result;
  result.resize(nb);
  MPI_Allreduce(contrib.buffer(), result.buffer(), nb, MPI_INT, mpi_op,
                groups[group.getID()]);
  contrib = result;
}
/* -------------------------------------------------------------------------- */
void LinearMPI::allReduceReal(CommBuffer<Real> &contrib, UInt nb,
                              CommGroup group, const std::string &comment,
                              Operator op) {
  MPI_Op mpi_op;
  switch (op) {
  case OP_SUM:
    mpi_op = MPI_SUM;
    break;
  default:
    LM_FATAL("unknown operator " << op);
  }

  if (!amIinGroup(group)) {
    LM_FATAL("reduction can't be made if not a member of the group: "
             << comment);
  }

  CommBuffer<Real> result;
  result.resize(nb);
  MPI_Allreduce(contrib.buffer(), result.buffer(), nb, MPI_DOUBLE, mpi_op,
                groups[group.getID()]);
  contrib = result;
}
/* -------------------------------------------------------------------------- */
UInt LinearMPI::getNBGroups() {
  return nb_groups;
}
/* -------------------------------------------------------------------------- */
bool LinearMPI::amIinGroup(CommGroup group) {
  if (group == group_all)
    return true;
  if (group == group_none)
    return false;

  DUMP("testing if i am in " << group << " : "
       << (group.getID() < (int)nb_groups &&
           groups[group.getID()] != MPI_COMM_NULL),
       DBG_DETAIL);

  if (group.getID() >= (int)nb_groups)
    return false;
  return (groups[group.getID()] != MPI_COMM_NULL);
}
/* -------------------------------------------------------------------------- */
bool LinearMPI::isInGroup(UInt i, CommGroup group) {
  if (group == group_all)
    return true;
  if (group == group_none)
    return false;
  return (i >= goffsets[group.getID()] &&
          i < goffsets[group.getID()] + taille_groups[group.getID()]);
}
/* -------------------------------------------------------------------------- */
MPI_Comm LinearMPI::getMpiGroup(CommGroup group) {
  return groups[group.getID()];
}
/* -------------------------------------------------------------------------- */
UInt LinearMPI::getNBprocsOnGroup(CommGroup group) {
  LM_ASSERT(group != group_invalid, "invalid group");
  if (group == group_all)
    return n_procs;
  if (group == group_none)
    return 0;
  return taille_groups[group.getID()];
}
/* -------------------------------------------------------------------------- */
void LinearMPI::synchronize(CommGroup group_index) {
  STARTTIMER("waiting");
  DUMP("group_index " << group_index, DBG_INFO);
  if (group_index == group_all)
    MPI_Barrier(MPI_COMM_WORLD);
  else
    MPI_Barrier(groups[group_index.getID()]);
  STOPTIMER("waiting");
}
/* -------------------------------------------------------------------------- */
UInt LinearMPI::realRank(UInt i, CommGroup group) {
  return i + goffsets[group.getID()];
}
/* -------------------------------------------------------------------------- */
UInt LinearMPI::groupRank(UInt i, CommGroup group) {
  if (group == group_all)
    return i;
  if (group == group_none)
    LM_FATAL("cannot request rank from group_none");

  if (isInGroup(i, group))
    return i - goffsets[group.getID()];
  else {
    LM_FATAL("requested global rank " << i << " not in logical subgroup "
             << group);
  }
}
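/* realRank and groupRank convert between the two rank spaces used by this
 * communicator: realRank maps a group-local index i to the global
 * MPI_COMM_WORLD rank (i + goffsets[group]), and groupRank is the inverse
 * mapping for ranks that belong to the group. */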
/* -------------------------------------------------------------------------- */
void LinearMPI::printself(std::ostream &stream) {
  stream << "LinearMPI object " << std::endl;
  stream << "free procs " << free_procs << std::endl;
  stream << "nb_groups " << nb_groups << std::endl;
  for (UInt i = 0; i < nb_groups; ++i) {
    stream << "size_group[" << i << "] = " << taille_groups[i] << std::endl;
    stream << "goffsets[" << i << "] = " << goffsets[i] << std::endl;
    stream << "groups[" << i << "] = " << groups[i] << std::endl;
    CommGroup tmp(i);
    stream << "am I in group " << i << " ? " << amIinGroup(tmp) << std::endl;
  }
}
/* -------------------------------------------------------------------------- */
__END_LIBMULTISCALE__