/* simulation.cc (rSCINTROPARALLEL: Poisson code for introduction to parallelism) */
/* -------------------------------------------------------------------------- */
#include "simulation.hh"
/* -------------------------------------------------------------------------- */
#include <cmath>
#include <iostream>
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
Simulation::Simulation(int m, int n, MPI_Comm communicator)
    : m_global_m(m), m_global_n(n), m_epsilon(1e-7), m_h_m(1. / m),
      m_h_n(1. / n),
      m_dumper(new DumperBinary(m_grids.old(), communicator)),
      m_communicator(communicator) {
  MPI_Comm_rank(m_communicator, &m_prank);
  MPI_Comm_size(m_communicator, &m_psize);

  m_local_m = m / m_psize + (m_prank < m % m_psize ? 1 : 0);
  m_local_n = n;

  // adding ghosts
  if (m_psize > 1)
    m_local_m += (m_prank == 0 || m_prank == m_psize - 1) ? 1 : 2;

  m_offset_m = (m / m_psize) * m_prank +
               (m_prank < m % m_psize ? m_prank : m % m_psize);
  m_offset_n = 0;

  m_grids.resize(m_local_m, m_local_n);
  m_f.resize(m_local_m, m_local_n);

  m_north_prank = (m_prank == 0 ? MPI_PROC_NULL : m_prank - 1);
  m_south_prank = (m_prank == (m_psize - 1) ? MPI_PROC_NULL : m_prank + 1);

  // std::cout << m_prank << " " << m_global_m << " " << m_global_n << " "
  //           << m_local_m << " " << m_local_n << " " << m_offset_m << " "
  //           << m_offset_n << " " << m_north_prank << " " << m_south_prank
  //           << std::endl;
}
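/* Worked example of the decomposition above (illustrative numbers, not from
   the source): with m = 10 global rows on m_psize = 4 ranks, m / m_psize = 2
   and m % m_psize = 2, so ranks 0 and 1 own 3 rows each and ranks 2 and 3
   own 2 each:

     rank | owned rows | m_offset_m | m_local_m with ghosts
     -----+------------+------------+----------------------
      0   |     3      |     0      |  4 (south ghost only)
      1   |     3      |     3      |  5 (both ghosts)
      2   |     2      |     6      |  4 (both ghosts)
      3   |     2      |     8      |  3 (north ghost only)
*/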
/* -------------------------------------------------------------------------- */
void Simulation::set_initial_conditions() {
  for (int i = 0; i < m_local_m; i++) {
    for (int j = 0; j < m_local_n; j++) {
      m_f(i, j) = -2. * 100. * M_PI * M_PI *
                  std::sin(10. * M_PI * (m_offset_m + i) * m_h_m) *
                  std::sin(10. * M_PI * (m_offset_n + j) * m_h_n);
    }
  }
}
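/* The source term set above corresponds to a problem with a known closed-form
   solution: for f(x, y) = -2 * (10 pi)^2 * sin(10 pi x) * sin(10 pi y) on the
   unit square, the Poisson equation u_xx + u_yy = f is solved exactly by
   u(x, y) = sin(10 pi x) * sin(10 pi y), since each second derivative brings
   down a factor of -(10 pi)^2. The global coordinates of the local point
   (i, j) are x = (m_offset_m + i) * m_h_m and y = (m_offset_n + j) * m_h_n. */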
/* -------------------------------------------------------------------------- */
std::tuple<float, int> Simulation::compute() {
  int s = 0;
  float l2 = 0;
  do {
    l2 = compute_step();
    m_grids.swap();
    // m_dumper->dump(s);
    ++s;
  } while (l2 > m_epsilon);

  return std::make_tuple(l2, s);
}
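/* A minimal driver sketch showing how the pieces above fit together. This is
   not part of the original file: main.cc is not shown here, so the includes
   and the grid size are assumptions; only the constructor,
   set_initial_conditions() and compute() used below are visible in this file.

   #include "simulation.hh"

   #include <iostream>
   #include <tuple>
   #include <mpi.h>

   int main(int argc, char ** argv) {
     MPI_Init(&argc, &argv);

     Simulation simu(256, 256, MPI_COMM_WORLD);
     simu.set_initial_conditions();

     float l2;
     int iterations;
     std::tie(l2, iterations) = simu.compute();

     int prank;
     MPI_Comm_rank(MPI_COMM_WORLD, &prank);
     if (prank == 0)
       std::cout << "converged to l2 = " << l2 << " in " << iterations
                 << " iterations" << std::endl;

     MPI_Finalize();
     return 0;
   }
*/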
/* -------------------------------------------------------------------------- */
void Simulation::set_epsilon(float epsilon) { m_epsilon = epsilon; }
/* -------------------------------------------------------------------------- */
float Simulation::epsilon() const { return m_epsilon; }
/* -------------------------------------------------------------------------- */
inline float Simulation::compute_row(int i) {
  float l2 = 0;

  Grid & u = m_grids.current();
  Grid & uo = m_grids.old();

  for (int j = 1; j < m_local_n - 1; j++) {
    // computation of the new step
    u(i, j) = 0.25 * (uo(i - 1, j) + uo(i + 1, j) + uo(i, j - 1) +
                      uo(i, j + 1) - m_f(i, j) * m_h_m * m_h_n);

    // L2 norm
    l2 += (uo(i, j) - u(i, j)) * (uo(i, j) - u(i, j));
  }

  return l2;
}
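/* The update in compute_row() is the classic five-point Jacobi iteration for
   the Poisson equation u_xx + u_yy = f. Discretizing each second derivative
   with central differences,

     u_xx ~ (u(i-1,j) - 2 u(i,j) + u(i+1,j)) / h^2
     u_yy ~ (u(i,j-1) - 2 u(i,j) + u(i,j+1)) / h^2

   and solving the resulting relation for u(i, j) gives

     u(i,j) = 0.25 * (u(i-1,j) + u(i+1,j) + u(i,j-1) + u(i,j+1) - h^2 f(i,j)).

   Note that the 0.25 prefactor and the m_h_m * m_h_n product both assume a
   uniform spacing h = m_h_m = m_h_n, i.e. m == n, since m_h_m = 1/m and
   m_h_n = 1/n. */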
/* -------------------------------------------------------------------------- */
float Simulation::compute_step() {
  float l2 = 0.;

  Grid & uo = m_grids.old();

  // start the ghost-row exchange with both neighbors; the requests stay
  // pending so the interior rows can be computed in the meantime
  MPI_Irecv(&uo(0, 0), m_local_n, MPI_FLOAT, m_north_prank, 0, m_communicator,
            &m_requests[0]);
  MPI_Irecv(&uo(m_local_m - 1, 0), m_local_n, MPI_FLOAT, m_south_prank, 0,
            m_communicator, &m_requests[1]);
  MPI_Isend(&uo(1, 0), m_local_n, MPI_FLOAT, m_north_prank, 0, m_communicator,
            &m_requests[2]);
  MPI_Isend(&uo(m_local_m - 2, 0), m_local_n, MPI_FLOAT, m_south_prank, 0,
            m_communicator, &m_requests[3]);

  // interior rows do not depend on the ghosts
  for (int i = 2; i < m_local_m - 2; i++) {
    l2 += compute_row(i);
  }

  /// wait to receive the ghosts before using them for the computation
  MPI_Waitall(2, m_requests.data(), MPI_STATUSES_IGNORE);

  l2 += compute_row(1);
  l2 += compute_row(m_local_m - 2);

  /// wait to send everything before changing the buffers
  MPI_Waitall(2, m_requests.data() + 2, MPI_STATUSES_IGNORE);

  MPI_Allreduce(MPI_IN_PLACE, &l2, 1, MPI_FLOAT, MPI_SUM, m_communicator);

  return l2;
}
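/* Layout of the local grid on an interior rank, as implied by the exchanges
   in compute_step() (local row indices):

     row 0                    ghost, received from m_north_prank
     row 1                    first owned row, sent to m_north_prank
     rows 2 .. m_local_m - 3  interior, computed while messages are in flight
     row m_local_m - 2        last owned row, sent to m_south_prank
     row m_local_m - 1        ghost, received from m_south_prank

   On the first and last ranks the corresponding neighbor is MPI_PROC_NULL,
   so those sends and receives complete immediately without transferring
   data. */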