pair_lj_cut_coul_msm_gpu.cpp
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Trung Dac Nguyen (ORNL)
------------------------------------------------------------------------- */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "pair_lj_cut_coul_msm_gpu.h"
#include "atom.h"
#include "atom_vec.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "integrate.h"
#include "memory.h"
#include "error.h"
#include "neigh_request.h"
#include "universe.h"
#include "update.h"
#include "domain.h"
#include "kspace.h"
#include <string.h>
#include "gpu_extra.h"
using namespace LAMMPS_NS;
// External functions from cuda library for atom decomposition
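// (These are provided by the GPU package library: ljcm_gpu_init/ljcm_gpu_clear
//  manage the device-side copies of the pair parameters, ljcm_gpu_compute_n
//  additionally builds the neighbor list on the device, ljcm_gpu_compute reuses
//  a host-built neighbor list, and ljcm_gpu_bytes reports device memory usage.)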
int ljcm_gpu_init(const int ntypes, double **cutsq, double **host_lj1,
                  double **host_lj2, double **host_lj3, double **host_lj4,
                  double **host_gcons, double **host_dgcons, double **offset,
                  double *special_lj, const int inum, const int nall,
                  const int max_nbors, const int maxspecial,
                  const double cell_size, int &gpu_mode, FILE *screen,
                  double **host_cut_ljsq, double host_cut_coulsq,
                  double *host_special_coul, const int order,
                  const double qqrd2e);
void ljcm_gpu_clear();
int ** ljcm_gpu_compute_n(const int ago, const int inum, const int nall,
                          double **host_x, int *host_type, double *sublo,
                          double *subhi, tagint *tag, int **nspecial,
                          tagint **special, const bool eflag, const bool vflag,
                          const bool eatom, const bool vatom, int &host_start,
                          int **ilist, int **jnum, const double cpu_time,
                          bool &success, double *host_q, double *boxlo,
                          double *prd);
void ljcm_gpu_compute(const int ago, const int inum, const int nall,
                      double **host_x, int *host_type, int *ilist, int *numj,
                      int **firstneigh, const bool eflag, const bool vflag,
                      const bool eatom, const bool vatom, int &host_start,
                      const double cpu_time, bool &success, double *host_q,
                      const int nlocal, double *boxlo, double *prd);
double ljcm_gpu_bytes();
/* ---------------------------------------------------------------------- */
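// The constructor disables rRESPA support and run-time re-initialization
// (reinitflag) for this accelerated style, and GPU_EXTRA::gpu_ready() checks
// that GPU support has been enabled for this run before the style is used.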
PairLJCutCoulMSMGPU::PairLJCutCoulMSMGPU(LAMMPS *lmp) :
  PairLJCutCoulMSM(lmp), gpu_mode(GPU_FORCE)
{
  respa_enable = 0;
  reinitflag = 0;
  cpu_time = 0.0;
  GPU_EXTRA::gpu_ready(lmp->modify, lmp->error);
}
/* ----------------------------------------------------------------------
free all arrays
------------------------------------------------------------------------- */
PairLJCutCoulMSMGPU::~PairLJCutCoulMSMGPU()
{
  ljcm_gpu_clear();
}
/* ---------------------------------------------------------------------- */
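// compute() dispatches the force calculation to the GPU library.  When the
// neighbor list is built on the device (gpu_mode != GPU_FORCE),
// ljcm_gpu_compute_n returns the list it built; otherwise the host-built list
// is passed to ljcm_gpu_compute.  Any atoms from host_start up to inum that
// the library leaves to the host are handled by cpu_compute(), and the time
// spent there is recorded in cpu_time for load balancing.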
void PairLJCutCoulMSMGPU::compute(int eflag, int vflag)
{
  if (eflag || vflag) ev_setup(eflag,vflag);
  else evflag = vflag_fdotr = 0;

  int nall = atom->nlocal + atom->nghost;
  int inum, host_start;

  bool success = true;
  int *ilist, *numneigh, **firstneigh;
  if (gpu_mode != GPU_FORCE) {
    inum = atom->nlocal;
    firstneigh = ljcm_gpu_compute_n(neighbor->ago, inum, nall, atom->x,
                                    atom->type, domain->sublo, domain->subhi,
                                    atom->tag, atom->nspecial, atom->special,
                                    eflag, vflag, eflag_atom, vflag_atom,
                                    host_start, &ilist, &numneigh, cpu_time,
                                    success, atom->q, domain->boxlo,
                                    domain->prd);
  } else {
    inum = list->inum;
    ilist = list->ilist;
    numneigh = list->numneigh;
    firstneigh = list->firstneigh;
    ljcm_gpu_compute(neighbor->ago, inum, nall, atom->x, atom->type,
                     ilist, numneigh, firstneigh, eflag, vflag, eflag_atom,
                     vflag_atom, host_start, cpu_time, success, atom->q,
                     atom->nlocal, domain->boxlo, domain->prd);
  }
  if (!success)
    error->one(FLERR,"Insufficient memory on accelerator");

  if (host_start < inum) {
    cpu_time = MPI_Wtime();
    cpu_compute(host_start, inum, eflag, vflag, ilist, numneigh, firstneigh);
    cpu_time = MPI_Wtime() - cpu_time;
  }
}
/* ----------------------------------------------------------------------
init specific to this pair style
------------------------------------------------------------------------- */
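// In addition to the usual consistency checks, the MSM coefficients
// (gcons/dgcons) and the MSM order are taken from the KSpace solver and passed
// to the GPU library, together with a cell size of sqrt(max cutoff) plus the
// neighbor skin and a maximum of 300 neighbors per atom (the max_nbors
// argument).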
void PairLJCutCoulMSMGPU::init_style()
{
  cut_respa = NULL;

  if (force->newton_pair)
    error->all(FLERR,
               "Cannot use newton pair with lj/cut/coul/msm/gpu pair style");

  if (force->kspace->scalar_pressure_flag)
    error->all(FLERR,"Must use 'kspace_modify pressure/scalar no' "
               "with GPU MSM Pair styles");

  // Repeat cutsq calculation because done after call to init_style

  double maxcut = -1.0;
  double cut;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i,j);
        cut *= cut;
        if (cut > maxcut)
          maxcut = cut;
        cutsq[i][j] = cutsq[j][i] = cut;
      } else
        cutsq[i][j] = cutsq[j][i] = 0.0;
    }
  }
  double cell_size = sqrt(maxcut) + neighbor->skin;

  cut_coulsq = cut_coul * cut_coul;

  // setup force tables

  if (ncoultablebits) init_tables(cut_coul,cut_respa);

  int maxspecial = 0;
  if (atom->molecular)
    maxspecial = atom->maxspecial;
  int success = ljcm_gpu_init(atom->ntypes+1, cutsq, lj1, lj2, lj3, lj4,
                              force->kspace->get_gcons(),
                              force->kspace->get_dgcons(),
                              offset, force->special_lj, atom->nlocal,
                              atom->nlocal+atom->nghost, 300, maxspecial,
                              cell_size, gpu_mode, screen, cut_ljsq,
                              cut_coulsq, force->special_coul,
                              force->kspace->order, force->qqrd2e);
  GPU_EXTRA::check_flag(success,error,world);

  if (gpu_mode == GPU_FORCE) {
    int irequest = neighbor->request(this,instance_me);
    neighbor->requests[irequest]->half = 0;
    neighbor->requests[irequest]->full = 1;
  }
}
/* ---------------------------------------------------------------------- */
double PairLJCutCoulMSMGPU::memory_usage()
{
  double bytes = Pair::memory_usage();
  return bytes + ljcm_gpu_bytes();
}
/* ---------------------------------------------------------------------- */
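// cpu_compute() evaluates the same lj/cut/coul/msm interactions on the host
// for the atoms in ilist[start..inum-1] that were not handled on the device.
// Inside the Coulomb cutoff it either applies the MSM short-range splitting
// functions gamma()/dgamma() directly or, when force tables are enabled
// (ncoultablebits) and rsq > tabinnersq, interpolates from the precomputed
// tables.  It expects a full neighbor list, hence ev_tally_full().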
void PairLJCutCoulMSMGPU::cpu_compute(int start, int inum, int eflag,
                                      int vflag, int *ilist, int *numneigh,
                                      int **firstneigh)
{
  int i,j,ii,jj,jnum,itype,jtype,itable;
  double qtmp,xtmp,ytmp,ztmp,delx,dely,delz,evdwl,ecoul,fpair;
  double fraction,table;
  double r,r2inv,r6inv,forcecoul,forcelj,factor_coul,factor_lj;
  double egamma,fgamma,prefactor;
  int *jlist;
  double rsq;

  double **x = atom->x;
  double **f = atom->f;
  double *q = atom->q;
  int *type = atom->type;
  double *special_coul = force->special_coul;
  double *special_lj = force->special_lj;
  double qqrd2e = force->qqrd2e;

  // loop over neighbors of my atoms

  for (ii = start; ii < inum; ii++) {
    i = ilist[ii];
    qtmp = q[i];
    xtmp = x[i][0];
    ytmp = x[i][1];
    ztmp = x[i][2];
    itype = type[i];
    jlist = firstneigh[i];
    jnum = numneigh[i];

    for (jj = 0; jj < jnum; jj++) {
      j = jlist[jj];
      factor_lj = special_lj[sbmask(j)];
      factor_coul = special_coul[sbmask(j)];
      j &= NEIGHMASK;

      delx = xtmp - x[j][0];
      dely = ytmp - x[j][1];
      delz = ztmp - x[j][2];
      rsq = delx*delx + dely*dely + delz*delz;
      jtype = type[j];

      if (rsq < cutsq[itype][jtype]) {
        r2inv = 1.0/rsq;

        if (rsq < cut_coulsq) {
          if (!ncoultablebits || rsq <= tabinnersq) {
            r = sqrt(rsq);
            prefactor = qqrd2e*qtmp*q[j]/r;
            egamma = 1.0 - (r/cut_coul)*force->kspace->gamma(r/cut_coul);
            fgamma = 1.0 + (rsq/cut_coulsq)*
              force->kspace->dgamma(r/cut_coul);
            forcecoul = prefactor * fgamma;
            if (factor_coul < 1.0) forcecoul -= (1.0-factor_coul)*prefactor;
          } else {
            union_int_float_t rsq_lookup;
            rsq_lookup.f = rsq;
            itable = rsq_lookup.i & ncoulmask;
            itable >>= ncoulshiftbits;
            fraction = (rsq_lookup.f - rtable[itable]) * drtable[itable];
            table = ftable[itable] + fraction*dftable[itable];
            forcecoul = qtmp*q[j] * table;
            if (factor_coul < 1.0) {
              table = ctable[itable] + fraction*dctable[itable];
              prefactor = qtmp*q[j] * table;
              forcecoul -= (1.0-factor_coul)*prefactor;
            }
          }
        } else forcecoul = 0.0;

        if (rsq < cut_ljsq[itype][jtype]) {
          r6inv = r2inv*r2inv*r2inv;
          forcelj = r6inv * (lj1[itype][jtype]*r6inv - lj2[itype][jtype]);
        } else forcelj = 0.0;

        fpair = (forcecoul + forcelj) * r2inv;

        f[i][0] += delx*fpair;
        f[i][1] += dely*fpair;
        f[i][2] += delz*fpair;

        if (eflag) {
          if (rsq < cut_coulsq) {
            if (!ncoultablebits || rsq <= tabinnersq)
              ecoul = prefactor*egamma;
            else {
              table = etable[itable] + fraction*detable[itable];
              ecoul = qtmp*q[j] * table;
            }
            if (factor_coul < 1.0) ecoul -= (1.0-factor_coul)*prefactor;
          } else ecoul = 0.0;

          if (rsq < cut_ljsq[itype][jtype]) {
            evdwl = r6inv*(lj3[itype][jtype]*r6inv-lj4[itype][jtype]) -
              offset[itype][jtype];
            evdwl *= factor_lj;
          } else evdwl = 0.0;
        }

        if (evflag) ev_tally_full(i,evdwl,ecoul,fpair,delx,dely,delz);
      }
    }
  }
}