This article collects typical usage examples of the C++ method AtomVec::pack_border. If you are unsure what AtomVec::pack_border does or how to call it, the examples selected here may help; you can also look further into the AtomVec class that the method belongs to.
Two code examples of AtomVec::pack_border are shown below, ordered by popularity by default.
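Before the examples, it helps to know the shape of the call. In the LAMMPS sources shown below, pack_border is a virtual AtomVec method that serializes the atoms listed in a send list into a flat double buffer, optionally applying a periodic-boundary shift, and returns the number of doubles packed; unpack_border later appends those atoms as ghosts on the receiving rank. The fragment below is only a rough sketch of what an implementation of this pattern can look like for a simple atomic style; the exact fields packed, the handling of integer fields, and the box-length arguments are assumptions for illustration, not the actual LAMMPS code.

// Rough sketch (not the LAMMPS implementation): pack position, tag, type and
// mask of each atom in 'list' into 'buf', shifting coordinates by a box length
// when pbc_flag is set, and return the number of doubles written.
int pack_border_sketch(int n, int *list, double *buf, int pbc_flag, int *pbc,
                       double **x, int *tag, int *type, int *mask,
                       double xprd, double yprd, double zprd)
{
  int m = 0;
  for (int i = 0; i < n; i++) {
    int j = list[i];
    double dx = 0.0, dy = 0.0, dz = 0.0;
    if (pbc_flag) {                 // assumed orthogonal box; triclinic case omitted
      dx = pbc[0]*xprd;
      dy = pbc[1]*yprd;
      dz = pbc[2]*zprd;
    }
    buf[m++] = x[j][0] + dx;
    buf[m++] = x[j][1] + dy;
    buf[m++] = x[j][2] + dz;
    buf[m++] = tag[j];              // integer fields stored as doubles for simplicity
    buf[m++] = type[j];
    buf[m++] = mask[j];
  }
  return m;
}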
Example 1: borders_cuda
void CommCuda::borders_cuda()
{
  int i,n,itype,iswap,dim,ineed,maxneed,smax,rmax;
  int nsend,nrecv,nfirst,nlast,ngroup;
  double lo,hi;
  int *type;
  double **x;
  double *buf,*mlo,*mhi;
  MPI_Request request;
  MPI_Status status;
  AtomVec *avec = atom->avec;
  timespec time1,time2,time3;

  // clear old ghosts

  atom->nghost = 0;

  // do swaps over all 3 dimensions

  iswap = 0;
  smax = rmax = 0;
  cuda->shared_data.comm.nsend = 0;
  for (dim = 0; dim < 3; dim++) {
    nlast = 0;
    maxneed = 2*need[dim];
    for (ineed = 0; ineed < maxneed; ineed++) {

      // find atoms within slab boundaries lo/hi using <= and >=
      // check atoms between nfirst and nlast
      //   for first swaps in a dim, check owned and ghost
      //   for later swaps in a dim, only check newly arrived ghosts
      // store sent atom indices in list for use in future timesteps

      x = atom->x;
      if (style == SINGLE) {
        lo = slablo[iswap];
        hi = slabhi[iswap];
      } else {
        type = atom->type;
        mlo = multilo[iswap];
        mhi = multihi[iswap];
      }

      if (ineed % 2 == 0) {
        nfirst = nlast;
        nlast = atom->nlocal + atom->nghost;
      }

      nsend = 0;

      // find send atoms according to SINGLE vs MULTI
      // all atoms eligible versus atoms in bordergroup
      // only need to limit loop to bordergroup for first sends (ineed < 2)
      // on these sends, break loop in two: owned (in group) and ghost

      do {
        if (nsend >= maxsendlist[iswap]) grow_list(iswap,static_cast<int>(nsend*1.05));
        nsend = Cuda_CommCuda_BuildSendlist(&cuda->shared_data,bordergroup,ineed,
                                            style==SINGLE?1:0,atom->nfirst,nfirst,nlast,dim,iswap);
      } while (nsend >= maxsendlist[iswap]);

      // pack up list of border atoms

      if (nsend*size_border > maxsend)
        grow_send(nsend*size_border,0);

      if (ghost_velocity)
        n = avec->pack_border_vel(nsend,&iswap,buf_send,
                                  pbc_flag[iswap],pbc[iswap]);
      else
        n = avec->pack_border(nsend,&iswap,buf_send,
                              pbc_flag[iswap],pbc[iswap]);

      // swap atoms with other proc
      // put incoming ghosts at end of my atom arrays
      // if swapping with self, simply copy, no messages

      clock_gettime(CLOCK_REALTIME,&time1);

      if (sendproc[iswap] != me) {
        MPI_Sendrecv(&nsend,1,MPI_INT,sendproc[iswap],0,
                     &nrecv,1,MPI_INT,recvproc[iswap],0,world,&status);
        if (nrecv*size_border > maxrecv)
          grow_recv(nrecv*size_border);
        MPI_Irecv(buf_recv,nrecv*size_border,MPI_DOUBLE,
                  recvproc[iswap],0,world,&request);
        MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
        MPI_Wait(&request,&status);
        buf = buf_recv;
      } else {
        nrecv = nsend;
        buf = buf_send;
      }

      clock_gettime(CLOCK_REALTIME,&time2);
      cuda->shared_data.cuda_timings.comm_border_mpi +=
        time2.tv_sec-time1.tv_sec+1.0*(time2.tv_nsec-time1.tv_nsec)/1000000000;

      // unpack buffer

      if (ghost_velocity)
        avec->unpack_border_vel(nrecv,atom->nlocal+atom->nghost,buf);
      else
        //......... part of this code omitted .........
Example 2: borders
//......... part of this code omitted .........
                sendlist[iswap][nsend++] = i;
              }
            }
          }

        } else {
          if (mode == SINGLE) {
            ngroup = atom->nfirst;
            for (i = 0; i < ngroup; i++)
              if (x[i][dim] >= lo && x[i][dim] <= hi) {
                if (nsend == maxsendlist[iswap]) grow_list(iswap,nsend);
                sendlist[iswap][nsend++] = i;
              }
            for (i = atom->nlocal; i < nlast; i++)
              if (x[i][dim] >= lo && x[i][dim] <= hi) {
                if (nsend == maxsendlist[iswap]) grow_list(iswap,nsend);
                sendlist[iswap][nsend++] = i;
              }
          } else {
            ngroup = atom->nfirst;
            for (i = 0; i < ngroup; i++) {
              itype = type[i];
              if (x[i][dim] >= mlo[itype] && x[i][dim] <= mhi[itype]) {
                if (nsend == maxsendlist[iswap]) grow_list(iswap,nsend);
                sendlist[iswap][nsend++] = i;
              }
            }
            for (i = atom->nlocal; i < nlast; i++) {
              itype = type[i];
              if (x[i][dim] >= mlo[itype] && x[i][dim] <= mhi[itype]) {
                if (nsend == maxsendlist[iswap]) grow_list(iswap,nsend);
                sendlist[iswap][nsend++] = i;
              }
            }
          }
        }
      }

      // pack up list of border atoms

      if (nsend*size_border > maxsend) grow_send(nsend*size_border,0);

      if (ghost_velocity)
        n = avec->pack_border_vel(nsend,sendlist[iswap],buf_send,
                                  pbc_flag[iswap],pbc[iswap]);
      else
        n = avec->pack_border(nsend,sendlist[iswap],buf_send,
                              pbc_flag[iswap],pbc[iswap]);

      // swap atoms with other proc
      // no MPI calls except SendRecv if nsend/nrecv = 0
      // put incoming ghosts at end of my atom arrays
      // if swapping with self, simply copy, no messages

      if (sendproc[iswap] != me) {
        MPI_Sendrecv(&nsend,1,MPI_INT,sendproc[iswap],0,
                     &nrecv,1,MPI_INT,recvproc[iswap],0,world,MPI_STATUS_IGNORE);
        if (nrecv*size_border > maxrecv) grow_recv(nrecv*size_border);
        if (nrecv) MPI_Irecv(buf_recv,nrecv*size_border,MPI_DOUBLE,
                             recvproc[iswap],0,world,&request);
        if (n) MPI_Send(buf_send,n,MPI_DOUBLE,sendproc[iswap],0,world);
        if (nrecv) MPI_Wait(&request,MPI_STATUS_IGNORE);
        buf = buf_recv;
      } else {
        nrecv = nsend;
        buf = buf_send;
      }

      // unpack buffer

      if (ghost_velocity)
        avec->unpack_border_vel(nrecv,atom->nlocal+atom->nghost,buf);
      else
        avec->unpack_border(nrecv,atom->nlocal+atom->nghost,buf);

      // set all pointers & counters

      smax = MAX(smax,nsend);
      rmax = MAX(rmax,nrecv);
      sendnum[iswap] = nsend;
      recvnum[iswap] = nrecv;
      size_forward_recv[iswap] = nrecv*size_forward;
      size_reverse_send[iswap] = nrecv*size_reverse;
      size_reverse_recv[iswap] = nsend*size_reverse;
      firstrecv[iswap] = atom->nlocal + atom->nghost;
      atom->nghost += nrecv;
      iswap++;
    }
  }

  // insure send/recv buffers are long enough for all forward & reverse comm

  int max = MAX(maxforward*smax,maxreverse*rmax);
  if (max > maxsend) grow_send(max,0);
  max = MAX(maxforward*rmax,maxreverse*smax);
  if (max > maxrecv) grow_recv(max);

  // reset global->local map

  if (map_style) atom->map_set();
}
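Both examples follow the same round trip: build a send list, serialize those atoms with pack_border (or pack_border_vel), exchange the flat double buffer with the partner rank, and append the received atoms as ghosts with unpack_border. The helper below is a condensed, hypothetical sketch of that round trip, not part of the LAMMPS API; the function name, the assumed buffer size (size_border = 6), and the omission of the self-swap shortcut and buffer-growth logic are all simplifications for illustration.

// Hypothetical round-trip helper, loosely following the pattern in borders():
// pack the atoms in 'sendlist', exchange counts and flat buffers with one
// partner rank, then unpack the result as ghost atoms on this rank.
// Assumes the LAMMPS headers (atom.h, atom_vec.h) are available.
#include <mpi.h>
#include <vector>

void exchange_border_sketch(LAMMPS_NS::AtomVec *avec, LAMMPS_NS::Atom *atom,
                            int nsend, int *sendlist, int pbc_flag, int *pbc,
                            int sendproc, int recvproc, MPI_Comm world)
{
  const int size_border = 6;                        // assumed doubles per atom
  std::vector<double> buf_send(nsend*size_border);
  int n = avec->pack_border(nsend, sendlist, buf_send.data(), pbc_flag, pbc);

  int nrecv;                                        // how many atoms will arrive
  MPI_Sendrecv(&nsend, 1, MPI_INT, sendproc, 0,
               &nrecv, 1, MPI_INT, recvproc, 0, world, MPI_STATUS_IGNORE);

  std::vector<double> buf_recv(nrecv*size_border);
  MPI_Sendrecv(buf_send.data(), n, MPI_DOUBLE, sendproc, 0,
               buf_recv.data(), nrecv*size_border, MPI_DOUBLE, recvproc, 0,
               world, MPI_STATUS_IGNORE);

  // received atoms become ghosts appended after the current local+ghost atoms
  avec->unpack_border(nrecv, atom->nlocal + atom->nghost, buf_recv.data());
  atom->nghost += nrecv;
}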