This article collects typical usage examples of the C++ method AtomVec::unpack_exchange. If you have been wondering what AtomVec::unpack_exchange does, how to call it, or what real uses of it look like, the curated examples here may help. You can also explore further usage examples for the containing class, AtomVec.
The 2 code examples of AtomVec::unpack_exchange shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
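Before the examples, the calling convention is worth spelling out: in LAMMPS, AtomVec::pack_exchange() serializes one atom into a buffer of doubles and returns the number of values written, while AtomVec::unpack_exchange() does the reverse, appending the atom to the local list and returning the number of values consumed. Each record is self-describing: its first value is its own length. The sketch below illustrates that record format with a deliberately simplified field set; pack_atom and unpack_atom are hypothetical stand-ins, and real AtomVec styles pack more per-atom data (velocities, image flags, style-specific fields).

#include <cstdio>
#include <vector>

// pack one atom as a self-describing record: [length, x, y, z, tag];
// returns the number of values written, mirroring pack_exchange()
static int pack_atom(double x, double y, double z, double tag, double *buf) {
  int m = 1;                  // slot 0 is reserved for the record length
  buf[m++] = x;
  buf[m++] = y;
  buf[m++] = z;
  buf[m++] = tag;
  buf[0] = m;                 // write the length last, once it is known
  return m;
}

// unpack one record and return its length, mirroring unpack_exchange()
static int unpack_atom(const double *buf) {
  std::printf("atom %g at (%g,%g,%g)\n",buf[4],buf[1],buf[2],buf[3]);
  return static_cast<int>(buf[0]);
}

int main() {
  std::vector<double> buf(64);
  int n = 0;
  n += pack_atom(0.1,0.2,0.3,1,&buf[n]);
  n += pack_atom(1.5,2.5,3.5,2,&buf[n]);
  for (int m = 0; m < n; )    // walk the buffer record by record
    m += unpack_atom(&buf[m]);
  return 0;
}

The length prefix is what lets the examples below both skip a record (m += buf_recv[m]) and peek at a coordinate inside it (buf_recv[m+dim+1]) without knowing the atom style.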
Example 1: exchange
//......... part of the code omitted here .........
  //   new ghosts are created in borders()
  // map_set() is done at end of borders()
  // clear ghost count and any ghost bonus data internal to AtomVec

  if (map_style) atom->map_clear();
  atom->nghost = 0;
  atom->avec->clear_bonus();
  // ensure send buf is large enough for single atom
  // bufextra = max size of one atom = allowed overflow of sendbuf
  // fixes can change per-atom size requirement on-the-fly

  int bufextra_old = bufextra;
  maxexchange = maxexchange_atom + maxexchange_fix;
  bufextra = maxexchange + BUFEXTRA;
  if (bufextra > bufextra_old)
    memory->grow(buf_send,maxsend+bufextra,"comm:buf_send");
  // subbox bounds for orthogonal or triclinic

  if (triclinic == 0) {
    sublo = domain->sublo;
    subhi = domain->subhi;
  } else {
    sublo = domain->sublo_lamda;
    subhi = domain->subhi_lamda;
  }
  // loop over dimensions

  int dimension = domain->dimension;

  for (int dim = 0; dim < dimension; dim++) {

    // fill buffer with atoms leaving my box, using < and >=
    // when atom is deleted, fill it in with last atom

    x = atom->x;
    lo = sublo[dim];
    hi = subhi[dim];
    nlocal = atom->nlocal;
    i = nsend = 0;

    while (i < nlocal) {
      if (x[i][dim] < lo || x[i][dim] >= hi) {
        if (nsend > maxsend) grow_send(nsend,1);
        nsend += avec->pack_exchange(i,&buf_send[nsend]);
        avec->copy(nlocal-1,i,1);
        nlocal--;
      } else i++;
    }
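
    // each pack_exchange() call returned the number of buffer values it
    //   wrote, so nsend is now the total length of buf_send in doubles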
    atom->nlocal = nlocal;

    // send/recv atoms in both directions
    // send size of message first so receiver can realloc buf_recv if needed
    // if 1 proc in dimension, no send/recv
    //   set nrecv = 0 so buf_send atoms will be lost
    // if 2 procs in dimension, single send/recv
    // if more than 2 procs in dimension, send/recv to both neighbors

    if (procgrid[dim] == 1) nrecv = 0;
    else {
      MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,
                   &nrecv1,1,MPI_INT,procneigh[dim][1],0,world,MPI_STATUS_IGNORE);
      nrecv = nrecv1;
      if (procgrid[dim] > 2) {
        MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,
                     &nrecv2,1,MPI_INT,procneigh[dim][0],0,world,MPI_STATUS_IGNORE);
        nrecv += nrecv2;
      }
      if (nrecv > maxrecv) grow_recv(nrecv);

      MPI_Irecv(buf_recv,nrecv1,MPI_DOUBLE,procneigh[dim][1],0,
                world,&request);
      MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][0],0,world);
      MPI_Wait(&request,MPI_STATUS_IGNORE);

      if (procgrid[dim] > 2) {
        MPI_Irecv(&buf_recv[nrecv1],nrecv2,MPI_DOUBLE,procneigh[dim][0],0,
                  world,&request);
        MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][1],0,world);
        MPI_Wait(&request,MPI_STATUS_IGNORE);
      }
    }
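
    // buf_recv now holds nrecv doubles of packed atoms from both neighbors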

    // check incoming atoms to see if they are in my box
    // if so, add to my list
    // box check is only for this dimension,
    //   atom may be passed to another proc in later dims

    m = 0;
    while (m < nrecv) {
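      // buf_recv[m] is the packed length of this atom's record; its x
      //   coords follow at offsets 1..3, so m+dim+1 indexes x[dim]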
      value = buf_recv[m+dim+1];
      if (value >= lo && value < hi) m += avec->unpack_exchange(&buf_recv[m]);
      else m += static_cast<int> (buf_recv[m]);
    }
  }

  if (atom->firstgroupname) atom->first_reorder();
}
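One detail of Example 1 worth isolating is the deletion idiom in its while loop: an atom that leaves the box is packed, overwritten by the last local atom via avec->copy(), and i is deliberately not advanced so the atom that was moved in gets rechecked. A minimal standalone sketch of that remove-by-swap pattern, with an illustrative array and a made-up "leaving" predicate:

#include <cstdio>

// remove-by-swap: overwrite slot i with the last element and shrink;
// order is not preserved, but each delete is O(1) with no shifting
int main() {
  double a[6] = {10, 11, 12, 13, 14, 15};
  int n = 6;
  int i = 0;
  while (i < n) {
    if (a[i] > 11 && a[i] < 14) {   // "leaving" elements: 12 and 13
      a[i] = a[n-1];                // backfill the hole with the last element
      n--;                          // do not advance i: recheck the moved one
    } else i++;
  }
  for (int j = 0; j < n; j++) std::printf("%g ",a[j]);   // prints: 10 11 15 14
  std::printf("\n");
  return 0;
}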
Example 2: exchange_cuda
void CommCuda::exchange_cuda()
{
  int i,m,nsend,nrecv,nrecv1,nrecv2,nlocal;
  double lo,hi,value;
  double **x;
  double *sublo,*subhi,*buf;
  MPI_Request request;
  MPI_Status status;
  AtomVec *avec = atom->avec;
  timespec time1,time2,time3;

  // clear global->local map for owned and ghost atoms
  // b/c atoms migrate to new procs in exchange() and
  //   new ghosts are created in borders()
  // map_set() is done at end of borders()

  if (map_style) cuda->cu_tag->download();
  if (map_style) atom->map_clear();

  // subbox bounds for orthogonal or triclinic

  if (triclinic == 0) {
    sublo = domain->sublo;
    subhi = domain->subhi;
  } else {
    sublo = domain->sublo_lamda;
    subhi = domain->subhi_lamda;
  }

  // loop over dimensions

  for (int dim = 0; dim < 3; dim++) {

    // fill buffer with atoms leaving my box, using < and >=
    // when atom is deleted, fill it in with last atom

    cuda->shared_data.exchange_dim = dim;

    nlocal = atom->nlocal;
    avec->maxsend = &maxsend;
    nsend = avec->pack_exchange(dim,(double *) &buf_send);
    nlocal = atom->nlocal;
    atom->nlocal = nlocal;
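
    // note: unlike the host loop in Example 1, this pack_exchange() takes
    //   the dimension and (presumably) selects, packs, and removes the
    //   departing atoms on the device in a single call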

    // send/recv atoms in both directions
    // if 1 proc in dimension, no send/recv, set recv buf to send buf
    // if 2 procs in dimension, single send/recv
    // if more than 2 procs in dimension, send/recv to both neighbors

    clock_gettime(CLOCK_REALTIME,&time1);

    if (procgrid[dim] == 1) {
      nrecv = nsend;
      buf = buf_send;
    } else {
      MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,
                   &nrecv1,1,MPI_INT,procneigh[dim][1],0,world,&status);
      nrecv = nrecv1;
      if (procgrid[dim] > 2) {
        MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,
                     &nrecv2,1,MPI_INT,procneigh[dim][0],0,world,&status);
        nrecv += nrecv2;
      }
      if (nrecv+1 > maxrecv) grow_recv(nrecv+1);

      MPI_Irecv(buf_recv,nrecv1,MPI_DOUBLE,procneigh[dim][1],0,
                world,&request);
      MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][0],0,world);
      MPI_Wait(&request,&status);

      if (procgrid[dim] > 2) {
        MPI_Irecv(&buf_recv[nrecv1],nrecv2,MPI_DOUBLE,procneigh[dim][0],0,
                  world,&request);
        MPI_Send(buf_send,nsend,MPI_DOUBLE,procneigh[dim][1],0,world);
        MPI_Wait(&request,&status);
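        // presumably a terminator for the device-side unpack: if either
        //   message was empty, zero the word after the received data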
        if ((nrecv1 == 0) || (nrecv2 == 0)) buf_recv[nrecv] = 0;
      }
      buf = buf_recv;
    }

    //printf("nsend: %i nrecv: %i\n",nsend,nrecv);

    // check incoming atoms to see if they are in my box
    // if so, add to my list

    clock_gettime(CLOCK_REALTIME,&time2);
    cuda->shared_data.cuda_timings.comm_exchange_mpi +=
      time2.tv_sec-time1.tv_sec+1.0*(time2.tv_nsec-time1.tv_nsec)/1000000000;

    if (nrecv) {
      avec->maxsend = &maxsend;
      avec->unpack_exchange(buf);
    }
  }

  if (atom->firstgroupname) cuda->downloadAll();
//......... part of the code omitted here .........
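Both examples use the same two-phase MPI pattern: trade message sizes first so the receiver can grow its buffer, then move the payload with a non-blocking receive posted before the blocking send. A stripped-down sketch of that pattern for a single neighbor pair follows; exchange_1d, left, and right are illustrative names, and a real implementation would call grow_recv() instead of bailing out:

#include <mpi.h>

// exchange a packed buffer with one neighbor pair along a dimension;
// left/right play the roles of procneigh[dim][0]/procneigh[dim][1]
void exchange_1d(double *buf_send, int nsend,
                 double *buf_recv, int maxrecv,
                 int left, int right, MPI_Comm world)
{
  int nrecv = 0;

  // phase 1: trade sizes so the receiver can check its buffer
  MPI_Sendrecv(&nsend,1,MPI_INT,left,0,
               &nrecv,1,MPI_INT,right,0,world,MPI_STATUS_IGNORE);
  if (nrecv > maxrecv) return;     // real code would grow_recv(nrecv) here

  // phase 2: post the receive before sending so the pair cannot deadlock
  MPI_Request request;
  MPI_Irecv(buf_recv,nrecv,MPI_DOUBLE,right,0,world,&request);
  MPI_Send(buf_send,nsend,MPI_DOUBLE,left,0,world);
  MPI_Wait(&request,MPI_STATUS_IGNORE);
}

Posting MPI_Irecv before the matching MPI_Send is what keeps both ranks from blocking at the same time when messages are too large for MPI's internal buffering.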