

C++ MPI_Alltoall Function Code Examples

This article collects typical usage examples of the MPI_Alltoall function in C++. If you have been wondering what MPI_Alltoall does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Fifteen MPI_Alltoall code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
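
Before diving into the collected examples, a minimal self-contained sketch (ours, not taken from any of the projects below) may help fix the semantics: the count argument is the number of elements sent to each rank, not the total, and after the call block j of the receive buffer holds whatever rank j addressed to this rank.

/* Minimal sketch, not from the collected projects: each of the
 * nprocs ranks sends one int to every rank, including itself.
 * After the call, recv[j] on rank i holds what rank j placed in
 * send[i] -- i.e. MPI_Alltoall performs a distributed transpose. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, nprocs, j;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    int *send = malloc(nprocs * sizeof(int));
    int *recv = malloc(nprocs * sizeof(int));
    for (j = 0; j < nprocs; j++)
        send[j] = rank * 100 + j;   /* payload destined for rank j */

    /* count = 1: one MPI_INT per destination rank */
    MPI_Alltoall(send, 1, MPI_INT, recv, 1, MPI_INT, MPI_COMM_WORLD);

    for (j = 0; j < nprocs; j++)
        printf("rank %d got %d from rank %d\n", rank, recv[j], j);

    free(send);
    free(recv);
    MPI_Finalize();
    return 0;
}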

Example 1: exch_addr

int exch_addr(void)
{
    int i, rc;

    /* Exchange queue pair numbers and LIDs with all ranks */
    rc = MPI_Alltoall((void *)conn.qp_num, sizeof(uint32_t), MPI_BYTE,
                      (void *)rbuf.qp_num, sizeof(uint32_t), MPI_BYTE,
                      l_state.world_comm);
    assert(!rc);
    rc = MPI_Alltoall((void *)conn.lid, sizeof(uint16_t), MPI_BYTE,
                      (void *)rbuf.lid, sizeof(uint16_t), MPI_BYTE,
                      l_state.world_comm);
    assert(!rc);

#ifdef DEBUG
    for (i = 0; i < nprocs; i++) {
        if (me == i)
            continue;
        fprintf(stdout, "[%d] Remote QP %u, Remote LID %u, Rkey %u, Lkey %u\n"
                " LBuf %p, RBuf %p\n",
                me, rbuf.qp_num[i], rbuf.lid[i], rbuf.rkey[i], lbuf.mr->lkey,
                lbuf.buf, rbuf.buf[i]);
        fflush(stdout);
    }
#endif

    return 0;
}
Author: bcernohous, Project: ga, Lines: 28, Source: openib.c

Example 2: apply

static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1, *cld2, *cld2rest, *cld3;

     /* transpose locally to get contiguous chunks */
     cld1 = (plan_rdft *) ego->cld1;
     if (cld1) {
	  cld1->apply(ego->cld1, I, O);
	  
	  /* transpose chunks globally */
	  if (ego->equal_blocks)
	       MPI_Alltoall(O, ego->send_block_sizes[0], FFTW_MPI_TYPE,
			    I, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
			    ego->comm);
	  else
	       MPI_Alltoallv(O, ego->send_block_sizes, ego->send_block_offsets,
			     FFTW_MPI_TYPE,
			     I, ego->recv_block_sizes, ego->recv_block_offsets,
			     FFTW_MPI_TYPE,
			     ego->comm);
     }
     else { /* TRANSPOSED_IN, no need to destroy input */
	  /* transpose chunks globally */
	  if (ego->equal_blocks)
	       MPI_Alltoall(I, ego->send_block_sizes[0], FFTW_MPI_TYPE,
			    O, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
			    ego->comm);
	  else
	       MPI_Alltoallv(I, ego->send_block_sizes, ego->send_block_offsets,
			     FFTW_MPI_TYPE,
			     O, ego->recv_block_sizes, ego->recv_block_offsets,
			     FFTW_MPI_TYPE,
			     ego->comm);
	  I = O; /* final transpose (if any) is in-place */
     }
     
     /* transpose locally, again, to get ordinary row-major */
     cld2 = (plan_rdft *) ego->cld2;
     if (cld2) {
	  cld2->apply(ego->cld2, I, O);
	  cld2rest = (plan_rdft *) ego->cld2rest;
	  if (cld2rest) { /* leftover from unequal block sizes */
	       cld2rest->apply(ego->cld2rest,
			       I + ego->rest_Ioff, O + ego->rest_Ooff);
	       cld3 = (plan_rdft *) ego->cld3;
	       if (cld3)
		    cld3->apply(ego->cld3, O, O);
	       /* else TRANSPOSED_OUT is true and user wants O transposed */
	  }
     }
}
Author: 376473984, Project: fftw3, Lines: 52, Source: transpose-alltoall.c
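
A note on the design choice here: the plan records at planning time whether every rank contributes equally sized blocks. Only then can the simpler MPI_Alltoall be used, since it takes a single count that applies to all peers; with unequal blocks the code falls back to MPI_Alltoallv, which accepts per-rank counts and offsets. The same pattern recurs in Example 5.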

Example 3: main

int main( int argc, char* argv[] )
{
  int i;
  int myrank, nprocs;
  char *sbuf, *rbuf;
  int dsize;

  MPI_Init( &argc, &argv );

  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
  MPI_Type_size( DATATYPE, &dsize );

  /* one SIZE-element chunk per peer, in each direction */
  sbuf = (char*)malloc( SIZE*dsize*nprocs );
  rbuf = (char*)malloc( SIZE*dsize*nprocs );

  for( i=0; i<REPEAT; i++ )
    {
      MPI_Alltoall( sbuf, SIZE, DATATYPE,
                    rbuf, SIZE, DATATYPE,
                    MPI_COMM_WORLD );
    }

  free( sbuf );
  free( rbuf );

  MPI_Finalize();
  return 0;
}
Author: nerscadmin, Project: IPM, Lines: 26, Source: main.c

Example 4: execute_predefined_op

static inline void execute_predefined_op(int opnum, void* args, void* scratch) {
	if (opnum == -1) {
		MPI_Barrier(G_GOAL_WorldComm);
	}
	else if (opnum == -2) {
		struct bcast_args* bc = (struct bcast_args*) args;
		MPI_Bcast(bc->buffer, bc->count, MPI_BYTE, bc->root, G_GOAL_WorldComm);
	}
	else if (opnum == -3) {
		struct scatter_args* sc = (struct scatter_args*) args;
		MPI_Scatter(sc->sendbuffer, sc->count, MPI_BYTE, sc->recvbuffer, sc->count, MPI_BYTE, sc->root, G_GOAL_WorldComm);
	}
	else if (opnum == -4) {
		struct scatter_args* ga = (struct scatter_args*) args;
		MPI_Gather(ga->sendbuffer, ga->count, MPI_BYTE, ga->recvbuffer, ga->count, MPI_BYTE, ga->root, G_GOAL_WorldComm);
	}
	else if (opnum == -5) {
		struct alltoall_args* aa = (struct alltoall_args*) args;
		MPI_Alltoall(aa->sendbuffer, aa->count, MPI_BYTE, aa->recvbuffer, aa->count, MPI_BYTE, G_GOAL_WorldComm);
	}
	else if (opnum == -99) {
		/* dummy op - do nothing */
	}
	else  {
		printf("Predefined op number %i is not implemented yet\n", opnum);
	}
}
Author: sriram87, Project: mpi-goal, Lines: 27, Source: GOAL_Tran_MPI.hpp

Example 5: transpose_mpi_out_of_place

/* Out-of-place version of transpose_mpi (or rather, in place using
   a scratch array): */
static void transpose_mpi_out_of_place(transpose_mpi_plan p, int el_size,
				       TRANSPOSE_EL_TYPE *local_data,
				       TRANSPOSE_EL_TYPE *work)
{
     local_transpose_copy(local_data, work, el_size, p->local_nx, p->ny);

     if (p->all_blocks_equal)
	  MPI_Alltoall(work, p->send_block_size * el_size, p->el_type,
		       local_data, p->recv_block_size * el_size, p->el_type,
		       p->comm);
     else {
	  int i, n_pes = p->n_pes;

	  for (i = 0; i < n_pes; ++i) {
	       p->send_block_sizes[i] *= el_size;
	       p->recv_block_sizes[i] *= el_size;
	       p->send_block_offsets[i] *= el_size;
	       p->recv_block_offsets[i] *= el_size;
	  }
	  MPI_Alltoallv(work, p->send_block_sizes, p->send_block_offsets,
			p->el_type,
			local_data, p->recv_block_sizes, p->recv_block_offsets,
			p->el_type,
			p->comm);
	  for (i = 0; i < n_pes; ++i) {
	       p->send_block_sizes[i] /= el_size;
	       p->recv_block_sizes[i] /= el_size;
	       p->send_block_offsets[i] /= el_size;
	       p->recv_block_offsets[i] /= el_size;
	  }
     }

     do_permutation(local_data, p->perm_block_dest, p->num_perm_blocks,
		    p->perm_block_size * el_size);
}
Author: JonBoley, Project: peaqb-fast, Lines: 37, Source: transpose_mpi.c
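
Note how the MPI_Alltoallv branch multiplies the plan's per-rank sizes and offsets by el_size in place and divides them back afterwards: the plan stores counts in logical elements, but the call communicates in units of p->el_type, so the arrays are temporarily rescaled rather than copied.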

Example 6: main

int main(int argc, char** argv)
{
  // Initialize MPI
  MPI_Init(&argc, &argv);

  int size, rank;

  // Figure out the number of processes and our rank in the world group
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (size % 2) {
    printf("Need an even number of processes\n");
    MPI_Finalize();
    return 1;
  }

  // setup new communicators
  MPI_Comm twocomm;
  MPI_Comm_split(MPI_COMM_WORLD, rank/2, rank%2, &twocomm);

  int senddata[2], recvdata[2];
  senddata[(rank+1)%2] = rank;
  senddata[rank%2] = 0;
  MPI_Alltoall(senddata, 1, MPI_INT, recvdata, 1, MPI_INT, twocomm);

  // print to tty
  printf("process %i: received %i\n", rank, recvdata[(rank+1)%2]);

  // close down MPI
  MPI_Finalize();

  // ay-oh-kay
  return 0;
}
Author: hgranlund, Project: tma4280, Lines: 35, Source: mpiping-collective.c
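
Because MPI_Comm_split here produces communicators of exactly two ranks, MPI_Alltoall with a count of 1 degenerates into a pairwise swap: each rank keeps the element it addressed to itself and receives the element its partner addressed to it, which is why the received rank number is read from index (rank+1)%2.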

Example 7: MADRE_exchange

void MADRE_exchange(MC* mc, int *myRecvCount, int *mySendCount){
  int i;
  Particle *p;
  p = mc->particles;
  //cache blockLength
  int blockLength = MADRE_BLOCK_LENGTH;

  /* MADRE_pack should have constructed an integer number of blocks */
  assert(mc->nparticles % (int)MADRE_BLOCK_LENGTH == 0);
  int liveBlocks = mc->nparticles/blockLength;
  for (i=0; i<liveBlocks; ++i) destRanks[i] = p[i*blockLength].proc;

  /* By default, this was set to zero */
  myRecvCount[mc->mype] = mySendCount[mc->mype];

  /* Organize destIndices by proc-rank order */
  displ[0] = 0; 
  for (i=1;i<(mc->nprocs);++i) displ[i] = displ[i-1] + myRecvCount[i-1]/blockLength;

  /* All-to-all the receive start displacements so each proc learns where
     its outgoing blocks will land on each destination (destIndices) */
  MPI_Alltoall(displ, 1, MPI_INT, sdispl, 1, MPI_INT, MPI_COMM_WORLD);

  for (i=0; i<liveBlocks; ++i){
    destIndices[i]= sdispl[p[i*blockLength].proc];
    sdispl[p[i*blockLength].proc]++;
  }

  MADRE_redistribute(MADRE_particle, liveBlocks, destRanks, destIndices); 

  mc->nparticles = isum(myRecvCount, mc->nprocs);
  /* Each proc should have an integer number of blocks after exchanges */
  assert(mc->nparticles % (int)MADRE_BLOCK_LENGTH == 0);
}
Author: shamouda, Project: x10-applications, Lines: 33, Source: MC_Comm.c

Example 8: main

int main( int argc, char **argv )
{
    int send[4], recv[4];
    int rank, size, k;
    
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    
    if (size != 4) {
        printf("Error!:# of processors must be equal to 4\n");
        printf("Programm aborting....\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    for (k=0;k<size;k++) send[k] = (k+1) + rank*size;
    
    printf("%d : send = %d %d %d %d\n", rank, send[0], send[1], send[2], send[3]);
    
    MPI_Alltoall(send, 1, MPI_INT, recv, 1, MPI_INT, MPI_COMM_WORLD);
    
    printf("%d : recv = %d %d %d %d\n", rank, recv[0], recv[1], recv[2], recv[3]);
    
    MPI_Finalize();
    return 0;
}
Author: arnabd88, Project: CIVL-CS6110, Lines: 25, Source: matTrans.c
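
As the file name matTrans.c hints, the four send buffers form a 4x4 matrix distributed one row per rank, and the all-to-all leaves each rank holding one row of its transpose: recv[j] on rank i is send[i] from rank j.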

Example 9: exchangetest

/* run an exchange test with msgsz bytes per proc with bytes transferred
 * actually nproc*msgsz per exchange (all-to-all).
 */
double exchangetest(int iters, int msgsz) {
  int64_t starttime, endtime;
  int i;
  char *sendbuf, *recvbuf;

  sendbuf = malloc(msgsz*nproc);
  recvbuf = malloc(msgsz*nproc);

  if (sendbuf == NULL || recvbuf == NULL) {
    fprintf(stderr, "malloc");
    exit(-1);
  }

  barrier();

  starttime = getMicrosecondTimeStamp();
  for (i=0; i<iters; i++) {
    MPI_Alltoall(sendbuf, msgsz, MPI_CHAR, 
		 recvbuf, msgsz, MPI_CHAR, MPI_COMM_WORLD);
  }
  endtime = getMicrosecondTimeStamp();

  free(sendbuf);
  free(recvbuf);

  return (endtime-starttime);
}
Author: AbheekG, Project: chapel, Lines: 30, Source: testmpiperf.c

Example 10: main

int main(int argc, char *argv[])
{
    int rank, size;
    int chunk = 128;
    int i;
    int *sb;
    int *rb;
    int status;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 1; i < argc; ++i) {
        if (argv[i][0] != '-')
            continue;
        switch (argv[i][1]) {
        case 'm':
            chunk = atoi(argv[++i]);
            break;
        default:
            fprintf(stderr, "Unrecognized argument %s\n", argv[i]);
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
    }

    sb = (int *) malloc(size * chunk * sizeof(int));
    if (!sb) {
        perror("can't allocate send buffer");
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    rb = (int *) malloc(size * chunk * sizeof(int));
    if (!rb) {
        perror("can't allocate recv buffer");
        free(sb);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    for (i = 0; i < size * chunk; ++i) {
        sb[i] = rank + 1;
        rb[i] = 0;
    }

    /* fputs("Before MPI_Alltoall\n",stdout); */

    /* This should really send MPI_CHAR, but since sb and rb were allocated
     * as chunk*size*sizeof(int), the buffers are large enough */
    status = MPI_Alltoall(sb, chunk, MPI_INT, rb, chunk, MPI_INT, MPI_COMM_WORLD);

    /* fputs("Before MPI_Allreduce\n",stdout); */

    MTest_Finalize(status);

    free(sb);
    free(rb);

    MPI_Finalize();

    return MTestReturnValue(status);
}
Author: NexMirror, Project: MPICH, Lines: 59, Source: coll13.c
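
A caveat on the error handling here: under the default error handler MPI_ERRORS_ARE_FATAL, a failing MPI_Alltoall aborts the job rather than returning, so capturing the return value as this test does only has an effect when the error handler on the communicator has been switched to MPI_ERRORS_RETURN.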

Example 11: mpi_alltoall

void mpi_alltoall (void *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
		   void *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, 
		   MPI_Fint *comm, MPI_Fint *__ierr)
{
  *__ierr = MPI_Alltoall (sendbuf, *sendcount, MPI_Type_f2c(*sendtype),
			  recvbuf, *recvcount, MPI_Type_f2c(*recvtype), 
			  MPI_Comm_f2c (*comm));
}
Author: JeremyFyke, Project: cime, Lines: 8, Source: f_wrappers_pmpi.c

Example 12: ReAllocateRasterBlock

int ReAllocateRasterBlock( void * SendBuf, int SendCount, MPI_Datatype SendType,
                           void * RecvBuf, int RecvCount, MPI_Datatype RecvType,
                           MPI_Comm Comm )
{
    return MPI_Alltoall(SendBuf, SendCount, SendType,
                        RecvBuf, RecvCount, RecvType,
                        Comm);
}
Author: htoooth, Project: hpgc_new, Lines: 8, Source: mpioperator.reallocate.cpp

Example 13: all_to_all

static void all_to_all(const communicator& comm, const std::vector<T>& in, std::vector<T>& out, int n = 1)
{
  // NB: this will fail if T is a vector
  MPI_Alltoall(Datatype::address(const_cast<T&>(in[0])), n,
               Datatype::datatype(),
               Datatype::address(out[0]), n,
               Datatype::datatype(),
               comm);
}
Author: SINTEFMedtek, Project: VTK, Lines: 9, Source: collectives.hpp

Example 14: kmr_exchange_sizes

int
kmr_exchange_sizes(KMR *mr, long *sbuf, long *rbuf)
{
    MPI_Comm comm = mr->comm;
    int cc;
    cc = MPI_Alltoall(sbuf, 1, MPI_LONG, rbuf, 1, MPI_LONG, comm);
    assert(cc == MPI_SUCCESS);
    return MPI_SUCCESS;
}
Author: tnishinaga, Project: kmr, Lines: 9, Source: kmratoa.c

Example 15: FC_FUNC

FC_FUNC( mpi_alltoall , MPI_ALLTOALL )
                        ( void *sendbuf, int *sendcount, int *sendtype,
			  void *recvbuf, int *recvcount, int *recvtype,
                          int *comm, int *ierror )
{
  *ierror=MPI_Alltoall(sendbuf, *sendcount, *sendtype,
		       recvbuf, *recvcount, *recvtype,
		       *comm);
}
Author: ACME-Climate, Project: cime, Lines: 9, Source: collective.c
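
Contrast this with Example 11: there the Fortran integer handles are converted with MPI_Type_f2c and MPI_Comm_f2c before the C call, whereas this wrapper passes them straight through, which only compiles against an implementation whose C handles are plain integers (this file appears to come from a bundled mpi-serial-style library).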


Note: The MPI_Alltoall function examples in this article were compiled by 纯净天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution and use should follow the corresponding project's license. Do not reproduce without permission.