

C++ MPI_Comm_split Function Code Examples

This article collects typical usage examples of the C++ MPI_Comm_split function, gathered from open-source projects. If you are wondering what exactly MPI_Comm_split does, how to call it, or what real-world uses look like, the curated examples below should help.


The following presents 15 code examples of the MPI_Comm_split function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better C++ code examples.
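
Before diving into the examples, here is a minimal, self-contained sketch of the call itself (assuming any standard MPI implementation, such as MPICH or Open MPI). MPI_Comm_split(comm, color, key, &newcomm) partitions comm into disjoint subcommunicators, one per distinct color value; within each new communicator, ranks are ordered by key, with ties broken by the rank in the parent communicator. A process that passes MPI_UNDEFINED as the color receives MPI_COMM_NULL.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, newrank;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* color = rank % 2: even ranks form one subcommunicator, odd ranks
       another; key = rank preserves the original relative ordering */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &newcomm);

    MPI_Comm_rank(newcomm, &newrank);
    printf("world rank %d -> subcomm rank %d\n", rank, newrank);

    MPI_Comm_free(&newcomm);
    MPI_Finalize();
    return 0;
}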

Example 1: setupCommunicators

/*-----------------------------------------------------------*/
int setupCommunicators(){
	int procHash;

	/* Get hash from processor name */
	procHash = procNameToHash();

	/* Comm_split using procHash as colour to get
	 * local communicator.
	 */
	MPI_Comm_split(comm, procHash, 0, &localComm);

	/* Find ranks of processes in localComm */
	MPI_Comm_rank(localComm, &localCommRank);

	/* Find the size of localComm (for use in calculating multi datasize) */
	MPI_Comm_size(localComm, &localCommSize);

	/* Use localCommRank as colour to get communicator across nodes. */
	MPI_Comm_split(comm, localCommRank, 0, &crossComm);

	/* Find ranks of processes in crossComm */
	MPI_Comm_rank(crossComm, &crossCommRank);

    MPI_Barrier(comm);

	return 0;
}
Developer: jbreitbart, Project: OpenMP-GASPI-MicroBenchmark-Suite, Lines: 28, Source: parallelEnvironment.c

Example 2: assignment

/*@C
  PetscSubcommSetTypeGeneral - Set type of subcommunicators from user's specifications

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color   - control of subset assignment (nonnegative integer). Processes with the same color are in the same subcommunicator.
.  subrank - rank in the subcommunicator
-  duprank - rank in the dupparent (see PetscSubcomm)

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
@*/
PetscErrorCode  PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank,PetscMPIInt duprank)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;
  PetscMPIInt    size;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm
     if duprank is not a valid number, then dupcomm is not created - not all applications require dupcomm! */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (duprank == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"duprank==PETSC_DECIDE is not supported yet");
  else if (duprank >= 0 && duprank < size){
    ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
  psubcomm->color     = color;
  PetscFunctionReturn(0);
}
Developer: erdc-cm, Project: petsc-dev, Lines: 43, Source: subcomm.c

Example 3: mesh

void mesh(MPI_Comm comm, MPI_Comm *row_comm, MPI_Comm *col_comm, 
		int p, int q, int iam, int np, int *riam, int *ciam)
{
	//not enough processes for a p x q mesh
	if (np < p * q)
		return;

	int color;
	if (iam < p * q) {
		color = iam / q;
		//int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm)
		//How a typical implementation proceeds:
		//1. Use MPI_Allgather to get the color and key from each process
		//2. Count the number of processes with the same color; create a
		//   communicator with that many processes. A process that passed
		//   MPI_UNDEFINED as the color receives MPI_COMM_NULL instead.
		//3. Use key to order the ranks
		//4. Set the VCRs using the ordered key values
		MPI_Comm_split(comm, color, iam, row_comm);

		color = iam % q; 
		MPI_Comm_split(comm, color, iam, col_comm);

		//rank within row_comm (same row, different columns): the column index
		MPI_Comm_rank(*row_comm, ciam);

		//rank within col_comm (same column, different rows): the row index
		MPI_Comm_rank(*col_comm, riam);
	} else {
		color = MPI_UNDEFINED;
		MPI_Comm_split(comm, color, iam, row_comm);
		MPI_Comm_split(comm, color, iam, col_comm);
	}
}
Developer: Jokeren, Project: Tutorial, Lines: 33, Source: partition.c
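
A minimal driver for the mesh() routine above might look like the following sketch (p = 2 and q = 3 are illustrative choices; the routine requires p*q <= np, and ranks beyond p*q receive MPI_COMM_NULL for both communicators):

#include <mpi.h>

/* mesh() as defined in the example above */
void mesh(MPI_Comm comm, MPI_Comm *row_comm, MPI_Comm *col_comm,
          int p, int q, int iam, int np, int *riam, int *ciam);

int main(int argc, char **argv)
{
    int iam, np, riam = -1, ciam = -1;
    MPI_Comm row_comm = MPI_COMM_NULL, col_comm = MPI_COMM_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &iam);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    /* carve a 2 x 3 process mesh out of the first six ranks */
    mesh(MPI_COMM_WORLD, &row_comm, &col_comm, 2, 3, iam, np, &riam, &ciam);

    if (row_comm != MPI_COMM_NULL) MPI_Comm_free(&row_comm);
    if (col_comm != MPI_COMM_NULL) MPI_Comm_free(&col_comm);
    MPI_Finalize();
    return 0;
}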

Example 4: _create_segment_group

static void
_create_segment_group(struct SegmentGroupDescr * descr, size_t * sizes, size_t * outsizes, size_t avgsegsize, int Ngroup, MPI_Comm comm)
{
    int i;
    int ThisTask, NTask;

    MPI_Comm_size(comm, &NTask);
    MPI_Comm_rank(comm, &ThisTask);

    descr->ThisSegment = _assign_colors(avgsegsize, sizes, outsizes, &descr->Nsegments, comm);

    if(descr->ThisSegment >= 0) {
        /* assign segments to groups.
         * if Nsegments < Ngroup, some groups will have no segments, and thus no ranks belong to them. */
        descr->GroupID = ((size_t) descr->ThisSegment) * Ngroup / descr->Nsegments;
    } else {
        descr->GroupID = Ngroup + 1;
        descr->ThisSegment = NTask + 1;
    }

    descr->Ngroup = Ngroup;

    MPI_Comm_split(comm, descr->GroupID, ThisTask, &descr->Group);

    MPI_Allreduce(&descr->ThisSegment, &descr->segment_start, 1, MPI_INT, MPI_MIN, descr->Group);
    MPI_Allreduce(&descr->ThisSegment, &descr->segment_end, 1, MPI_INT, MPI_MAX, descr->Group);

    descr->segment_end ++;

    int rank;

    MPI_Comm_rank(descr->Group, &rank);

    struct { 
        size_t val;
        int   rank;
    } leader_st;

    leader_st.val = sizes[ThisTask];
    leader_st.rank = rank;

    MPI_Allreduce(MPI_IN_PLACE, &leader_st, 1, MPI_LONG_INT, MPI_MAXLOC, descr->Group);

    descr->is_group_leader = rank == leader_st.rank;
    descr->group_leader_rank = leader_st.rank;

    MPI_Comm_split(comm, rank == leader_st.rank? 0 : 1, ThisTask, &descr->Leader);

    MPI_Comm_split(descr->Group, descr->ThisSegment, ThisTask, &descr->Segment);
    int rank2;

    MPI_Comm_rank(descr->Segment, &rank2);

    leader_st.val = sizes[ThisTask];
    leader_st.rank = rank2;

    MPI_Allreduce(MPI_IN_PLACE, &leader_st, 1, MPI_LONG_INT, MPI_MINLOC, descr->Segment);
    descr->segment_leader_rank = leader_st.rank;
}
Developer: rainwoodman, Project: MP-sort, Lines: 59, Source: mpsort-mpi.c

Example 5: prepareMPIComm

//Set global mpi_comm and mpi_group
//Check if the number of nodes is to the power of 2 ( 2, 4, 8, 16, ...)
//and if not create a new group that contains all nodes up to the closest lowest valid number of nodes.
//Return 1 if the node is part of the active comm world
//Return 0 if not
char prepareMPIComm(void)
{
    float tf;
    int ti;
    int idOld;
    char activeNode;

    //Get Rank and GroupSize
    MPI_Comm_rank (MPI_COMM_WORLD, &idOld);        /* get current process id */
    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);        /* get number of processes */

    if(mpi_size < 2) {
        printf("Error! At least two nodes are needed!\n");
        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    //Test whether mpi_size is a power of 2
    tf = log(mpi_size)/log(2);
    ti = tf;
    tf = tf - (float)ti;
    if(tf > 0)
    {
        if(idOld == 0) {
            printf("Cant use all possible nodes! Will only use the first [%d] of [%d].\n",1<<ti, mpi_size);
            fflush(stdout);
        }
        mpi_size = 1 << ti;

        if(idOld < mpi_size)
        {
            activeNode = 1;

            // Split comm into two groups: active and inactive nodes
            MPI_Comm_split(MPI_COMM_WORLD, 0, idOld, &mpi_comm);
            MPI_Comm_group( mpi_comm, &mpi_group);

            //printf("[%d] Creating new group with [%d] members.\n",idOld, mpi_size);
            //printf("Created new group of size [%d] with id [%d] -> [%d]\n", mpi_size, idOld, mpi_id); fflush(stdout);

        } else {
            activeNode = 0;
            MPI_Comm_split(MPI_COMM_WORLD, 1, idOld - mpi_size, &mpi_comm);
            MPI_Comm_group( mpi_comm, &mpi_group);

            //printf("Node [%d] wont participate.\n", idOld);
        }
    } else {
        activeNode = 1;
        //If the number of nodes is alright, use the standard world comm
        MPI_Comm_group( MPI_COMM_WORLD, &mpi_group);
        MPI_Comm_create( MPI_COMM_WORLD, mpi_group, &mpi_comm);
    }

    MPI_Comm_size( mpi_comm, &mpi_size );
    MPI_Comm_rank( mpi_comm, &mpi_id );
    return activeNode;
}
Developer: mapa17, Project: closestPair, Lines: 63, Source: closestPair_tools.c
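
Note that the log()-based power-of-two test above relies on floating-point rounding and can misclassify sizes. An exact integer alternative (a sketch, not part of the original project) is the classic bit trick:

/* exact power-of-two test: a power of two has exactly one bit set,
   so n & (n - 1) clears it and leaves 0 */
static int is_power_of_two(int n)
{
    return n > 0 && (n & (n - 1)) == 0;
}

/* largest power of two not exceeding n, replacing the log()/cast rounding */
static int floor_power_of_two(int n)
{
    int p = 1;
    while (p <= n / 2)
        p *= 2;
    return p;
}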

Example 6: PetscSubcommCreate_interlaced

PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscMPIInt    np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find color for this proc */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_INTERLACED;
  PetscFunctionReturn(0);
}
Developer: hsahasra, Project: petsc-magma-dense-mat, Lines: 56, Source: subcomm.c

Example 7: PetscSubcommCreate_contiguous

PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscMPIInt    np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get color and subrank of this proc */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else rankstart += subsize[i];
  }

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_CONTIGUOUS;
  PetscFunctionReturn(0);
}
Developer: hsahasra, Project: petsc-magma-dense-mat, Lines: 54, Source: subcomm.c

Example 8: initialize

static void initialize(int nid) {
    MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask);

    /* First split into ranks on the same node */
    MPI_Comm_split(MPI_COMM_WORLD, nid, ThisTask, &NODE_GROUPS);

    MPI_Comm_rank(NODE_GROUPS, &NodeRank);

    /* Next split by Node Rank */
    MPI_Comm_split(MPI_COMM_WORLD, NodeRank, ThisTask, &NODE_LEADERS);

}
Developer: rainwoodman, Project: python-mpi-bcast, Lines: 12, Source: bcast.c
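
On MPI-3 and later, the node-local split in this example does not require an externally supplied node id: MPI_Comm_split_type with MPI_COMM_TYPE_SHARED groups the ranks that share a memory domain (i.e., the same node). A sketch of the same two-level split under that assumption:

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm node_groups, node_leaders;
    int world_rank, node_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    /* ranks that can share memory (i.e., on the same node) group together */
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, world_rank,
                        MPI_INFO_NULL, &node_groups);
    MPI_Comm_rank(node_groups, &node_rank);

    /* one cross-node communicator per node-local rank; the node_rank == 0
       communicator holds exactly one leader per node */
    MPI_Comm_split(MPI_COMM_WORLD, node_rank, world_rank, &node_leaders);

    MPI_Comm_free(&node_leaders);
    MPI_Comm_free(&node_groups);
    MPI_Finalize();
    return 0;
}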

Example 9: Set_Communicator

void Set_Communicator( struct comm_info *c_info )
/**********************************************************************

----------------------------------------------------------------------
             VARIABLE              |       TYPE        |   MEANING
----------------------------------------------------------------------
Input      :                       |                   |
Output     :                       |                   |
                                   |                   |
In/Out     :  c_info               | struct comm_info* | see comm_info.h
                                   |                   | Communicator set up
-----------------------------------------------------------------------

-----------------------------------------------------------------------
Description: Initialization of communicators by group coloring
---------------------------------------------------------------------*/
{
    int color,key,i;

    /* insert choice for communicator here;
       NOTE   :  globally more than one communicator is allowed
       Example: grouping of pairs of processes:
       0 0 1 1 2 2  .. (if even),  UNDEF 0 0 1 1 2 2  .. (if odd)
       */

    if( c_info->communicator != MPI_COMM_NULL &&
            c_info->communicator != MPI_COMM_SELF &&
            c_info->communicator != MPI_COMM_WORLD)
    {
        i=MPI_Comm_free(&c_info->communicator);
        Err_Hand(1,i);
    }

    if(c_info->group_mode >= 0)
    {
        i=c_info->w_rank;
        color = i/c_info->NP;
        c_info->group_no = color;
        key = 0;
        if(color >= c_info->w_num_procs/c_info->NP) color=MPI_UNDEFINED;
        MPI_Comm_split(MPI_COMM_WORLD, color, key, &c_info->communicator);
    }
    /* Default choice and Group definition.  */
    else
    {
        if(c_info->w_rank < c_info->NP) color=0;
        else color=MPI_UNDEFINED;
        c_info->group_no = 0;
        key=0;
        MPI_Comm_split(MPI_COMM_WORLD, color, key, &c_info->communicator);
    }
}
Developer: 01org, Project: opa-mpi-apps, Lines: 52, Source: pmb_init.c

Example 10: assignment

/*@C
  PetscSubcommSetTypeGeneral - Set a PetscSubcomm from user's specifications

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color   - control of subset assignment (nonnegative integer). Processes with the same color are in the same subcommunicator.
-  subrank - rank in the subcommunicator

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
@*/
PetscErrorCode PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;
  PetscMPIInt    size,icolor,duprank,*recvbuf,sendbuf[3],mysubsize,rank,*subsize;
  PetscMPIInt    i,nsubcomm=psubcomm->n;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (nsubcomm < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %d is incorrect. Call PetscSubcommSetNumber()",nsubcomm);

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  /* TODO: this can be done in a scalable way (i.e., without allocating an array of size 'size') as is done in PetscObjectsCreateGlobalOrdering(). */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc1(2*size,&recvbuf);CHKERRQ(ierr);

  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(subcomm,&mysubsize);CHKERRQ(ierr);

  sendbuf[0] = color;
  sendbuf[1] = mysubsize;
  ierr = MPI_Allgather(sendbuf,2,MPI_INT,recvbuf,2,MPI_INT,comm);CHKERRQ(ierr);

  ierr = PetscCalloc1(nsubcomm,&subsize);CHKERRQ(ierr);
  for (i=0; i<2*size; i+=2) {
    subsize[recvbuf[i]] = recvbuf[i+1];
  }
  ierr = PetscFree(recvbuf);CHKERRQ(ierr);

  duprank = 0;
  for (icolor=0; icolor<nsubcomm; icolor++) {
    if (icolor != color) { /* not color of this process */
      duprank += subsize[icolor];
    } else {
      duprank += subrank;
      break;
    }
  }
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->child,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_GENERAL;
  PetscFunctionReturn(0);
}
Developer: masa-ito, Project: PETScToPoisson, Lines: 68, Source: subcomm.c
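
A typical calling sequence for this routine might look like the sketch below. The surrounding calls (PetscSubcommCreate(), PetscSubcommSetNumber(), the PetscFunctionBegin/Return macros) come from the same PETSc API family as the examples here, but exact signatures vary between PETSc versions, and the even/odd color choice is purely illustrative:

#include <petscsys.h>

/* a sketch: split PETSC_COMM_WORLD into two subcommunicators by even/odd rank */
PetscErrorCode splitEvenOdd(PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscSubcommCreate(PETSC_COMM_WORLD,psubcomm);CHKERRQ(ierr);
  ierr = PetscSubcommSetNumber(*psubcomm,2);CHKERRQ(ierr);
  /* ranks with the same color (rank%2) share a subcommunicator;
     rank/2 orders them within it */
  ierr = PetscSubcommSetTypeGeneral(*psubcomm,rank%2,rank/2);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}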

Example 11: main

int main(int argc, char **argv)
{
	MPI_Comm c2;
	int rank;
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	if (rank == 1) {
		/* a negative color that is not MPI_UNDEFINED is invalid per the
		   MPI standard; this test triggers that error deliberately */
		MPI_Comm_split(MPI_COMM_WORLD, -10, 0, &c2);
	} else {
		MPI_Comm_split(MPI_COMM_WORLD, 10, 0, &c2);
	}
	MPI_Finalize();
	return 0;
}
Developer: msurkovsky, Project: aislinn, Lines: 14, Source: split-invalid.cpp

Example 12: main

int main(int argc, char** argv) {

    /* MPI_Comm_rank/MPI_Comm_size expect int*, so these must be int, not unsigned */
    int world_rank = 0, local_rank = 0;
    int world_count = 0, local_count = 0;
    int num_of_groups = 8, group = 0;
    
    //New communicator
    MPI_Comm COMM_LOCAL;
    
    //Basic stuff
    MPI_Init(&argc, &argv);    
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_count);
    
    //Figure out which group each process goes to and its local rank.
    group = world_rank % num_of_groups;
    local_rank = world_rank / num_of_groups;
    
    //Add processes from COMM_WORLD to their respective COMM_LOCAL
    MPI_Comm_split(MPI_COMM_WORLD, group, local_rank, &COMM_LOCAL);
    
    //Get the number of processes in each communicator
    MPI_Comm_size(COMM_LOCAL, &local_count);
    
    //use if statement to control which processors print output
    //if(group == 2), if(local_rank == 3), etc
    if(1)
        printf("Group %d of %d, Local rank: %d of %d, Global rank: %d of %d\n", group, num_of_groups, local_rank, local_count, world_rank, world_count);
    
    MPI_Finalize();
    return (EXIT_SUCCESS);
}
Developer: 026rus, Project: parallel_graham, Lines: 32, Source: custom_communicators.c
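
To see the grouping in action, the program can be compiled with mpicc and launched with, e.g., mpirun -np 16 (command names assume a standard MPI toolchain). With 16 ranks and num_of_groups = 8, each group holds two processes, so rank 11, for instance, prints "Group 3 of 8, Local rank: 1 of 2, Global rank: 11 of 16"; the output order across ranks is nondeterministic.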

Example 13: main

int main(int argc, char* argv[]) {
  int id=0, numprocs;
  int color;
  MPI_Comm local;
  double t1, t2;

  ELG_USER_START("main");

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &id);
  printf("%03d: ctest-elg start\n", id);

  /* define Cartesian topology */
  elg_cart_create(2, (numprocs+1)/2, 0, 1, 1, 0);
  elg_cart_coords(id%2, id/2, 0);

  color = (id >= numprocs/2);
  MPI_Comm_split(MPI_COMM_WORLD, color, id, &local);

  t1 = MPI_Wtime();
  parallel(MPI_COMM_WORLD);
  parallel(local);
  parallel(MPI_COMM_WORLD);
  t2 = MPI_Wtime();

  MPI_Comm_free(&local);
  MPI_Finalize();
  printf("%03d: ctest-elg end  (%12.9f)\n", id, (t2-t1));

  ELG_USER_END("main");
  return 0;
}
Developer: linearregression, Project: scalasca, Lines: 33, Source: ctest-elg.c

Example 14: PX

void PX(split_cart_procmesh_for_3dto2d_remap_q1)(
    const INT *n, MPI_Comm comm_cart_3d,
    MPI_Comm *comm_1d
    )
{
  int p0, p1, q0=0, q1=0;
  int ndims, coords_3d[3];
  int dim_1d, period_1d, reorder=0;
  int color, key;
  MPI_Comm comm;

  if( !PX(is_cart_procmesh)(comm_cart_3d) )
    return;

  MPI_Cartdim_get(comm_cart_3d, &ndims);
  if(ndims != 3)
    return;

  PX(get_mpi_cart_coords)(comm_cart_3d, ndims, coords_3d);
  PX(get_procmesh_dims_2d)(n, comm_cart_3d, &p0, &p1, &q0, &q1);

  /* split into p0*p1*q0 comms of size q1 */
  color = coords_3d[0]*p1*q0 + coords_3d[1]*q0 + coords_3d[2]/q1;
  key = coords_3d[2]%q1;
//   key = coords_3d[2]/q0; /* TODO: delete this line after several tests */
  MPI_Comm_split(comm_cart_3d, color, key, &comm);

  dim_1d = q1; period_1d = 1;
  MPI_Cart_create(comm, ndims=1, &dim_1d, &period_1d, reorder,
      comm_1d);

  MPI_Comm_free(&comm);
}
Developer: arnolda, Project: scafacos, Lines: 33, Source: procmesh.c

Example 15: splitComm

inline MPI_Comm splitComm(int color, MPI_Comm globalComm)
{
    int myGlobalId = getProcId(globalComm);
    MPI_Comm localComm;
    // split the parent communicator itself; ranks sharing a color
    // end up together in localComm
    MPI_Comm_split(globalComm, color, myGlobalId, &localComm);
    return localComm;
}
Developer: rainiscold, Project: trilinos, Lines: 7, Source: DebugTool.hpp


Note: The MPI_Comm_split examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or reuse should follow each project's License. Do not reproduce without permission.