This article collects typical usage examples of the MPI_Group_incl function in C/C++. If you have been wondering what MPI_Group_incl does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of MPI_Group_incl drawn from open-source projects, listed roughly in order of popularity.
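Before the project samples, here is a minimal, self-contained sketch of the usual MPI_Group_incl pattern: extract the group of MPI_COMM_WORLD, include a subset of ranks, create a communicator for that subset, then release the handles. The choice of the even ranks as the subset and the variable names are illustrative assumptions, not taken from any of the examples below.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Group world_group, even_group;
    MPI_Comm even_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Build the list of even ranks (an arbitrary subset chosen for illustration) */
    int neven = (size + 1) / 2;
    int *even_ranks = malloc(neven * sizeof(int));
    for (int i = 0; i < neven; i++)
        even_ranks[i] = 2 * i;

    /* Group of MPI_COMM_WORLD -> subgroup -> communicator for the subgroup */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_incl(world_group, neven, even_ranks, &even_group);
    MPI_Comm_create(MPI_COMM_WORLD, even_group, &even_comm);

    if (even_comm != MPI_COMM_NULL) {
        int even_rank;
        MPI_Comm_rank(even_comm, &even_rank);
        printf("world rank %d has rank %d in the even communicator\n", rank, even_rank);
        MPI_Comm_free(&even_comm);
    }

    MPI_Group_free(&even_group);
    MPI_Group_free(&world_group);
    free(even_ranks);
    MPI_Finalize();
    return 0;
}

Ranks that are not listed in the included subset simply receive MPI_COMM_NULL from MPI_Comm_create, which is why the communicator is checked before use.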
Example 1: init_comms
void init_comms(void){
    extern int numtasks, rank,
               myfieldrank, myenglandrank, mybrazilrank,
               field_ranks[12], eng_ranks[11], bra_ranks[11];
    extern MPI_Group world, england, brazil, engfield, brafield, field;
    extern MPI_Comm eng_comm, bra_comm, engfield_comm, brafield_comm, field_comm;

    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (numtasks != NPROCS)
    {
        printf("Must specify MP_PROCS= %d. Terminating.\n", NPROCS);
        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    /* Build rank subsets of MPI_COMM_WORLD and their unions */
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_incl(world, 12, field_ranks, &field);
    MPI_Group_incl(world, 11, eng_ranks, &england);
    MPI_Group_incl(world, 11, bra_ranks, &brazil);
    MPI_Group_union(field, england, &engfield);
    MPI_Group_union(field, brazil, &brafield);

    /* Create one communicator per group (collective over MPI_COMM_WORLD) */
    MPI_Comm_create(MPI_COMM_WORLD, field, &field_comm);
    MPI_Comm_create(MPI_COMM_WORLD, england, &eng_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brazil, &bra_comm);
    MPI_Comm_create(MPI_COMM_WORLD, engfield, &engfield_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brafield, &brafield_comm);

    MPI_Group_rank(field, &myfieldrank);
    MPI_Group_rank(england, &myenglandrank);
    MPI_Group_rank(brazil, &mybrazilrank);
}
Example 2: main
int main( int argc, char **argv )
{
    int rank, size, i;
    MPI_Group group1, group2, group3, groupall, groupunion, newgroup;
    MPI_Comm newcomm;
    int ranks1[100], ranks2[100], ranks3[100];
    int nranks1=0, nranks2=0, nranks3=0;

    MPI_Init( &argc, &argv );
    MPI_Barrier( MPI_COMM_WORLD );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_group( MPI_COMM_WORLD, &groupall );

    /* Divide groups */
    for (i=0; i<size; i++)
        if ( (i%3)==0 )
            ranks1[nranks1++] = i;
        else if ( (i%3)==1 )
            ranks2[nranks2++] = i;
        else
            ranks3[nranks3++] = i;

    MPI_Group_incl ( groupall, nranks1, ranks1, &group1 );
    MPI_Group_incl ( groupall, nranks2, ranks2, &group2 );
    MPI_Group_incl ( groupall, nranks3, ranks3, &group3 );

    MPI_Group_difference ( groupall, group2, &groupunion );
    MPI_Comm_create ( MPI_COMM_WORLD, group3, &newcomm );
    newgroup = MPI_GROUP_NULL;
    if (newcomm != MPI_COMM_NULL)
    {
        /* If we don't belong to group3, this would fail */
        MPI_Comm_group ( newcomm, &newgroup );
    }

    /* Free the groups */
    MPI_Group_free( &groupall );
    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    MPI_Group_free( &group3 );
    MPI_Group_free( &groupunion );
    if (newgroup != MPI_GROUP_NULL)
    {
        MPI_Group_free( &newgroup );
    }

    /* Free the communicator */
    if (newcomm != MPI_COMM_NULL)
        MPI_Comm_free( &newcomm );
    Test_Waitforall( );
    MPI_Finalize();
    return 0;
}
Example 3: main
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rc;
    int ranks[2];
    MPI_Group ng;
    char str[MPI_MAX_ERROR_STRING+1];
    int slen;

    MTest_Init( &argc, &argv );

    /* Set errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    /* Create some valid input data except for the group handle */
    ranks[0] = 0;
    rc = MPI_Group_incl( MPI_COMM_WORLD, 1, ranks, &ng );
    if (rc == MPI_SUCCESS) {
        errs++;
        printf( "Did not detect invalid handle (comm) in group_incl\n" );
    }
    else {
        if (verbose) {
            MPI_Error_string( rc, str, &slen );
            printf( "Found expected error; message is: %s\n", str );
        }
    }

    MTest_Finalize( errs );
    MPI_Finalize( );
    return 0;
}
Example 4: main
int main(int argc, char *argv[]) {
    int rank, new_rank, sendbuf, recvbuf, numtasks;
    int P[4][4] = { {0,1,2,3}, {4,5,6,7}, {8,9,10,11}, {12,13,14,15} };
    MPI_Group orig_group, new_group;
    MPI_Comm new_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    if (numtasks != NPROCS) {
        printf("Must specify MP_PROCS= %d. Terminating.\n", NPROCS);
        MPI_Finalize();
        exit(0);
    }

    sendbuf = rank;

    /* Each row of P (a block of four consecutive ranks) becomes its own group and communicator */
    MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
    MPI_Group_incl(orig_group, NPROCS/4, P[rank/4], &new_group);
    MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);

    MPI_Allreduce(&sendbuf, &recvbuf, 1, MPI_INT, MPI_SUM, new_comm);
    MPI_Group_rank(new_group, &new_rank);
    printf("rank= %2d newgroup= %2d newrank= %2d recvbuf= %2d\n", rank, rank/4, new_rank, recvbuf);

    MPI_Finalize();
    return 0;
}
Example 5: mpi_group_incl_
void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) {
    MPI_Group tmp;
    *ierr = MPI_Group_incl(get_group(*group), *n, ranks, &tmp);
    if (*ierr == MPI_SUCCESS) {
        *group_out = new_group(tmp);
    }
}
Example 6: PetscSFGetGroups
/*@C
   PetscSFGetGroups - gets incoming and outgoing process groups

   Collective

   Input Argument:
.  sf - star forest

   Output Arguments:
+  incoming - group of origin processes for incoming edges (leaves that reference my roots)
-  outgoing - group of destination processes for outgoing edges (roots that I reference)

   Level: developer

.seealso: PetscSFGetWindow(), PetscSFRestoreWindow()
@*/
PetscErrorCode PetscSFGetGroups(PetscSF sf,MPI_Group *incoming,MPI_Group *outgoing)
{
  PetscErrorCode ierr;
  MPI_Group      group;

  PetscFunctionBegin;
  if (sf->ingroup == MPI_GROUP_NULL) {
    PetscInt       i;
    const PetscInt *indegree;
    PetscMPIInt    rank,*outranks,*inranks;
    PetscSFNode    *remote;
    PetscSF        bgcount;

    /* Compute the number of incoming ranks */
    ierr = PetscMalloc1(sf->nranks,&remote);CHKERRQ(ierr);
    for (i=0; i<sf->nranks; i++) {
      remote[i].rank  = sf->ranks[i];
      remote[i].index = 0;
    }
    ierr = PetscSFDuplicate(sf,PETSCSF_DUPLICATE_CONFONLY,&bgcount);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(bgcount,1,sf->nranks,NULL,PETSC_COPY_VALUES,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFComputeDegreeBegin(bgcount,&indegree);CHKERRQ(ierr);
    ierr = PetscSFComputeDegreeEnd(bgcount,&indegree);CHKERRQ(ierr);

    /* Enumerate the incoming ranks */
    ierr = PetscMalloc2(indegree[0],&inranks,sf->nranks,&outranks);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRQ(ierr);
    for (i=0; i<sf->nranks; i++) outranks[i] = rank;
    ierr = PetscSFGatherBegin(bgcount,MPI_INT,outranks,inranks);CHKERRQ(ierr);
    ierr = PetscSFGatherEnd(bgcount,MPI_INT,outranks,inranks);CHKERRQ(ierr);
    ierr = MPI_Comm_group(PetscObjectComm((PetscObject)sf),&group);CHKERRQ(ierr);
    ierr = MPI_Group_incl(group,indegree[0],inranks,&sf->ingroup);CHKERRQ(ierr);
    ierr = MPI_Group_free(&group);CHKERRQ(ierr);
    ierr = PetscFree2(inranks,outranks);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&bgcount);CHKERRQ(ierr);
  }
  *incoming = sf->ingroup;

  if (sf->outgroup == MPI_GROUP_NULL) {
    ierr = MPI_Comm_group(PetscObjectComm((PetscObject)sf),&group);CHKERRQ(ierr);
    ierr = MPI_Group_incl(group,sf->nranks,sf->ranks,&sf->outgroup);CHKERRQ(ierr);
    ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  }
  *outgoing = sf->outgroup;
  PetscFunctionReturn(0);
}
Example 7: SplitMPI_Communicator::CreateCommunicator
bool SplitMPI_Communicator::CreateCommunicator(MPI_Comm comm_world, int np, int nb_ddc)
{
    int n_DDC;
    bool splitcomm;

    if ((nb_ddc > 0) && (nb_ddc < np))
    {   // if the total number of cores is larger than the number of DDCs,
        // two new MPI groups will be generated
#ifdef OGS_FEM_IPQC
        splitcomm = true;
        n_DDC = nb_ddc; // number of DDCs
        int DDC_ranks[n_DDC];
        for (int k = 0; k < n_DDC; k++)
        {
            DDC_ranks[k] = k;
        }

        MPI_Comm comm_IPQC;
        MPI_Group group_base, group_DDC, group_IPQC;

        // define MPI group and communicator for DDC related processes WH
        MPI_Comm_group(comm_world, &group_base);
        MPI_Group_incl(group_base, n_DDC, DDC_ranks, &group_DDC); // define group flow and mass transport
        MPI_Comm_create(comm_world, group_DDC, &comm_DDC);

        // define MPI group and communicator for IPQC WH
        MPI_Group_difference(group_base, group_DDC, &group_IPQC);
        MPI_Comm_create(comm_world, group_IPQC, &comm_IPQC);

        int myrank_IPQC, mysize_IPQC;
        MPI_Group_size(group_DDC, &mysize); // WH
        MPI_Group_rank(group_DDC, &myrank); // WH
        MPI_Group_rank(group_IPQC, &myrank_IPQC);
        MPI_Group_size(group_IPQC, &mysize_IPQC);
        if (myrank_IPQC != MPI_UNDEFINED) // WH
            std::cout << "After MPI_Init myrank_IPQC = " << myrank_IPQC << '\n';
        if (myrank != MPI_UNDEFINED) // WH
            std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';

        if (myrank_IPQC != MPI_UNDEFINED) // ranks of group_IPQC will call IPhreeqc
            Call_IPhreeqc();
#endif
    }
    else
    {   // if no -ddc is specified or the number of DDCs is invalid, set ddc = np;
        // no new MPI groups will be generated
        splitcomm = false;
        n_DDC = np;
        comm_DDC = comm_world;
        MPI_Comm_size(comm_DDC, &mysize);
        MPI_Comm_rank(comm_DDC, &myrank);
        std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';
    }

    return splitcomm;
}
Example 8: main
int main(int argc, char **argv)
{
    int rank, nproc, mpi_errno;
    int i, ncomm, *ranks;
    int errs = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks = malloc(sizeof(int) * nproc);

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        int incl = i % nproc;
        MPI_Group comm_group;

        /* Comms include ranks: 0; 1; 2; ...; 0; 1; ... */
        MPI_Group_incl(world_group, 1, &incl, &comm_group);

        /* Note: the comms we create all contain one rank from MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            if (verbose)
                printf("%d: Created comm %d\n", rank, i);
            ncomm++;
        } else {
            if (verbose)
                printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errs = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    free(ranks);
    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example 9: build_inter_win_comm
void build_inter_win_comm() {
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    // Build Up Inter-Windows Communicators and Groups
    int* ranks;
    ranks = new int[2 * num_procs_per_win];
    mpi_inter_win_comm = new MPI_Comm*[DIM];
    mpi_inter_win_group = new MPI_Group*[DIM];
    for (int i = 0; i < DIM; ++i) {
        mpi_inter_win_comm[i] = new MPI_Comm[num_inter_win_comm[i]];
        mpi_inter_win_group[i] = new MPI_Group[num_inter_win_comm[i]];
    }
    for (int d = 0; d < DIM; ++d) {
        for (int i = 0; i < num_inter_win_comm[d]; ++i) {
            for (int j = 0; j < 2 * num_procs_per_win; ++j) {
                if (d == 0) {
                    int m = i + i / (num_wins_dim[d] - 1);
                    ranks[j] = m * num_procs_per_win + j;
                } else {
                    int m = i;
                    if (j < num_procs_per_win)
                        ranks[j] = m * num_procs_per_win + j;
                    else
                        ranks[j] = (m + num_wins_dim[0] - 1) * num_procs_per_win + j;
                }
            }
            MPI_Group_incl(world_group, 2 * num_procs_per_win, ranks, &mpi_inter_win_group[d][i]);
            MPI_Comm_create(MPI_COMM_WORLD, mpi_inter_win_group[d][i], &mpi_inter_win_comm[d][i]);
        }
    }
    delete [] ranks;

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    // Get Own ID in Inter-Win Communicator
    int line, row;
    line = (int) (pid_world_comm / (num_wins_dim[0] * num_procs_per_win));
    row = (int) ((pid_world_comm / num_procs_per_win) % num_wins_dim[0]);
    inter_win_comm_id[Right] = ((row == num_wins_dim[0] - 1) ? -1 : (num_wins_dim[0] - 1) * line + row);
    inter_win_comm_id[Left] = ((row == 0) ? -1 : (num_wins_dim[0] - 1) * line + row - 1);
    inter_win_comm_id[Up] = ((line == num_wins_dim[1] - 1) ? -1 : num_wins_dim[0] * line + row);
    inter_win_comm_id[Down] = ((line == 0) ? -1 : num_wins_dim[0] * (line - 1) + row);
    for (int i = 0; i < pow(2.0, DIM); ++i) {
        if (inter_win_comm_id[i] != -1) {
            MPI_Comm_rank(mpi_inter_win_comm[i / DIM][inter_win_comm_id[i]], &pid_inter_win_comm[i]);
        } else {
            pid_inter_win_comm[i] = -1;
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
}
Example 10: dart_group_union
dart_ret_t dart_group_union(
    const dart_group_t *g1,
    const dart_group_t *g2,
    dart_group_t       *gout)
{
    /* g1 and g2 are both ordered groups. */
    int ret = MPI_Group_union(
                  g1->mpi_group,
                  g2->mpi_group,
                  &(gout->mpi_group));
    if (ret == MPI_SUCCESS) {
        int i, j, k, size_in, size_out;
        dart_unit_t *pre_unitidsout, *post_unitidsout;
        MPI_Group group_all;
        MPI_Comm_group(MPI_COMM_WORLD, &group_all);
        MPI_Group_size(gout->mpi_group, &size_out);
        if (size_out > 1) {
            MPI_Group_size(g1->mpi_group, &size_in);
            pre_unitidsout  = (dart_unit_t *)malloc(
                                  size_out * sizeof(dart_unit_t));
            post_unitidsout = (dart_unit_t *)malloc(
                                  size_out * sizeof(dart_unit_t));
            dart_group_getmembers(gout, pre_unitidsout);

            /* Sort gout using the merge step of merge sort. */
            i = k = 0;
            j = size_in;
            while ((i <= size_in - 1) && (j <= size_out - 1)) {
                post_unitidsout[k++] =
                    (pre_unitidsout[i] <= pre_unitidsout[j])
                    ? pre_unitidsout[i++]
                    : pre_unitidsout[j++];
            }
            while (i <= size_in - 1) {
                post_unitidsout[k++] = pre_unitidsout[i++];
            }
            while (j <= size_out - 1) {
                post_unitidsout[k++] = pre_unitidsout[j++];
            }
            gout->mpi_group = MPI_GROUP_EMPTY;
            MPI_Group_incl(
                group_all,
                size_out,
                post_unitidsout,
                &(gout->mpi_group));
            free(pre_unitidsout);
            free(post_unitidsout);
        }
        ret = DART_OK;
    }
    return ret;
}
Example 11: main
int main(int argc, char **argv) {
    int rank, nproc, mpi_errno;
    int i, ncomm, *ranks;
    int errors = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks = malloc(sizeof(int) * nproc);

    for (i = 0; i < nproc; i++)
        ranks[i] = i;

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        MPI_Group comm_group;

        /* Comms include ranks: 0; 0,1; 0,1,2; ...; 0; 0,1; 0,1,2; ... */
        MPI_Group_incl(world_group, (i+1) % (nproc+1), /* Adding 1 yields counts of 1..nproc */
                       ranks, &comm_group);

        /* Note: the comms we create are all varying subsets of MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            ncomm++;
        } else {
            if (verbose) printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errors = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    free(ranks);
    MPI_Group_free(&world_group);

    MTest_Finalize(errors);
    MPI_Finalize();
    return 0;
}
Example 12: main
int main(int argc, char *argv[])
{
    int rank;
    MPI_Status status;

    MPI_Init(&argc, &argv);

    MPI_Group gw, g1, g2, g3;
    MPI_Comm_group(MPI_COMM_WORLD, &gw);

    int new_ranks[] = {0, 2, 3};
    MPI_Group_incl(gw, 3, new_ranks, &g1);

    int new_ranks2[] = {2, 3, 0};
    MPI_Group_incl(gw, 3, new_ranks2, &g2);
    MPI_Group_incl(gw, 3, new_ranks2, &g3);

    int size1, size2, size3;
    MPI_Group_size(g1, &size1);
    MPI_Group_size(g2, &size2);
    MPI_Group_size(g3, &size3);
    if (size1 != size2 || size2 != size3 || size1 != 3) {
        return 1;
    }

    /* g1 and g2 contain the same ranks in a different order -> MPI_SIMILAR;
       g2 and g3 were built from the same rank list -> MPI_IDENT */
    int r1, r2;
    MPI_Group_compare(g1, g2, &r1);
    MPI_Group_compare(g2, g3, &r2);
    if (r1 != MPI_SIMILAR || r2 != MPI_IDENT) {
        return 1;
    }

    MPI_Group_free(&g1);
    MPI_Group_free(&g3);
    MPI_Group_free(&g2);
    MPI_Finalize();
    return 0;
}
Example 13: MPIDistribution::MPICreateCommLayer
void MPIDistribution::MPICreateCommLayer()
{
    if(m_population->network()->MPIGetNodeId() == 0 && DEBUG_LEVEL > 2)
    {
        cout<<"Creating MPI Communicator for layer..."; cout.flush();
    }

    vector<int> localHypercolumns = ((PopulationColumns*)m_population)->GetLocalHypercolumnIndexes();

    MPI_Group orig_group, new_group;
    MPI_Comm_group(NETWORK_COMM_WORLD, &orig_group);
    MPI_Comm* new_comm = new MPI_Comm();

    //debug
    //*new_comm = NETWORK_COMM_WORLD;
    //end debug

    vector<int> nodeLayerIndexes = m_population->GetNodeLayerIndexes();
    vector<int> mpiProcsUsed = m_population->MPIGetProcessesUsed();

    // currently not set here in this way, but may be changed
    /*
    MPI_Group_incl(orig_group, nodeLayerIndexes.size(), &(nodeLayerIndexes[0]), &new_group);
    MPI_Comm_create(NETWORK_COMM_WORLD, new_group, new_comm);
    */
    //m_mpiCommLayer = new_comm;

    m_mpiCommLayer = new MPI_Comm();

    if(mpiProcsUsed.size()==0) // all processes used
    {
        *m_mpiCommLayer = NETWORK_COMM_WORLD; // may currently result in a bug if this assumption is not correct and recording is attempted
        MPI_Comm_size(NETWORK_COMM_WORLD, &m_mpiSizeLocal);
        MPI_Comm_rank(NETWORK_COMM_WORLD, &m_mpiRankLocal);
    }
    else
    {
        m_mpiCommLayer = new_comm;
        MPI_Group_incl(orig_group, mpiProcsUsed.size(), &(mpiProcsUsed[0]), &new_group);
        MPI_Comm_create(NETWORK_COMM_WORLD, new_group, new_comm);

        if(binary_search(mpiProcsUsed.begin(), mpiProcsUsed.end(), m_population->network()->MPIGetNodeId()))
        {
            MPI_Comm_size(*m_mpiCommLayer, &m_mpiSizeLocal);
            MPI_Comm_rank(*m_mpiCommLayer, &m_mpiRankLocal);
        }
    }

    if(m_population->network()->MPIGetNodeId() == 0 && DEBUG_LEVEL > 2)
    {
        cout<<"done.\n"; cout.flush();
    }

    m_commsLayersCreated = true;
}
Example 14: main
int main(int argc, char *argv[])
{
    int rank, destrank, nprocs, *A, *B, i;
    MPI_Comm CommDeuce;
    MPI_Group comm_group, group;
    MPI_Win win;
    int errs = 0;

    MTest_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    if (nprocs < 2) {
        printf("Run this program with 2 or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_split(MPI_COMM_WORLD, (rank < 2), rank, &CommDeuce);

    if (rank < 2)
    {
        i = MPI_Alloc_mem(SIZE2 * sizeof(int), MPI_INFO_NULL, &A);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        i = MPI_Alloc_mem(SIZE2 * sizeof(int), MPI_INFO_NULL, &B);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Comm_group(CommDeuce, &comm_group);

        if (rank == 0) {
            for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
            MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, CommDeuce, &win);
            destrank = 1;
            MPI_Group_incl(comm_group, 1, &destrank, &group);
            MPI_Win_start(group, 0, win);
            for (i=0; i<SIZE1; i++)
                MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
            for (i=0; i<SIZE1; i++)
                MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);
            MPI_Win_complete(win);
            for (i=0; i<SIZE1; i++)
                if (B[i] != (-4)*(i+SIZE1)) {
                    SQUELCH( printf("Get Error: B[i] is %d, should be %d\n", B[i], (-4)*(i+SIZE1)); );
                    errs++;
                }
Example 15: main
int main( int argc, char **argv )
{
    int i, n, n_goal = 2048, n_all, rc, n_ranks, *ranks, rank, size, len;
    MPI_Group *group_array, world_group;
    char msg[MPI_MAX_ERROR_STRING];

    MPI_Init( &argc, &argv );
    MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    n = n_goal;
    group_array = (MPI_Group *)malloc( n * sizeof(MPI_Group) );

    MPI_Comm_group( MPI_COMM_WORLD, &world_group );
    n_ranks = size;
    ranks = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;

    /* Create as many identical groups as possible (up to n_goal) */
    for (i=0; i<n; i++) {
        rc = MPI_Group_incl( world_group, n_ranks, ranks, group_array + i );
        if (rc) {
            fprintf( stderr, "Error when creating group number %d\n", i );
            MPI_Error_string( rc, msg, &len );
            fprintf( stderr, "%s\n", msg );
            n = i + 1;
            break;
        }
    }

    for (i=0; i<n; i++) {
        rc = MPI_Group_free( group_array + i );
        if (rc) {
            fprintf( stderr, "Error when freeing group number %d\n", i );
            MPI_Error_string( rc, msg, &len );
            fprintf( stderr, "%s\n", msg );
            break;
        }
    }

    MPI_Group_free( &world_group );
    MPI_Allreduce( &n, &n_all, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD );

    if (rank == 0) {
        printf( "Completed test of %d group creations\n", n_all );
        if (n_all != n_goal) {
            printf(
"This MPI implementation limits the number of groups that can be created\n\
This is allowed by the standard and is not a bug, but is a limit on the\n\
implementation\n" );
        }
    }

    free( ranks );
    free( group_array );
    MPI_Finalize();
    return 0;
}
}