

C++ mpi::Intracomm Class Code Examples

This article collects typical usage examples of the C++ mpi::Intracomm class. If you are wondering what the Intracomm class is for, how to use it, or what working Intracomm code looks like, the selected class examples below should help.


The following presents 15 code examples of the Intracomm class, sorted by popularity by default.
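All of the examples use the MPI C++ bindings, which were deprecated in MPI-2.2 and removed in MPI-3.0, so they require an MPI installation that still builds those bindings. As a minimal orientation sketch (not taken from any of the projects below), basic use of the predefined intracommunicator MPI::COMM_WORLD looks like this:

#include <mpi.h>
#include <iostream>

int main(int argc, char* argv[]) {
    MPI::Init(argc, argv);

    // MPI::COMM_WORLD is a predefined MPI::Intracomm covering all ranks.
    int rank = MPI::COMM_WORLD.Get_rank();
    int size = MPI::COMM_WORLD.Get_size();

    // A collective call on the intracommunicator: sum one value per rank.
    int local = rank + 1;
    int total = 0;
    MPI::COMM_WORLD.Allreduce(&local, &total, 1, MPI::INT, MPI::SUM);

    std::cout << "rank " << rank << " of " << size
              << ", total = " << total << std::endl;

    MPI::Finalize();
    return 0;
}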

Example 1: main

#include <mpi.h>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
using std::cout;
using std::endl;

// Assumed stand-ins for globals and helpers defined elsewhere in the original
// dz5z10.cpp; the sizes and values here are illustrative only.
const int COLS = 4, ROWS = 4;
int rank, size;
int ranksA[64], ranksB[64];
int powerSum = 0;
void pline() { cout << "--------------------" << endl; }

int main(int argc, char* argv[]) {
	MPI::Init(argc, argv);
	
	rank = MPI::COMM_WORLD.Get_rank();
	size = MPI::COMM_WORLD.Get_size();
	if (size < 2) MPI::COMM_WORLD.Abort(1);
	if (size < 1+COLS+ROWS) MPI::COMM_WORLD.Abort(1);
	
	MPI::Group globalGroup = MPI::COMM_WORLD.Get_group();

	if (0 == rank) {
		int matrix[COLS][ROWS], xirtam[ROWS][COLS];
	
		srand(time(0));
		for (int i=0; i<COLS; i++)
			for (int j=0; j<ROWS; j++) {
				matrix[i][j] = 9 * (double)rand() / (double)RAND_MAX;
				xirtam[j][i] = matrix[i][j];
			}
		
		cout << "random matrica: " << endl;
		for (int i=0; i<COLS; i++) {
			for (int j=0; j<ROWS; j++)
				cout << matrix[i][j] << " ";
			cout << endl;
		}

		// Create() is collective over COMM_WORLD, so rank 0 must call it too;
		// with an empty group it simply receives MPI::COMM_NULL.
		MPI::COMM_WORLD.Create(MPI::GROUP_EMPTY);
	}
	else {	
		MPI::Group group;
		MPI::Intracomm comm;

		int j=0, k=0;
		for (int i=1; i<size; i++)
			if (i % 2) ranksA[j++] = i;
			else ranksB[k++] = i;

		if (rank % 2) 
			group = globalGroup.Incl(size / 2 + size % 2, ranksA);
		else 
			group = globalGroup.Incl(size / 2, ranksB);

		comm = MPI::COMM_WORLD.Create(group);
		int newRank = comm.Get_rank();
	
	
		pline(); cout << rank << ", " << newRank << ", " << powerSum << endl;
		fflush(stdout); 

		group.Free();
		comm.Free();
	}
	
	
	
	//comm.Free();
	
	MPI::Finalize();
	
	return 0;
}
Developer ID: aaleksandar, Project: si4mps, Lines: 60, Source file: dz5z10.cpp

Example 2: GetBlockData

void LocalScalar3D<real>::Dump(BlockManager& blockManager, const int step, const char* label) {
	ImposeBoundaryCondition(blockManager);
	MPI::Intracomm comm = blockManager.getCommunicator();

	ostringstream ossFileNameTime;
	ossFileNameTime << "./BIN/";
	mkdir(ossFileNameTime.str().c_str(), 0755);

#ifdef _BLOCK_IS_LARGE_
#else
#endif
	for (int id = 0; id < blockManager.getNumBlock(); ++id) {
		BlockBase* block = blockManager.getBlock(id);

		::Vec3i size = block->getSize();
		Vec3d origin = block->getOrigin();
		Vec3d blockSize = block->getBlockSize();
		Vec3d cellSize = block->getCellSize();
		int level = block->getLevel();

		ostringstream ossFileName;
		ossFileName << "./BIN/";
		ossFileName << "dump-";
		ossFileName << label;
		ossFileName << "-";
		ossFileName.width(5);
		ossFileName.setf(ios::fixed);
		ossFileName.fill('0');
		ossFileName << comm.Get_rank();
		ossFileName << "-";
		ossFileName.width(5);
		ossFileName.setf(ios::fixed);
		ossFileName.fill('0');
		ossFileName << id;
		ossFileName << "-";
		ossFileName.width(10);
		ossFileName.setf(ios::fixed);
		ossFileName.fill('0');
		ossFileName << step;
		ossFileName << ".bin";

		int cx = size.x + 2*vc;
		int cy = size.y + 2*vc;
		int cz = size.z + 2*vc;
		int iNE = 1;

		real* pData = GetBlockData(block);

		ofstream ofs;
		ofs.open(ossFileName.str().c_str(), ios::out | ios::binary);
		ofs.write((char*)&size.x, sizeof(int));
		ofs.write((char*)&size.y, sizeof(int));
		ofs.write((char*)&size.z, sizeof(int));
		ofs.write((char*)&vc    , sizeof(int));
		ofs.write((char*)&iNE   , sizeof(int));
		ofs.write((char*)pData  , sizeof(real)*cx*cy*cz);
		ofs.close();
	}
}
Developer ID: avr-aics-riken, Project: BCMTools, Lines: 59, Source file: LocalScalar3D.cpp

Example 3: getDofNumbering

 /** \brief
  * In many situations each rank computes a number of local DOFs. All
  * ranks then want to know the global number of DOFs and the starting
  * displacement of the DOF numbering on each rank.
  *
  * \param[in]   mpiComm        The MPI communicator.
  * \param[in]   nRankDofs      The number of local DOFs.
  * \param[out]  rStartDofs     Displacement of the DOF numbering. On rank n
  *                             this is the sum of the local DOF counts on
  *                             ranks 0 to n - 1.
  * \param[out]  nOverallDofs   Global sum of nRankDofs; equal on all
  *                             ranks.
  */
 inline void getDofNumbering(MPI::Intracomm& mpiComm,
                             int nRankDofs,
                             int& rStartDofs,
                             int& nOverallDofs)
 {
   rStartDofs = 0;
   nOverallDofs = 0;
   mpiComm.Scan(&nRankDofs, &rStartDofs, 1, MPI_INT, MPI_SUM);
   rStartDofs -= nRankDofs;
   mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM);
 }
Developer ID: spraetor, Project: amdis2, Lines: 24, Source file: MpiHelper.hpp
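As a hedged usage sketch (it assumes the getDofNumbering helper above is visible in the current scope; the local DOF count is illustrative), a caller might obtain its DOF offset and the global total like this:

#include <mpi.h>
#include <iostream>

int main(int argc, char* argv[]) {
    MPI::Init(argc, argv);
    MPI::Intracomm comm = MPI::COMM_WORLD.Dup();

    int nRankDofs = 100 + comm.Get_rank();   // illustrative local DOF count
    int rStartDofs = 0, nOverallDofs = 0;
    getDofNumbering(comm, nRankDofs, rStartDofs, nOverallDofs);

    // Rank n may now number its DOFs rStartDofs .. rStartDofs + nRankDofs - 1
    // out of nOverallDofs global DOFs.
    std::cout << "rank " << comm.Get_rank() << ": start = " << rStartDofs
              << ", overall = " << nOverallDofs << std::endl;

    comm.Free();
    MPI::Finalize();
    return 0;
}

The combination of an exclusive prefix sum (Scan minus the local count) and an Allreduce is the standard way to build a contiguous global numbering from per-rank counts.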

Example 4: init_workers

// not necessary to create a new comm object
MPI::Intracomm init_workers(const MPI::Intracomm &comm_world, int managerid) {
	// get old group
	MPI::Group world_group = comm_world.Get_group();
	// create new group from old group
	int worker_size = comm_world.Get_size() - 1;
	int *workers = new int[worker_size];
	for (int i = 0, id = 0; i < worker_size; ++i, ++id) {
		if (id == managerid) ++id;  // skip the manager id
		workers[i] = id;
	}
	MPI::Group worker_group = world_group.Incl(worker_size, workers);
	delete [] workers;
	return comm_world.Create(worker_group);
}
Developer ID: SBU-BMI, Project: nscale, Lines: 15, Source file: nu-features.cpp
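A hedged sketch of a call site for init_workers (the manager rank and the work done on the communicator are illustrative). Because Create() is collective over comm_world, the manager rank also calls init_workers; being excluded from the worker group, it gets MPI::COMM_NULL back and must not communicate on it:

#include <mpi.h>
#include <iostream>

int main(int argc, char* argv[]) {
    MPI::Init(argc, argv);
    const int managerid = 0;                       // illustrative manager rank

    // Collective over COMM_WORLD: every rank, including the manager, calls it.
    MPI::Intracomm workerComm = init_workers(MPI::COMM_WORLD, managerid);

    if (MPI::COMM_WORLD.Get_rank() == managerid) {
        // Manager path: workerComm is MPI::COMM_NULL here; coordinate via COMM_WORLD.
    } else {
        int workerRank = workerComm.Get_rank();    // rank within the worker group
        // ... worker-side collectives / point-to-point on workerComm ...
        std::cout << "worker " << workerRank << " ready" << std::endl;
        workerComm.Free();
    }

    MPI::Finalize();
    return 0;
}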

Example 5: computeNAtomTotal

   /*
   * Compute, store and return total number of atoms on all processors.
   */
   void AtomStorage::computeNAtomTotal(MPI::Intracomm& communicator)
   {
      // If nAtomTotal is already set, do nothing and return.
      // if (nAtomTotal_.isSet()) return;

      int nAtomLocal = nAtom();
      int nAtomTotal = 0;
      communicator.Reduce(&nAtomLocal, &nAtomTotal, 1, 
                          MPI::INT, MPI::SUM, 0);
      if (communicator.Get_rank() !=0) {
         nAtomTotal = 0;
      }
      nAtomTotal_.set(nAtomTotal);
   }
Developer ID: tdunn19, Project: simpatico, Lines: 17, Source file: AtomStorage.cpp

Example 6: setIoCommunicator

 void MpiFileIo::setIoCommunicator(MPI::Intracomm& communicator)
 {
    communicatorPtr_ = &communicator; 
    if (communicator.Get_rank() == 0) {
       isIoProcessor_ = true;
    } else {
       isIoProcessor_ = false;
    }
 }
Developer ID: TaherGhasimakbari, Project: simpatico, Lines: 9, Source file: MpiFileIo.cpp

Example 7: iSend

   /*
   * Send a block (nonblocking)
   */
   void MemoryOArchive::iSend(MPI::Intracomm& comm, MPI::Request& req, int dest)
   {
      int  comm_size = comm.Get_size();
      int  myRank = comm.Get_rank();

      // Preconditions
      if (dest > comm_size - 1 || dest < 0) {
         UTIL_THROW("Destination rank out of bounds");
      }
      if (dest == myRank) {
         UTIL_THROW("Source and desination identical");
      }

      size_t  sendBytes = cursor_ - buffer_;
      size_t* sizePtr = (size_t*)buffer_;
      *sizePtr = sendBytes;
      req = comm.Isend(buffer_, sendBytes, MPI::UNSIGNED_CHAR, dest, 5);
   }
Developer ID: TaherGhasimakbari, Project: simpatico, Lines: 21, Source file: MemoryOArchive.cpp
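A brief fragment showing how the returned request might be completed (the archive, communicator, and destination rank names are illustrative, not from the library's documentation). The archive's buffer must stay untouched until the request finishes:

MPI::Request req;
archive.iSend(communicator, req, destRank);   // posts the Isend and returns immediately
// ... overlap other work here; the archive buffer must remain valid ...
req.Wait();                                   // send complete; the buffer may be reused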

Example 8: recv

/*
* Receive a block.
*/
void PackedData::recv(MPI::Intracomm& comm, int source)
{
    MPI::Request request;
    int  myRank     = comm.Get_rank();
    int  comm_size  = comm.Get_size();

    // Preconditions
    if (source > comm_size - 1 || source < 0) {
        UTIL_THROW("Source rank out of bounds");
    }
    if (source == myRank) {
        UTIL_THROW("Source and desination identical");
    }

    request = comm.Irecv(begin_, capacity_, MPI::UNSIGNED_CHAR, source, 5);
    request.Wait();
    cursor_ = begin_;

}
Developer ID: jglaser, Project: simpatico, Lines: 22, Source file: PackedData.cpp

Example 9: sendRecv

   /*
   * Send and receive buffer.
   */
   void Buffer::sendRecv(MPI::Intracomm& comm, int source, int dest)
   {

      MPI::Request request[2];
      int  sendBytes = 0;
      int  myRank    = comm.Get_rank();
      int  comm_size = comm.Get_size();

      // Preconditions
      if (dest > comm_size - 1 || dest < 0) {
         UTIL_THROW("Destination rank out of bounds");
      }
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }
      if (dest == myRank) {
         UTIL_THROW("Destination and my rank are identical");
      }
      if (source == myRank) {
         UTIL_THROW("Source and my rank are identical");
      }

      // Start nonblocking receive.
      request[0] = comm.Irecv(recvBufferBegin_, bufferCapacity_ , 
                              MPI::CHAR, source, 5);

      // Start nonblocking send.
      sendBytes = sendPtr_ - sendBufferBegin_;
      request[1] = comm.Isend(sendBufferBegin_, sendBytes , MPI::CHAR, dest, 5);

      // Wait for completion of receive.
      request[0].Wait();
      recvPtr_ = recvBufferBegin_;

      // Wait for completion of send.
      request[1].Wait();

      // Update statistics.
      if (sendBytes > maxSendLocal_) {
         maxSendLocal_ = sendBytes;
      }
   }
Developer ID: pombredanne, Project: simpatico, Lines: 45, Source file: Buffer.cpp

Example 10: recv

   /*
   * Receive a buffer.
   */
   void Buffer::recv(MPI::Intracomm& comm, int source)
   {
      MPI::Request request;
      int  myRank     = comm.Get_rank();
      int  comm_size  = comm.Get_size();

      // Preconditions
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }
      if (source == myRank) {
         UTIL_THROW("Source and destination identical");
      }

      request = comm.Irecv(recvBufferBegin_, bufferCapacity_, 
                           MPI::CHAR, source, 5);
      request.Wait();
      recvType_ = NONE;
      recvPtr_ = recvBufferBegin_;
   }
Developer ID: pombredanne, Project: simpatico, Lines: 23, Source file: Buffer.cpp

Example 11: send

/*
* Send a block.
*/
void PackedData::send(MPI::Intracomm& comm, int dest)
{
    MPI::Request request;
    int  sendBytes = 0;
    int  comm_size = comm.Get_size();
    int  myRank = comm.Get_rank();

    // Preconditions
    if (dest > comm_size - 1 || dest < 0) {
        UTIL_THROW("Destination rank out of bounds");
    }
    if (dest == myRank) {
        UTIL_THROW("Source and desination identical");
    }

    sendBytes = cursor_ - begin_;
    request = comm.Isend(begin_, sendBytes, MPI::UNSIGNED_CHAR, dest, 5);
    request.Wait();

}
Developer ID: jglaser, Project: simpatico, Lines: 23, Source file: PackedData.cpp

Example 12: reduce

   /*
   * Reduce (add) distributions from multiple MPI processors.
   */
   void Distribution::reduce(MPI::Intracomm& communicator, int root)
   {
  
      long* totHistogram = new long[nBin_]; 
      communicator.Reduce(histogram_.cArray(), totHistogram, nBin_, MPI::LONG, MPI::SUM, root);
      if (communicator.Get_rank() == root) {
         for (int i=0; i < nBin_; ++i) {
            histogram_[i] = totHistogram[i];
         }
      } else { 
         for (int i=0; i < nBin_; ++i) {
            histogram_[i] = 0;
         }
      }
      delete[] totHistogram;

      long totSample; 
      communicator.Reduce(&nSample_, &totSample, 1, MPI::LONG, MPI::SUM, root);
      if (communicator.Get_rank() == root) {
         nSample_ = totSample;
      } else {
         nSample_ = 0;
      }

      long totReject; 
      communicator.Reduce(&nReject_, &totReject, 1, MPI::LONG, MPI::SUM, root);
      if (communicator.Get_rank() == root) {
         nReject_ = totReject;
      } else {
         nReject_ = 0;
      }

   }
Developer ID: TaherGhasimakbari, Project: simpatico, Lines: 36, Source file: Distribution.cpp

Example 13: bcast

   /*
   * Broadcast a buffer.
   */
   void Buffer::bcast(MPI::Intracomm& comm, int source)
   {
      int comm_size = comm.Get_size();
      int myRank = comm.Get_rank();
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }

      int sendBytes;
      if (myRank == source) {
         sendBytes = sendPtr_ - sendBufferBegin_;
         comm.Bcast(&sendBytes, 1, MPI::INT, source);
         comm.Bcast(sendBufferBegin_, sendBytes, MPI::CHAR, source);
         sendPtr_ = sendBufferBegin_;
         sendType_ = NONE;
      } else {
         comm.Bcast(&sendBytes, 1, MPI::INT, source);
         comm.Bcast(recvBufferBegin_, sendBytes, MPI::CHAR, source);
         recvPtr_ = recvBufferBegin_;
         recvType_ = NONE;
      }
      if (sendBytes > maxSendLocal_) {
         maxSendLocal_ = sendBytes;
      }

   }
Developer ID: pombredanne, Project: simpatico, Lines: 29, Source file: Buffer.cpp

Example 14: resample_popsizes_mh

// Metropolis-Hastings population size resampling; not used anymore
void resample_popsizes_mh(ArgModel *model, const LocalTrees *trees,
                       bool sample_popsize_recomb, double heat) {
    list<PopsizeConfigParam> &l = model->popsize_config.params;
    double curr_like = sample_popsize_recomb ? calc_arg_prior(model, trees) :
        calc_arg_prior_recomb_integrate(model, trees, NULL, NULL, NULL);
#ifdef ARGWEAVER_MPI
    MPI::Intracomm *comm = model->mc3.group_comm;
    int rank = comm->Get_rank();
    comm->Reduce(rank == 0 ? MPI_IN_PLACE : &curr_like,
                 &curr_like, 1, MPI::DOUBLE, MPI_SUM, 0);
#endif
    for (int rep=0; rep < model->popsize_config.numsample; rep++) {
        int idx=0;
        for (list<PopsizeConfigParam>::iterator it = l.begin();
             it != l.end(); it++) {
            curr_like =
                resample_single_popsize_mh(model, trees, sample_popsize_recomb,
                                           heat, it, curr_like, idx++);
        }
    }

}
Developer ID: mjhubisz, Project: argweaver, Lines: 23, Source file: est_popsize.cpp

Example 15: broadcast

/// Broadcast the octree information to the other ranks.
void BCMOctree::broadcast(MPI::Intracomm& comm)
{
  assert(comm.Get_rank() == 0);
  rootGrid->broadcast(comm);

  int numLeafNode = leafNodeArray.size();
  int ibuf[2];
  ibuf[0] = numLeafNode;
  ibuf[1] = ordering;
  comm.Bcast(&ibuf, 2, MPI::INT, 0);

  size_t size = Pedigree::GetSerializeSize();
  unsigned char* buf = new unsigned char[size * numLeafNode];

  size_t ip = 0;
  for (int id = 0; id < rootGrid->getSize(); id++) {
    packPedigrees(rootNodes[id], ip, buf);
  }

  comm.Bcast(buf, size*numLeafNode, MPI::BYTE, 0);
  delete[] buf;
}
Developer ID: avr-aics-riken, Project: BCMTools, Lines: 23, Source file: BCMOctree.cpp


Note: The mpi::Intracomm class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.