

C++ Intracomm::Bcast Method Code Examples

This article collects typical usage examples of the mpi::Intracomm::Bcast method in C++. If you are unsure what Intracomm::Bcast does, how to call it, or what its usage looks like in practice, the hand-picked examples below should help. You can also explore further usage examples of the containing class, mpi::Intracomm.


Three code examples of the Intracomm::Bcast method are shown below, drawn from open-source projects and ordered by popularity.
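Before the project examples, here is a minimal self-contained sketch of the basic call; the value broadcast and the use of MPI::COMM_WORLD are illustrative. Note that the MPI C++ bindings used throughout this article (MPI::Intracomm, MPI::INT, ...) were deprecated in MPI-2.2 and removed in MPI-3.0, so building these examples requires an MPI library that still ships them.

#include <mpi.h>
#include <iostream>

// Minimal sketch: the root rank broadcasts one int to every rank.
// All ranks must call Bcast with the same count, datatype, and root.
int main(int argc, char* argv[])
{
   MPI::Init(argc, argv);
   int rank = MPI::COMM_WORLD.Get_rank();

   int value = 0;
   if (rank == 0) {
      value = 42;   // only the root holds the value before the call
   }
   MPI::COMM_WORLD.Bcast(&value, 1, MPI::INT, 0);
   // After the call, every rank's copy of value equals the root's.

   std::cout << "rank " << rank << ": value = " << value << std::endl;
   MPI::Finalize();
   return 0;
}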

Example 1: bcast

   /*
   * Broadcast a buffer.
   */
   void Buffer::bcast(MPI::Intracomm& comm, int source)
   {
      int comm_size = comm.Get_size();
      int myRank = comm.Get_rank();
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }

      int sendBytes;
      if (myRank == source) {
         // Root: broadcast the number of packed bytes, then the bytes.
         sendBytes = sendPtr_ - sendBufferBegin_;
         comm.Bcast(&sendBytes, 1, MPI::INT, source);
         comm.Bcast(sendBufferBegin_, sendBytes, MPI::CHAR, source);
         sendPtr_ = sendBufferBegin_;
         sendType_ = NONE;
      } else {
         // Receivers: learn the byte count first, then receive the data.
         comm.Bcast(&sendBytes, 1, MPI::INT, source);
         comm.Bcast(recvBufferBegin_, sendBytes, MPI::CHAR, source);
         recvPtr_ = recvBufferBegin_;
         recvType_ = NONE;
      }
      if (sendBytes > maxSendLocal_) {
         maxSendLocal_ = sendBytes;
      }

   }
Developer: pombredanne | Project: simpatico | Lines: 29 | Source: Buffer.cpp
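The pattern worth extracting from Example 1 is the two-phase broadcast: a fixed-size length first, then the variable-length payload. Below is a reduced sketch of just that handshake; the function name and the std::vector standing in for Buffer's internal arrays are illustrative, not part of simpatico.

#include <mpi.h>
#include <vector>

// Sketch of Example 1's size-then-payload pattern. Non-root ranks
// learn the byte count from the first Bcast, make room, then receive.
void bcastBytes(MPI::Intracomm& comm, std::vector<char>& buffer, int source)
{
   int nBytes = 0;
   if (comm.Get_rank() == source) {
      nBytes = static_cast<int>(buffer.size());
   }
   comm.Bcast(&nBytes, 1, MPI::INT, source);              // phase 1: length
   buffer.resize(nBytes);                                 // no-op on the root
   comm.Bcast(buffer.data(), nBytes, MPI::CHAR, source);  // phase 2: payload
}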

Example 2: broadcast

/// Broadcast the octree information to the other ranks.
void BCMOctree::broadcast(MPI::Intracomm& comm)
{
  assert(comm.Get_rank() == 0);  // only the root rank executes this side
  rootGrid->broadcast(comm);

  // Pack the leaf-node count and the ordering flag into one message.
  int numLeafNode = leafNodeArray.size();
  int ibuf[2];
  ibuf[0] = numLeafNode;
  ibuf[1] = ordering;
  comm.Bcast(ibuf, 2, MPI::INT, 0);

  size_t size = Pedigree::GetSerializeSize();
  unsigned char* buf = new unsigned char[size * numLeafNode];

  // Serialize each leaf node's pedigree into the contiguous buffer,
  // then ship the whole array in a single MPI::BYTE broadcast.
  size_t ip = 0;
  for (int id = 0; id < rootGrid->getSize(); id++) {
    packPedigrees(rootNodes[id], ip, buf);
  }

  comm.Bcast(buf, size*numLeafNode, MPI::BYTE, 0);
  delete[] buf;
}
Developer: avr-aics-riken | Project: BCMTools | Lines: 23 | Source: BCMOctree.cpp
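Example 2 demonstrates broadcasting serialized objects: fixed-size records are packed into one contiguous buffer and sent as MPI::BYTE in a single collective call. The sketch below reduces that idea to its core; the Record type standing in for BCMTools' Pedigree is illustrative.

#include <mpi.h>
#include <cstring>

// Illustrative fixed-size, trivially copyable record.
struct Record { int id; double value; };

// Sketch: the root packs n records into one byte buffer and broadcasts
// it, so the whole array costs a single collective call.
void broadcastRecords(MPI::Intracomm& comm, Record* records, int n)
{
   int bytes = static_cast<int>(sizeof(Record) * n);
   unsigned char* buf = new unsigned char[bytes];
   if (comm.Get_rank() == 0) {
      std::memcpy(buf, records, bytes);   // "serialize" on the root
   }
   comm.Bcast(buf, bytes, MPI::BYTE, 0);
   if (comm.Get_rank() != 0) {
      std::memcpy(records, buf, bytes);   // unpack on the receivers
   }
   delete[] buf;
}

For a trivially copyable type the intermediate buffer is not strictly needed (records could be broadcast directly); the indirection matters in Example 2 because each Pedigree serializes itself into the buffer.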

Example 3: neighbors

ifstream& FullyDistSpVec<IT,NT>::ReadDistribute (ifstream& infile, int master)
{
	IT total_nnz;
	MPI::Intracomm World = commGrid->GetWorld();
	int neighs = World.Get_size();	// number of neighbors (including oneself)
	int buffperneigh = MEMORYINBYTES / (neighs * (sizeof(IT) + sizeof(NT)));

	int * displs = new int[neighs];
	for (int i=0; i<neighs; ++i)
		displs[i] = i*buffperneigh;

	int * curptrs = NULL; 
	int recvcount = 0;
	IT * inds = NULL; 
	NT * vals = NULL;
	int rank = World.Get_rank();	
	if(rank == master)	// only the master processor reads the file
	{		
		inds = new IT [ buffperneigh * neighs ];
		vals = new NT [ buffperneigh * neighs ];
		curptrs = new int[neighs]; 
		fill_n(curptrs, neighs, 0);	// fill with zero
		if (infile.is_open())
		{
			infile.clear();
			infile.seekg(0);
			infile >> glen >> total_nnz;
			World.Bcast(&glen, 1, MPIType<IT>(), master);			
	
			IT tempind;
			NT tempval;
			double loadval;
			IT cnz = 0;
			while ( (!infile.eof()) && cnz < total_nnz)
			{
				infile >> tempind;
				//infile >> tempval;
				infile >> loadval;
				tempval = static_cast<NT>(loadval);
				tempind--;
				IT locind;
				int rec = Owner(tempind, locind);	// recipient (owner) processor
				inds[ rec * buffperneigh + curptrs[rec] ] = locind;
				vals[ rec * buffperneigh + curptrs[rec] ] = tempval;
				++ (curptrs[rec]);				

				if(curptrs[rec] == buffperneigh || (cnz == (total_nnz-1)) )		// one buffer is full, or file is done !
				{
					// first, send the receive counts ...
					World.Scatter(curptrs, 1, MPI::INT, &recvcount, 1, MPI::INT, master);

					// generate space for own recv data ... (use arrays because vector<bool> is crippled, if NT=bool)
					IT * tempinds = new IT[recvcount];
					NT * tempvals = new NT[recvcount];
					
					// then, send all buffers to their recipients ...
					World.Scatterv(inds, curptrs, displs, MPIType<IT>(), tempinds, recvcount,  MPIType<IT>(), master); 
					World.Scatterv(vals, curptrs, displs, MPIType<NT>(), tempvals, recvcount,  MPIType<NT>(), master); 
		
					// now push what is ours to tuples
					for(IT i=0; i< recvcount; ++i)
					{					
						ind.push_back( tempinds[i] );	// already offset'd by the sender
						num.push_back( tempvals[i] );
					}

					// reset current pointers so that we can reuse {inds,vals} buffers
					fill_n(curptrs, neighs, 0);
					DeleteAll(tempinds, tempvals);
				}
				++ cnz;
			}
			assert (cnz == total_nnz);
		
			// Signal the end of file to other processors along the diagonal
			fill_n(curptrs, neighs, numeric_limits<int>::max());	
			World.Scatter(curptrs, 1, MPI::INT, &recvcount, 1, MPI::INT, master);
		}
Developer: harperj | Project: KDTSpecializer | Lines: 78 (excerpt; the listing ends mid-function) | Source: FullyDistSpVec.cpp
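Example 3 pairs Scatter (to tell each rank how many items it will receive) with Scatterv (to deliver the variable-sized chunks). Below is a reduced sketch of that handshake; the counts, displacements, and payload are made up for illustration.

#include <mpi.h>
#include <vector>

// Sketch of Example 3's Scatter + Scatterv handshake: the master sends
// each rank its receive count first, then the data in one collective.
void scatterChunks(MPI::Intracomm& comm, int master)
{
   int nprocs = comm.Get_size();
   std::vector<int> counts, displs;
   std::vector<double> sendbuf;
   if (comm.Get_rank() == master) {
      counts.resize(nprocs);
      displs.resize(nprocs);
      for (int i = 0; i < nprocs; ++i) {
         counts[i] = i + 1;                                   // rank i gets i+1 items
         displs[i] = (i == 0) ? 0 : displs[i-1] + counts[i-1];
      }
      sendbuf.assign(displs[nprocs-1] + counts[nprocs-1], 1.0);
   }

   // Phase 1: each rank learns its own receive count.
   int recvcount = 0;
   comm.Scatter(counts.data(), 1, MPI::INT, &recvcount, 1, MPI::INT, master);

   // Phase 2: the variable-length chunks go out in one call; the count
   // and displacement arrays are only read on the master.
   std::vector<double> recvbuf(recvcount);
   comm.Scatterv(sendbuf.data(), counts.data(), displs.data(), MPI::DOUBLE,
                 recvbuf.data(), recvcount, MPI::DOUBLE, master);
}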


Note: The mpi::Intracomm::Bcast examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not repost without permission.