This article collects typical usage examples of the C++ method InterColComm::commSize. If you have been wondering what InterColComm::commSize does in practice, or how to call it, the hand-picked examples below may help. You can also browse further usage examples for the enclosing class, InterColComm.
Two code examples of InterColComm::commSize are shown below, sorted by popularity by default. You can rate the examples you find useful; your feedback helps the system recommend better C++ examples.
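Both examples follow the same pattern: commRank() identifies the calling process within the column's MPI communicator, commSize() returns the number of processes in it, and communicator() exposes the underlying MPI_Comm for point-to-point calls. A minimal sketch of that idiom, with illustrative variable names that are not part of the PetaVision API, looks like this:

// Hypothetical sketch of the rank-0 gather idiom used in the examples below.
// icComm is assumed to be an InterColComm* obtained from the HyPerCol.
int rank = icComm->commRank();   // this process's rank in the column communicator
int size = icComm->commSize();   // total number of MPI processes in the column
if (rank == 0) {
   for (int proc = 1; proc < size; proc++) {
      // receive data sent by rank 'proc' over icComm->communicator()
   }
}
else {
   // send local data to rank 0 over icComm->communicator()
}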
Example 1: customexit
int customexit(HyPerCol * hc, int argc, char ** argv) {
   pvadata_t correctvalue = 0.5f;
   pvadata_t tolerance = 1.0e-7f;
   if (hc->columnId()==0) {
      pvInfo().printf("Checking whether input layer has all values equal to %f ...\n", correctvalue);
   }
   HyPerLayer * inputlayer = hc->getLayerFromName("input");
   assert(inputlayer);
   PVLayerLoc const * loc = inputlayer->getLayerLoc();
   assert(loc->nf==1);
   const int numNeurons = inputlayer->getNumNeurons();
   assert(numNeurons>0);
   int status = PV_SUCCESS;
   int numExtended = inputlayer->getNumExtended();
   InterColComm * icComm = hc->icCommunicator();
   pvadata_t * layerData = (pvadata_t *) icComm->publisherStore(inputlayer->getLayerId())->buffer(LOCAL);
   int rootproc = 0;
   if (icComm->commRank()==rootproc) {
      pvadata_t * databuffer = (pvadata_t *) malloc(numExtended*sizeof(pvadata_t));
      assert(databuffer);
      for (int proc=0; proc<icComm->commSize(); proc++) {
         if (proc==rootproc) {
            memcpy(databuffer, layerData, numExtended*sizeof(pvadata_t));
         }
         else {
            MPI_Recv(databuffer, numExtended*sizeof(pvadata_t), MPI_BYTE, proc, 15, icComm->communicator(), MPI_STATUS_IGNORE);
         }
         // At this point, databuffer on rank 0 should contain the extended input layer on rank proc
         for (int k=0; k<numNeurons; k++) {
            int kExt = kIndexExtended(k, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
            pvadata_t value = databuffer[kExt];
            if (fabs(value-correctvalue)>=tolerance) {
               pvErrorNoExit().printf("Rank %d, restricted index %d, extended index %d, value is %f instead of %f\n",
                     proc, k, kExt, value, correctvalue);
               status = PV_FAILURE;
            }
         }
      }
      free(databuffer);
      if (status == PV_SUCCESS) {
         pvInfo().printf("%s succeeded.\n", argv[0]);
      }
      else {
         pvError().printf("%s failed.\n", argv[0]);
      }
   }
   else {
      MPI_Send(layerData, numExtended*sizeof(pvadata_t), MPI_BYTE, rootproc, 15, icComm->communicator());
   }
   MPI_Barrier(icComm->communicator());
   return status;
}
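Here commSize() and commRank() play the same roles as MPI_Comm_size and MPI_Comm_rank on the column communicator, and the payload is sent as raw bytes with MPI_BYTE. For readers without PetaVision available, a self-contained sketch of the same gather-and-verify pattern in plain MPI might look like the following; the buffer contents, sizes, and tolerance are illustrative and not part of the InterColComm API.

#include <mpi.h>
#include <cmath>
#include <cstdio>
#include <vector>

// Standalone sketch of the rank-0 gather-and-verify pattern above,
// using raw MPI calls instead of InterColComm. Values are illustrative.
int main(int argc, char ** argv) {
   MPI_Init(&argc, &argv);
   int rank, size;
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // analogous to icComm->commRank()
   MPI_Comm_size(MPI_COMM_WORLD, &size);   // analogous to icComm->commSize()

   const int numValues = 16;
   const float correctValue = 0.5f, tolerance = 1.0e-7f;
   std::vector<float> localData(numValues, correctValue);  // stand-in for the layer buffer

   int status = 0;
   if (rank == 0) {
      std::vector<float> recvBuf(numValues);
      for (int proc = 0; proc < size; proc++) {
         if (proc == 0) {
            recvBuf = localData;  // rank 0 checks its own data directly
         }
         else {
            MPI_Recv(recvBuf.data(), numValues, MPI_FLOAT, proc, 15,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
         }
         for (int k = 0; k < numValues; k++) {
            if (std::fabs(recvBuf[k] - correctValue) >= tolerance) {
               std::fprintf(stderr, "Rank %d, index %d: %f instead of %f\n",
                            proc, k, recvBuf[k], correctValue);
               status = 1;
            }
         }
      }
   }
   else {
      MPI_Send(localData.data(), numValues, MPI_FLOAT, 0, 15, MPI_COMM_WORLD);
   }
   MPI_Barrier(MPI_COMM_WORLD);
   MPI_Finalize();
   return status;
}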
Example 2: updateState
int SegmentLayer::updateState(double timef, double dt) {
   pvdata_t* srcA = originalLayer->getActivity();
   pvdata_t* thisA = getActivity();
   assert(srcA);
   assert(thisA);
   const PVLayerLoc* loc = getLayerLoc();

   // Segment input layer based on segmentMethod
   if (strcmp(segmentMethod, "none") == 0) {
      int numBatchExtended = getNumExtendedAllBatches();
      // Copy activity over
      // Since both buffers should be identical in size, we can do a memcpy here
      memcpy(thisA, srcA, numBatchExtended * sizeof(pvdata_t));
   }
   else {
      // This case should never happen
      assert(0);
   }

   assert(loc->nf == 1);

   // Clear centerIdxs
   for (int bi = 0; bi < loc->nbatch; bi++) {
      centerIdx[bi].clear();
   }

   for (int bi = 0; bi < loc->nbatch; bi++) {
      pvdata_t* batchA = thisA + bi * getNumExtended();
      // Reset max/min buffers
      maxX.clear();
      maxY.clear();
      minX.clear();
      minY.clear();

      // Loop through this buffer to fill labelVec and idxVec
      // Looping through the restricted region, but indices are extended
      for (int yi = loc->halo.up; yi < loc->ny + loc->halo.up; yi++) {
         for (int xi = loc->halo.lt; xi < loc->nx + loc->halo.lt; xi++) {
            // Convert to local extended linear index
            int niLocalExt = yi * (loc->nx + loc->halo.lt + loc->halo.rt) + xi;
            // Convert yi and xi to global restricted indices
            int globalResYi = yi - loc->halo.up + loc->ky0;
            int globalResXi = xi - loc->halo.lt + loc->kx0;
            // Get label value
            // Note that we're assuming the activity here is integer-valued,
            // even though the buffer holds floats
            int labelVal = round(batchA[niLocalExt]);

            // Calculate max/min x and y for a single batch
            if (maxX.count(labelVal)) {
               // labelVal already exists in the map.
               // Here, we're assuming the 4 maps are in sync, so we use the
               // .at method, which throws an exception, as opposed to the
               // [] operator, which would silently add the key to the map
               if (globalResXi > maxX.at(labelVal)) {
                  maxX[labelVal] = globalResXi;
               }
               if (globalResXi < minX.at(labelVal)) {
                  minX[labelVal] = globalResXi;
               }
               if (globalResYi > maxY.at(labelVal)) {
                  maxY[labelVal] = globalResYi;
               }
               if (globalResYi < minY.at(labelVal)) {
                  minY[labelVal] = globalResYi;
               }
            }
            else {
               // labelVal doesn't exist yet; add it to the maps with the current values
               maxX[labelVal] = globalResXi;
               minX[labelVal] = globalResXi;
               maxY[labelVal] = globalResYi;
               minY[labelVal] = globalResYi;
            }
         }
      }

      // We need to communicate across MPI processes in case a segment crosses an MPI boundary
      InterColComm * icComm = parent->icCommunicator();
      int numMpi = icComm->commSize();
      int rank = icComm->commRank();  // local comm rank

      // Non-root processes simply send the buffer size and then the buffers
      int numLabels = maxX.size();
      if (rank != 0) {
         // Load buffers
         loadLabelBuf();
         // Send number of labels first
         MPI_Send(&numLabels, 1, MPI_INT, 0, rank, icComm->communicator());
         // Send labels, then max/min buffers
         MPI_Send(labelBuf, numLabels, MPI_INT, 0, rank, icComm->communicator());
         MPI_Send(maxXBuf, numLabels, MPI_INT, 0, rank, icComm->communicator());
         MPI_Send(maxYBuf, numLabels, MPI_INT, 0, rank, icComm->communicator());
         MPI_Send(minXBuf, numLabels, MPI_INT, 0, rank, icComm->communicator());
         MPI_Send(minYBuf, numLabels, MPI_INT, 0, rank, icComm->communicator());
//......... (part of the code is omitted here) .........
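The listing is cut off before the root-side receive, and PetaVision's actual handling of the received buffers is not shown here. In general, though, the receive loop that matches a send protocol like the one above has to iterate over the commSize() ranks, receive the label count first, and only then receive the variable-length arrays; MPI's in-order delivery between a given sender and receiver on the same communicator keeps the messages matched. A generic, hypothetical sketch of that counterpart (names and merging logic are illustrative only, not the omitted PetaVision code):

// Hypothetical root-side counterpart to the sends above (not the omitted PetaVision code).
// icComm, rank, and the maxX/maxY/minX/minY maps are assumed to be the same members used by the sender.
if (rank == 0) {
   for (int proc = 1; proc < icComm->commSize(); proc++) {
      int recvNumLabels = 0;
      // The sender tags every message with its own rank, so the root receives with tag 'proc'.
      MPI_Recv(&recvNumLabels, 1, MPI_INT, proc, proc, icComm->communicator(), MPI_STATUS_IGNORE);
      std::vector<int> recvLabels(recvNumLabels), recvMaxX(recvNumLabels), recvMaxY(recvNumLabels),
                       recvMinX(recvNumLabels), recvMinY(recvNumLabels);
      MPI_Recv(recvLabels.data(), recvNumLabels, MPI_INT, proc, proc, icComm->communicator(), MPI_STATUS_IGNORE);
      MPI_Recv(recvMaxX.data(), recvNumLabels, MPI_INT, proc, proc, icComm->communicator(), MPI_STATUS_IGNORE);
      MPI_Recv(recvMaxY.data(), recvNumLabels, MPI_INT, proc, proc, icComm->communicator(), MPI_STATUS_IGNORE);
      MPI_Recv(recvMinX.data(), recvNumLabels, MPI_INT, proc, proc, icComm->communicator(), MPI_STATUS_IGNORE);
      MPI_Recv(recvMinY.data(), recvNumLabels, MPI_INT, proc, proc, icComm->communicator(), MPI_STATUS_IGNORE);
      // Merge each received bounding box into the root's max/min maps:
      // element-wise max for maxX/maxY, element-wise min for minX/minY.
      for (int i = 0; i < recvNumLabels; i++) {
         int label = recvLabels[i];
         if (maxX.count(label)) {
            maxX[label] = std::max(maxX[label], recvMaxX[i]);
            maxY[label] = std::max(maxY[label], recvMaxY[i]);
            minX[label] = std::min(minX[label], recvMinX[i]);
            minY[label] = std::min(minY[label], recvMinY[i]);
         }
         else {
            maxX[label] = recvMaxX[i];
            maxY[label] = recvMaxY[i];
            minX[label] = recvMinX[i];
            minY[label] = recvMinY[i];
         }
      }
   }
}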