本文整理汇总了C++中PatchMap::node方法的典型用法代码示例。如果您正苦于以下问题:C++ PatchMap::node方法的具体用法?C++ PatchMap::node怎么用?C++ PatchMap::node使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类PatchMap的用法示例。
在下文中一共展示了PatchMap::node方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: releaseComputes
void Sync::releaseComputes()
{
PatchMap *patchMap = PatchMap::Object();
traceUserEvent(eventReleaseComputes);
for (int i= 0; i<cnum; i++) {
int &pid = clist[i].pid;
if (pid == -1) continue;
if (clist[i].step != step) {
continue;
}
// CkPrintf(" %d-%d-%d ",
// clist[i].pid, clist[i].step,
// patchMap->patch(pid)->flags.sequence);
ComputePtrListIter cid = clist[i].cid;
int compute_count = 0;
for(cid = cid.begin(); cid != cid.end(); cid++) {
compute_count++;
(*cid)->patchReady(pid,clist[i].doneMigration,step);
}
if (compute_count == 0 && patchMap->node(pid) != CkMyPe()) {
iout << iINFO << "PATCH_COUNT-Sync step " << step
<< "]: Patch " << pid << " on PE "
<< CkMyPe() <<" home patch "
<< patchMap->node(pid) << " does not have any computes\n"
<< endi;
}
pid = -1;
}
// CkPrintf("\n");
}
示例2: requiredProxies
// Collect the remote nodes that need a proxy of patch `id`: the base nodes
// of the patch itself and of its downstream neighbors, excluding the
// patch's home node.  Distinct node ids are appended to neighborNodes[],
// counted by nProxyNodes.
// NOTE(review): this excerpt is truncated by the example site -- the final
// "return nProxyNodes;" and the function's closing brace are not shown.
int NamdHybridLB::requiredProxies(PatchID id, int neighborNodes[])
{
PatchMap* patchMap = PatchMap::Object();
// Home node of the patch; proxies are only needed elsewhere.
int myNode = patchMap->node(id);
int nProxyNodes = 0;
// Guard macro: executes the statement that follows it only if proxyNode is
// not already present in neighborNodes[0..nProxyNodes-1].
// (No comments on the macro lines themselves: '\'-newline splicing happens
// before comment removal, so a '//' there would consume the next line.)
#define IF_NEW_NODE \
int j; \
for ( j=0; j<nProxyNodes && neighborNodes[j] != proxyNode; ++j ); \
if ( j == nProxyNodes )
// The patch itself plus every possible one-away and two-away neighbor.
PatchID neighbors[1 + PatchMap::MaxOneAway + PatchMap::MaxTwoAway];
neighbors[0] = id;
int numNeighbors = 1 + patchMap->downstreamNeighbors(id,neighbors+1);
for ( int i = 0; i < numNeighbors; ++i ) {
const int proxyNode = patchMap->basenode(neighbors[i]);
if ( proxyNode != myNode ) {
IF_NEW_NODE {
neighborNodes[nProxyNodes] = proxyNode;
nProxyNodes++;
}
}
}
示例3: buildData
/**
 * @brief Builds the data structures required for the load balancing strategies in NAMD.
 */
// NOTE(review): this excerpt is truncated by the example site (see the
// "omitted" marker at the end) -- the tail of the function is not shown.
int NamdHybridLB::buildData(LDStats* stats) {
int n_pes = stats->nprocs();
PatchMap* patchMap = PatchMap::Object();
ComputeMap* computeMap = ComputeMap::Object();
const SimParameters* simParams = Node::Object()->simParameters;
// Scaling factors applied to measured background walltime, chosen per
// processor role (PME / home-patch / other) below.
BigReal bgfactor = simParams->ldbBackgroundScaling;
BigReal pmebgfactor = simParams->ldbPMEBackgroundScaling;
BigReal homebgfactor = simParams->ldbHomeBackgroundScaling;
int pmeOn = simParams->PMEOn;
int unLoadPme = simParams->ldbUnloadPME;
int pmeBarrier = simParams->PMEBarrier;
int unLoadZero = simParams->ldbUnloadZero;
int unLoadOne = simParams->ldbUnloadOne;
int unLoadIO= simParams->ldbUnloadOutputPEs;
// traversing the list of processors and getting their load information
int i, pe_no;
for (i=0; i<n_pes; ++i) {
pe_no = stats->procs[i].pe;
// BACKUP processorArray[i].Id = i;
processorArray[i].Id = pe_no; // absolute pe number
processorArray[i].available = true;
// Pick the background-load scaling by role: PME pe, home-patch pe, other.
// BACKUP if ( pmeOn && isPmeProcessor(i) )
if ( pmeOn && isPmeProcessor(pe_no) ) {
processorArray[i].backgroundLoad = pmebgfactor * stats->procs[i].bg_walltime;
// BACKUP } else if (patchMap->numPatchesOnNode(i) > 0) {
} else if (patchMap->numPatchesOnNode(pe_no) > 0) {
processorArray[i].backgroundLoad = homebgfactor * stats->procs[i].bg_walltime;
} else {
processorArray[i].backgroundLoad = bgfactor * stats->procs[i].bg_walltime;
}
processorArray[i].idleTime = stats->procs[i].idletime;
processorArray[i].load = processorArray[i].computeLoad = 0.0;
}
// If I am group zero, then offload processor 0 and 1 in my group
if(stats->procs[0].pe == 0) {
if(unLoadZero) processorArray[0].available = false;
if(unLoadOne) processorArray[1].available = false;
}
// if all pes are Pme, disable this flag
if (pmeOn && unLoadPme) {
for (i=0; i<n_pes; i++) {
if(!isPmeProcessor(stats->procs[i].pe)) break;
}
if (i == n_pes) {
iout << iINFO << "Turned off unLoadPme flag!\n" << endi;
unLoadPme = 0;
}
}
// Mark PME pes (and pe 0 when the PME barrier is on) as unavailable.
if (pmeOn && unLoadPme) {
for (i=0; i<n_pes; i++) {
if ((pmeBarrier && i==0) || isPmeProcessor(stats->procs[i].pe))
processorArray[i].available = false;
}
}
// if all pes are output, disable this flag
#ifdef MEM_OPT_VERSION
if (unLoadIO) {
if (simParams->numoutputprocs == n_pes) {
iout << iINFO << "Turned off unLoadIO flag!\n" << endi;
unLoadIO = 0;
}
}
if (unLoadIO){
for (i=0; i<n_pes; i++) {
if (isOutputProcessor(stats->procs[i].pe))
processorArray[i].available = false;
}
}
#endif
// need to go over all patches to get all required proxies
int numPatches = patchMap->numPatches();
int totalLocalProxies = 0;
int totalProxies = 0;
for ( int pid=0; pid<numPatches; ++pid ) {
int neighborNodes[PatchMap::MaxOneAway + PatchMap::MaxTwoAway];
patchArray[pid].Id = pid;
patchArray[pid].numAtoms = 0;
patchArray[pid].processor = patchMap->node(pid);
const int numProxies =
#if 0 // USE_TOPOMAP - this function needs to be there for the hybrid case
requiredProxiesOnProcGrid(pid,neighborNodes);
#else
requiredProxies(pid, neighborNodes);
#endif
int numLocalProxies = 0;
for (int k=0; k<numProxies; k++) {
//......... remainder of this function omitted in this excerpt .........
示例4: mapPartitionsToNodes
// Assign each partition produced by the ORB decomposition to a processor.
// Active branch (#if 1): when background load is honored, the target pe was
// already chosen during partitioning (bkpes[0]); otherwise partitions are
// handed out to the available processors in increasing pe order.
// The disabled #else branch keeps an older greedy heuristic that matched
// each partition to the node already holding most of its computes' patches.
// Finally, with +LBDebug the resulting mapping is printed.
void OrbLB::mapPartitionsToNodes()
{
  int i,j;
#if 1
  if (!_lb_args.ignoreBgLoad()) {
    // processor mapping has already been determined by the background load pe
    for (i=0; i<npartition; i++) partitions[i].node = partitions[i].bkpes[0];
  }
  else {
    // No per-pe background info: pack partitions onto the available pes.
    int n = 0;
    for (i=0; i<P; i++) {
      if (!statsData->procs[i].available) continue;
      partitions[n++].node = i;
    }
  }
#else
  PatchMap *patchMap = PatchMap::Object();
  // pool[j][k] counts how often partition j's computes touch a patch
  // whose home is node k.
  int **pool = new int *[P];
  for (i=0; i<P; i++) pool[i] = new int[P];
  for (i=0; i<P; i++) for (j=0; j<P; j++) pool[i][j] = 0;

  // sum up the number of nodes that patches of computes are on
  for (i=0; i<numComputes; i++)
  {
    for (j=0; j<P; j++)
      if (computeLoad[i].refno == partitions[j].refno)
      {
        int node1 = patchMap->node(computes[i].patch1);
        int node2 = patchMap->node(computes[i].patch2);
        pool[j][node1]++;
        pool[j][node2]++;
      }
  }
#ifdef DEBUG
  for (i=0; i<P; i++) {
    for (j=0; j<P; j++) CmiPrintf("%d ", pool[i][j]);
    CmiPrintf("\n");
  }
#endif
  // Greedily give the strongest (partition, node) affinity its node until
  // no unmapped partition remains.
  while (1)
  {
    int index=-1, node=0, eager=-1;
    for (j=0; j<npartition; j++) {
      if (partitions[j].node != -1) continue;
      int wantmost=-1, maxnodes=-1;
      // BUG FIX: `k` was used here without any declaration (only i,j exist
      // in this function), so this branch could not compile if enabled.
      for (int k=0; k<P; k++)
        if (pool[j][k] > maxnodes && !partitions[k].mapped) { wantmost=k; maxnodes = pool[j][k]; }
      if (maxnodes > eager) {
        index = j; node = wantmost; eager = maxnodes;
      }
    }
    if (eager == -1) break;
    partitions[index].node = node;
    // NOTE(review): indexing partitions[] by a pe number looks suspicious;
    // a per-node "taken" flag array may have been intended -- confirm
    // before ever re-enabling this branch.
    partitions[node].mapped = 1;
  }

  for (i=0; i<P; i++) delete [] pool[i];
  delete [] pool;
#endif
  /*
  if (_lb_args.debug()) {
    CmiPrintf("partition load: ");
    for (i=0; i<npartition; i++) CmiPrintf("%f ", partitions[i].load);
    CmiPrintf("\n");
    CmiPrintf("partitions to nodes mapping: ");
    for (i=0; i<npartition; i++) CmiPrintf("%d ", partitions[i].node);
    CmiPrintf("\n");
  }
  */
  if (_lb_args.debug()) {
    CmiPrintf("After partitioning: \n");
    for (i=0; i<npartition; i++) {
      // Object load = total load minus the background walltime of the pe
      // that supplied the background measurement for this partition.
      double bgload = 0.0;
      if (!_lb_args.ignoreBgLoad())
        bgload = statsData->procs[partitions[i].bkpes[0]].bg_walltime;
      CmiPrintf("[%d=>%d] (%d,%d,%d) (%d,%d,%d) load:%f count:%d objload:%f\n", i, partitions[i].node, partitions[i].origin[0], partitions[i].origin[1], partitions[i].origin[2], partitions[i].corner[0], partitions[i].corner[1], partitions[i].corner[2], partitions[i].load, partitions[i].count, partitions[i].load-bgload);
    }
    for (i=npartition; i<P; i++) CmiPrintf("[%d] --------- \n", i);
  }
}