This article collects typical usage examples of the C++ method DofManager::givePartitionList. If you are unsure what DofManager::givePartitionList does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples of the enclosing class DofManager.
Four code examples of DofManager::givePartitionList are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
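All four examples revolve around the same pattern: a DofManager that is shared across partitions exposes its sharing partitions through givePartitionList(), and membership of a given rank is tested with IntArray::findFirstIndexOf(). The minimal sketch below distills that pattern; the helper name isSharedWithRank and its standalone form are illustrative assumptions, but every call it makes appears in the examples that follow.

bool isSharedWithRank(DofManager *dofman, int remoteRank)
{
    // only shared dof managers carry a meaningful partition list in the examples below
    if ( dofman->giveParallelMode() != DofManager_shared ) {
        return false;
    }
    // givePartitionList() returns the list of partitions (ranks) sharing this dof manager;
    // findFirstIndexOf() returns a nonzero index when remoteRank is present, 0 otherwise
    return dofman->givePartitionList()->findFirstIndexOf(remoteRank) != 0;
}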
Example 1: packSharedDmanPartitions
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
             ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
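Example 1 only shows the sending side. A receiving counterpart would read global numbers until it hits PARMETISLB_END_DATA and rebuild each partition list. The sketch below is an assumption-laden illustration: it presumes ProcessCommunicatorBuff offers a read() symmetric to the write() calls above and that IntArray::restoreYourself() is the inverse of the storeYourself() call used when packing; neither call appears in the examples on this page.

int unpackSharedDmanPartitionsSketch(Domain *domain, ProcessCommunicator &pc)
{
    // same communicator buffer access as in Example 1
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    int globnum;
    pcbuff->read(globnum);                      // assumed symmetric counterpart of pcbuff->write(...)
    while ( globnum != PARMETISLB_END_DATA ) {
        IntArray newPartitions;
        newPartitions.restoreYourself(*pcbuff); // assumed inverse of storeYourself(*pcbuff)
        // ... locate the local DofManager whose global number equals globnum and
        //     merge newPartitions into its partition list (e.g. via setPartitionList) ...
        pcbuff->read(globnum);                  // next global number, or the end marker
    }
    return 1;
}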
Example 2: _empty
/* Will delete those dofManagers that were sent to a remote partition and are locally owned here,
 * so they are no longer necessary (those with state equal to DM_Remote and DM_SharedMerge).
 * This will update the domain DofManager list as well as the global dmanMap, and physically deletes the remote dofManager.
 */
void
LoadBalancer :: deleteRemoteDofManagers(Domain *d)
{
    int i, ndofman = d->giveNumberOfDofManagers();
    //LoadBalancer* lb = this->giveLoadBalancer();
    LoadBalancer :: DofManMode dmode;
    DofManager *dman;
    int myrank = d->giveEngngModel()->giveRank();
    DomainTransactionManager *dtm = d->giveTransactionManager();

    // loop over local nodes
    for ( i = 1; i <= ndofman; i++ ) {
        dmode = this->giveDofManState(i);
        if ( dmode == LoadBalancer :: DM_Remote ) {
            // positive candidate found
            dtm->addTransaction(DomainTransactionManager :: DTT_Remove, DomainTransactionManager :: DCT_DofManager, d->giveDofManager(i)->giveGlobalNumber(), NULL);
            // dmanMap.erase (d->giveDofManager (i)->giveGlobalNumber());
            //dman = dofManagerList->unlink (i);
            //delete dman;
        } else if ( dmode == LoadBalancer :: DM_NULL ) {
            // positive candidate found; we delete all null dof managers
            // they will be created by nonlocalmatwtp if necessary.
            // potentially, they can be reused, but this would make the code too complex
            dtm->addTransaction(DomainTransactionManager :: DTT_Remove, DomainTransactionManager :: DCT_DofManager, d->giveDofManager(i)->giveGlobalNumber(), NULL);
        } else if ( dmode == LoadBalancer :: DM_Shared ) {
            dman = d->giveDofManager(i);
            dman->setPartitionList( this->giveDofManPartitions(i) );
            dman->setParallelMode(DofManager_shared);
            if ( !dman->givePartitionList()->findFirstIndexOf(myrank) ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_Remove, DomainTransactionManager :: DCT_DofManager, d->giveDofManager(i)->giveGlobalNumber(), NULL);
                //dmanMap.erase (this->giveDofManager (i)->giveGlobalNumber());
                //dman = dofManagerList->unlink (i);
                //delete dman;
            }
        } else if ( dmode == LoadBalancer :: DM_Local ) {
            IntArray _empty(0);
            dman = d->giveDofManager(i);
            dman->setPartitionList(& _empty);
            dman->setParallelMode(DofManager_local);
        } else {
            OOFEM_ERROR("Domain::deleteRemoteDofManagers: unknown dmode encountered");
        }
    }
}
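One detail that makes Examples 2 and 4 work: IntArray::findFirstIndexOf() returns the 1-based position of the value in the array and 0 when it is absent, so the return value doubles as a boolean. A tiny illustrative fragment, assuming the surrounding variables are set up as in Example 2:

const IntArray *plist = dman->givePartitionList();
if ( !plist->findFirstIndexOf(myrank) ) {
    // myrank is no longer among the sharing partitions, so the local copy can be
    // scheduled for removal, exactly as Example 2 does via dtm->addTransaction(...)
}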
Example 3: sizeToSend
void
PetscNatural2GlobalOrdering :: init(EngngModel *emodel, EquationID ut, int di, EquationType et)
{
    Domain *d = emodel->giveDomain(di);
    int i, j, k, p, ndofs, ndofman = d->giveNumberOfDofManagers();
    int myrank = emodel->giveRank();
    DofManager *dman;
    // determine number of local eqs + number of those shared DOFs which are numbered by receiver
    // shared dofman is numbered on partition with lowest rank number
    EModelDefaultEquationNumbering dn;
    EModelDefaultPrescribedEquationNumbering dpn;

#ifdef __VERBOSE_PARALLEL
    VERBOSEPARALLEL_PRINT("PetscNatural2GlobalOrdering :: init", "initializing N2G ordering", myrank);
#endif

    l_neqs = 0;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        /*
         * if (dman->giveParallelMode() == DofManager_local) { // count all dofman eqs
         *     ndofs = dman->giveNumberOfDofs ();
         *     for (j=1; j<=ndofs; j++) {
         *         if (dman->giveDof(j)->isPrimaryDof()) {
         *             if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *         }
         *     }
         * } else if (dman->giveParallelMode() == DofManager_shared) {
         *     // determine if problem is the lowest one sharing the dofman; if yes the receiver is responsible to
         *     // deliver number
         *     IntArray *plist = dman->givePartitionList();
         *     int n = plist->giveSize();
         *     int minrank = myrank;
         *     for (j=1; j<=n; j++) minrank = min (minrank, plist->at(j));
         *     if (minrank == myrank) { // count eqs
         *         ndofs = dman->giveNumberOfDofs ();
         *         for (j=1; j<=ndofs; j++) {
         *             if (dman->giveDof(j)->isPrimaryDof()) {
         *                 if (dman->giveDof(j)->giveEquationNumber()) l_neqs++;
         *             }
         *         }
         *     }
         * } // end shared dman
         */
        if ( isLocal(dman) ) {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    if ( et == et_standard ) {
                        if ( dman->giveDof(j)->giveEquationNumber(dn) ) {
                            l_neqs++;
                        }
                    } else {
                        if ( dman->giveDof(j)->giveEquationNumber(dpn) ) {
                            l_neqs++;
                        }
                    }
                }
            }
        }
    }

    // exchange with other procs the number of eqs numbered on particular procs
    int *leqs = new int [ emodel->giveNumberOfProcesses() ];
    MPI_Allgather(& l_neqs, 1, MPI_INT, leqs, 1, MPI_INT, MPI_COMM_WORLD);

    // compute local offset
    int offset = 0;
    for ( j = 0; j < myrank; j++ ) {
        offset += leqs [ j ];
    }

    // count global number of eqs
    for ( g_neqs = 0, j = 0; j < emodel->giveNumberOfProcesses(); j++ ) {
        g_neqs += leqs [ j ];
    }

    // send numbered shared ones
    if ( et == et_standard ) {
        locGlobMap.resize( emodel->giveNumberOfEquations(ut) );
    } else {
        locGlobMap.resize( emodel->giveNumberOfPrescribedEquations(ut) );
    }

    // determine shared dofs
    int psize, nproc = emodel->giveNumberOfProcesses();
    IntArray sizeToSend(nproc), sizeToRecv(nproc), nrecToReceive(nproc);
#ifdef __VERBOSE_PARALLEL
    IntArray nrecToSend(nproc);
#endif
    const IntArray *plist;
    for ( i = 1; i <= ndofman; i++ ) {
        // if (domain->giveDofManager(i)->giveParallelMode() == DofManager_shared) {
        if ( isShared( d->giveDofManager(i) ) ) {
            int n = d->giveDofManager(i)->giveNumberOfDofs();
            plist = d->giveDofManager(i)->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }
//......... the rest of this code example has been omitted .........
Example 4: pcDataStream
int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    // **************************************************
    // Pack migrating data to remote partition
    // **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if dofman is already present on the remote partition, there is no need to sync
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
             ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* this is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending its context (it is already there);
             * here, for simplicity, it is always sent */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);

    return 1;
}