This article collects typical usage examples of the C++ ProcessCommunicator class. If you are wondering what the ProcessCommunicator class does, how to use it, or where to find usage examples, the curated class code examples below may help.
A total of 15 code examples of the ProcessCommunicator class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: unpackSharedDofManData
int
NodalAveragingRecoveryModel :: unpackSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1;
    int i, j, eq, indx, size, flag, intValue;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    double value;

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        indx = s->regionNodalNumbers->at( toRecvMap->at(i) );
        // toRecvMap contains all dofmans shared with the remote partition;
        // one has to check whether the received contribution for a particular shared node
        // is available for the given region
        result &= pcbuff->unpackInt(flag);
        if ( flag ) {
            // a flag of "1" indicates a valid contribution for the given shared node
            result &= pcbuff->unpackInt(intValue);
            // now check if we have a valid local number
            if ( indx ) {
                s->regionDofMansConnectivity->at(indx) += intValue;
            }

            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->unpackDouble(value);
                if ( indx ) {
                    s->lhs->at(eq + j) += value;
                }
            }
        }
    }

    return result;
}
Example 2: unpackMigratingElementDependencies
int NonlocalMaterialWTP :: unpackMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // unpack element data
    do {
        pcbuff->unpackInt(_globnum);
        if ( _globnum == NonlocalMaterialWTP_END_DATA ) {
            break;
        }

        pcbuff->unpackIntArray(nonlocElementDependencyMap [ _globnum ]);
    } while ( 1 );

    return 1;
}
Example 3: packSharedDofManData
int
NodalAveragingRecoveryModel :: packSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1, i, j, indx, eq, size;
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    IntArray const *toSendMap = processComm.giveToSendMap();

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        // toSendMap contains all dofmans shared with the remote partition;
        // one has to check whether the value of a particular shared node is available for the given region
        indx = s->regionNodalNumbers->at( toSendMap->at(i) );
        if ( indx ) {
            // pack "1" to indicate that this is a valid contribution for the given shared node
            result &= pcbuff->packInt(1);
            result &= pcbuff->packInt( s->regionDofMansConnectivity->at(indx) );
            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->packDouble( s->lhs->at(eq + j) );
            }
        } else {
            // shared node is not in the active region (determined by s->regionNodalNumbers)
            result &= pcbuff->packInt(0);
        }
    }

    return result;
}
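Examples 1 and 3 form a matched pack/unpack pair: for every shared node the sender writes a validity flag, followed (when the flag is 1) by a connectivity count and regionValSize nodal values, and the receiver must read those fields back in exactly the same order over the same send/receive maps. The self-contained sketch below illustrates that ordering contract; ToyBuff is an illustrative stand-in for ProcessCommunicatorBuff, not OOFEM API.

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative stand-in for ProcessCommunicatorBuff: a FIFO of doubles.
struct ToyBuff {
    std::vector<double> data;
    std::size_t pos = 0;
    int packInt(int v)          { data.push_back(v); return 1; }
    int packDouble(double v)    { data.push_back(v); return 1; }
    int unpackInt(int &v)       { v = (int) data.at(pos++); return 1; }
    int unpackDouble(double &v) { v = data.at(pos++); return 1; }
};

int main()
{
    const int regionValSize = 2;
    ToyBuff buff;

    // Sender side: one valid contribution (flag 1), then a node outside the region (flag 0).
    buff.packInt(1);        // flag: contribution present
    buff.packInt(3);        // connectivity count
    buff.packDouble(0.5);   // regionValSize nodal values
    buff.packDouble(1.5);
    buff.packInt(0);        // flag: node not in active region

    // Receiver side: must mirror the sender's field order exactly.
    int flag;
    while ( buff.pos < buff.data.size() ) {
        buff.unpackInt(flag);
        if ( flag ) {
            int conn;
            double val;
            buff.unpackInt(conn);
            for ( int j = 1; j <= regionValSize; j++ ) {
                buff.unpackDouble(val);
                std::printf("value %g (connectivity %d)\n", val, conn);
            }
        }
    }
    return 0;
}

If the receiver skipped the connectivity count for a flagged node, every later field would be misread, which is why both routines traverse their maps in identical order.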
Example 4: packSharedDmanPartitions
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
             ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf(stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
Example 5: packDofManagers
int
StructuralEngngModel :: packDofManagers(FloatArray *src, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    IntArray const *toSendMap = processComm.giveToSendMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toSendMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }

            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->packDouble( src->at(eqNum) );
            }
        }
    }

    return result;
}
Example 6: packMigratingElementDependencies
int NonlocalMaterialWTP :: packMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    int ielem, nelem = d->giveNumberOfElements();
    int _globnum;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( lb->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!!!)
            // pack type
            _globnum = elem->giveGlobalNumber();
            pcbuff->packInt(_globnum);
            pcbuff->packIntArray(nonlocElementDependencyMap [ _globnum ]);
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(NonlocalMaterialWTP_END_DATA);

    return 1;
}
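Examples 2 and 6 are the unpack/pack pair for element dependency maps: the sender streams (global element number, dependency array) records and closes the stream with NonlocalMaterialWTP_END_DATA, while the receiver loops until it reads that sentinel. Below is a minimal, stand-alone sketch of the same sentinel-terminated protocol over a plain std::vector<int>; the END_DATA value is illustrative and must not collide with a real key.

#include <cstdio>
#include <map>
#include <vector>

// Illustrative sentinel; in OOFEM this role is played by NonlocalMaterialWTP_END_DATA.
constexpr int END_DATA = -1;

int main()
{
    // Sender: stream (key, payload-length, payload...) records, then the sentinel.
    std::map<int, std::vector<int>> deps = { { 7, { 1, 2 } }, { 9, { 4 } } };
    std::vector<int> stream;
    for ( const auto &kv : deps ) {
        stream.push_back(kv.first);
        stream.push_back((int) kv.second.size());
        stream.insert(stream.end(), kv.second.begin(), kv.second.end());
    }
    stream.push_back(END_DATA);

    // Receiver: read records until the sentinel appears.
    std::map<int, std::vector<int>> received;
    std::size_t pos = 0;
    for ( ;; ) {
        int key = stream.at(pos++);
        if ( key == END_DATA ) {
            break;
        }
        int n = stream.at(pos++);
        received[key].assign(stream.begin() + pos, stream.begin() + pos + n);
        pos += n;
    }
    std::printf("received %zu dependency records\n", received.size());
    return 0;
}

The sentinel spares the sender from having to announce the record count up front, at the cost of reserving one value that can never be a legitimate key.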
Example 7: packRemoteElements
int NonlocalMaterialWTP :: packRemoteElements(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int nnodes, inode;
    DofManager *node, *dofman;
    Element *elem;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // here we also have to pack the nodes referenced by the packed elements:
    // assemble the set of nodes needed by those elements;
    // these have to be sent as well (except those already shared with iproc)
    std :: set< int >nodesToSend;
    for ( int ie: toSendList [ iproc ] ) {
        //ie = d->elementGlobal2Local(gie);
        elem = d->giveElement(ie);
        nnodes = elem->giveNumberOfDofManagers();
        for ( int i = 1; i <= nnodes; i++ ) {
            node = elem->giveDofManager(i);
            if ( ( node->giveParallelMode() == DofManager_local ) ||
                 ( node->isShared() && !node->givePartitionList()->contains(iproc) ) ) {
                nodesToSend.insert( node->giveGlobalNumber() );
            }
        }
    }

    // pack nodes that become null nodes on the remote partition
    for ( int in: nodesToSend ) {
        inode = d->dofmanGlobal2Local(in);
        dofman = d->giveDofManager(inode);
        pcbuff->packString( dofman->giveInputRecordName() );
        dofman->saveContext(& pcDataStream, CM_Definition | CM_State | CM_UnknownDictState);
    }

    pcbuff->packString("");

    for ( int ie: toSendList [ iproc ] ) {
        //ie = d->elementGlobal2Local(gie);
        elem = d->giveElement(ie);
        // pack local element (node numbers should be global ones!!!)
        // pack type
        pcbuff->packString( elem->giveInputRecordName() );
        elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
    }

    pcbuff->packString("");

    return 1;
}
Example 8: unpackRemoteElements
int NonlocalMaterialWTP :: unpackRemoteElements(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    std :: string _type;
    DofManager *dofman;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // unpack dofman data
    do {
        pcbuff->unpackString(_type);
        if ( _type.size() == 0 ) {
            break;
        }

        dofman = classFactory.createDofManager(_type.c_str(), 0, d);
        dofman->restoreContext(& pcDataStream, CM_Definition | CM_State | CM_UnknownDictState);
        dofman->setParallelMode(DofManager_null);
        if ( d->dofmanGlobal2Local( dofman->giveGlobalNumber() ) ) {
            // record already exists
            delete dofman;
        } else {
            d->giveTransactionManager()->addDofManTransaction(DomainTransactionManager :: DTT_ADD,
                                                              dofman->giveGlobalNumber(),
                                                              dofman);
        }
    } while ( 1 );

    // unpack element data
    Element *elem;
    _partitions.resize(1);
    _partitions.at(1) = iproc;
    do {
        pcbuff->unpackString(_type);
        if ( _type.size() == 0 ) {
            break;
        }

        elem = classFactory.createElement(_type.c_str(), 0, d);
        elem->restoreContext(& pcDataStream, CM_Definition | CM_State);
        elem->setParallelMode(Element_remote);
        elem->setPartitionList(_partitions);
        d->giveTransactionManager()->addElementTransaction(DomainTransactionManager :: DTT_ADD,
                                                           elem->giveGlobalNumber(), elem);
    } while ( 1 );

    return 1;
}
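Examples 7 and 8 apply the same sentinel idea to heterogeneous records: each dof manager or element is prefixed with its input-record name so the receiver can recreate it through the class factory, and an empty string closes each section (nodes first, then elements). The sketch below mimics that string-keyed factory dispatch with an empty-string terminator; the record types and factory map are illustrative stand-ins, not OOFEM classes.

#include <cstdio>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Record { virtual ~Record() = default; virtual const char *name() const = 0; };
struct NodeRec : Record { const char *name() const override { return "node"; } };
struct ElemRec : Record { const char *name() const override { return "element"; } };

int main()
{
    // Illustrative stand-in for classFactory.createDofManager / createElement.
    std::map<std::string, std::function<std::unique_ptr<Record>()>> factory = {
        { "node",    [] { return std::make_unique<NodeRec>(); } },
        { "element", [] { return std::make_unique<ElemRec>(); } },
    };

    // Sender side: type names followed by an empty-string terminator.
    std::vector<std::string> stream = { "node", "node", "element", "" };

    // Receiver side: create objects until the empty string is read.
    std::size_t pos = 0;
    for ( ;; ) {
        const std::string &type = stream.at(pos++);
        if ( type.empty() ) {
            break;
        }
        auto obj = factory.at(type)();
        std::printf("created %s\n", obj->name());
    }
    return 0;
}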
Example 9: packRemoteElementData
int
StructuralEngngModel :: packRemoteElementData(ProcessCommunicator &processComm)
{
    int result = 1;
    int i, size;
    IntArray const *toSendMap = processComm.giveToSendMap();
    CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
    Domain *domain = this->giveDomain(1);

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        result &= domain->giveElement( toSendMap->at(i) )->packUnknowns( * send_buff, this->giveCurrentStep() );
    }

    return result;
}
Example 10: unpackDofManagers
int
StructuralEngngModel :: unpackDofManagers(FloatArray *dest, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    dofManagerParallelMode dofmanmode;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;
    double value;

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toRecvMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        dofmanmode = dman->giveParallelMode();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }

            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->unpackDouble(value);
                if ( dofmanmode == DofManager_shared ) {
                    dest->at(eqNum) += value;
                } else if ( dofmanmode == DofManager_remote ) {
                    dest->at(eqNum) = value;
                } else {
                    _error("unpackReactions: unknown dof manager parallel mode");
                }
            }
        }
    }

    return result;
}
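In example 10, the parallel mode of the receiving dof manager decides what happens to an incoming value: contributions for shared dof managers are accumulated (each partition holds a partial sum), while values for remote dof managers simply overwrite the local mirror. A tiny illustrative sketch of that branch follows; the enum and record type are stand-ins, not OOFEM types.

#include <cstdio>
#include <vector>

enum class Mode { Shared, Remote };   // stand-in for dofManagerParallelMode

int main()
{
    std::vector<double> dest = { 1.0, 2.0 };

    // Incoming (eqNum, value, mode) triples, e.g. as produced by packDofManagers.
    struct Incoming { int eq; double value; Mode mode; };
    std::vector<Incoming> msgs = { { 1, 0.5, Mode::Shared }, { 2, 7.0, Mode::Remote } };

    for ( const auto &m : msgs ) {
        if ( m.mode == Mode::Shared ) {
            dest[m.eq - 1] += m.value;   // sum partial contributions from other partitions
        } else {
            dest[m.eq - 1] = m.value;    // mirror the owning partition's value
        }
    }
    std::printf("dest = { %g, %g }\n", dest[0], dest[1]);
    return 0;
}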
Example 11: unpackSharedDmanPartitions
int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum, _locnum;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // init domain global2local map
    domain->initGlobalDofManMap();

    pcbuff->read(_globnum);
    // unpack dofman data
    while ( _globnum != PARMETISLB_END_DATA ) {
        _partitions.restoreYourself(*pcbuff);
        if ( ( _locnum = domain->dofmanGlobal2Local(_globnum) ) ) {
            this->addSharedDofmanPartitions(_locnum, _partitions);
        } else {
            OOFEM_ERROR("internal error, unknown global dofman %d", _globnum);
        }

        /*
         * fprintf(stderr, "[%d] Received shared plist of %d ", myrank, _globnum);
         * for (int _i=1; _i<=dofManPartitions[_locnum-1].giveSize(); _i++)
         *     fprintf(stderr, "%d ", dofManPartitions[_locnum-1].at(_i));
         * fprintf(stderr, "\n");
         */
        pcbuff->read(_globnum);
    }

    return 1;
}
Example 12: catch
void Log::setupSelfBefore()
{
    SimulationItem::setupSelfBefore();

    ProcessCommunicator* comm;
    try
    {
        // get a pointer to the ProcessCommunicator without performing setup
        // to avoid catching (and hiding) fatal errors during such setup
        comm = find<ProcessCommunicator>(false);
    }
    catch (FatalError)
    {
        return;
    }

    // Do the find operation again, now to perform the setup of the
    // PeerToPeerCommunicator so that the correct rank is initialized
    comm = find<ProcessCommunicator>();

    if (comm->isMultiProc()) setRank(comm->rank());
}
Example 13: unpackRemoteElementData
int
StructuralEngngModel :: unpackRemoteElementData(ProcessCommunicator &processComm)
{
    int result = 1;
    int i, size;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
    Element *element;
    Domain *domain = this->giveDomain(1);

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        element = domain->giveElement( toRecvMap->at(i) );
        if ( element->giveParallelMode() == Element_remote ) {
            result &= element->unpackAndUpdateUnknowns( * recv_buff, this->giveCurrentStep() );
        } else {
            _error("unpackRemoteElementData: element is not remote");
        }
    }

    return result;
}
Example 14: unpackMigratingData
int
LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
{
    // create temp space for dofManagers and elements
    // merging should be made by domain ?
    // maps of new dofmanagers and elements indexed by global number
    // we can put local dofManagers and elements into maps (should be done before unpacking)
    // int nproc = this->giveEngngModel()->giveNumberOfProcesses();
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _mode, _globnum, _type;
    bool _newentry;
    classType _etype;
    IntArray _partitions, local_partitions;
    //LoadBalancer::DofManMode dmode;
    DofManager *dofman;
    DomainTransactionManager *dtm = d->giveTransactionManager();

    // **************************************************
    // Unpack migrating data to remote partition
    // **************************************************
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    pcbuff->unpackInt(_type);
    // unpack dofman data
    while ( _type != LOADBALANCER_END_DATA ) {
        _etype = ( classType ) _type;
        pcbuff->unpackInt(_mode);
        switch ( _mode ) {
        case LoadBalancer :: DM_Remote:
            // receiving a new local dofManager
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *     // data not available -> create a new one
             *     _newentry = true;
             *     dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);

            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack the list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_local);
            // add transaction if a new entry was allocated; otherwise the existing one has been modified via the returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }

            //dmanMap[_globnum] = dofman;
            break;

        case LoadBalancer :: DM_Shared:
            // receiving a new shared dofManager that was local on the sending partition;
            // should be received only once (from the partition where it was local)
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *     // data not available -> mode should be SharedUpdate
             *     _newentry = true;
             *     dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);

            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack the list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_shared);
#ifdef __VERBOSE_PARALLEL
            fprintf(stderr, "[%d] received Shared new dofman [%d]\n", myrank, _globnum);
#endif
            // add transaction if a new entry was allocated; otherwise the existing one has been modified via the returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }

            //dmanMap[_globnum] = dofman;
            break;
//......... part of the code is omitted here .........
Example 15: packMigratingData
int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    // **************************************************
    // Pack migrating data to remote partition
    // **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to the remote partition;
        // if the dofman is already present on the remote partition, there is no need to sync
        //if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) ) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
             ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* this is a potential performance leak: sending a shared dofman to a partition
             * in which it is already shared does not require sending its context (it is already there);
             * here, for simplicity, it is always sent */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);

            // send the list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!!!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global ones!!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);
    return 1;
}