This article collects typical usage examples of the C++ method ProcessCommunicator::giveProcessCommunicatorBuff. If you have been wondering what ProcessCommunicator::giveProcessCommunicatorBuff does in C++, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also browse further usage examples of the containing class, ProcessCommunicator.
The following presents 10 code examples of the ProcessCommunicator::giveProcessCommunicatorBuff method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
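All of the examples share one structure: obtain the ProcessCommunicatorBuff from the ProcessCommunicator, then stream primitive values into or out of it, guided by the communicator's send and receive maps. The following is a minimal sketch of that pattern, not taken verbatim from OOFEM; the function names packMyField/unpackMyField and the assumption that exactly one double is exchanged per mapped entry are illustrative, while the calls themselves (giveProcessCommunicatorBuff, giveToSendMap, giveToRecvMap, packDouble, unpackDouble) are the ones used in the real examples below.

// Minimal sketch of the pack/unpack pattern shared by the examples below.
// packMyField / unpackMyField are hypothetical names; the buffer and map
// accessors are those used in the real code further down.
int packMyField(FloatArray *src, ProcessCommunicator &processComm)
{
    int result = 1;
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    IntArray const *toSendMap = processComm.giveToSendMap();
    for ( int i = 1; i <= toSendMap->giveSize(); i++ ) {
        // pack one value per shared entity listed in the send map
        result &= pcbuff->packDouble( src->at( toSendMap->at(i) ) );
    }
    return result;
}

int unpackMyField(FloatArray *dest, ProcessCommunicator &processComm)
{
    int result = 1;
    double value;
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    for ( int i = 1; i <= toRecvMap->giveSize(); i++ ) {
        // unpack in the same order the remote partition packed
        result &= pcbuff->unpackDouble(value);
        dest->at( toRecvMap->at(i) ) += value; // accumulate the remote contribution
    }
    return result;
}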
Example 1: unpackSharedDofManData
int
NodalAveragingRecoveryModel :: unpackSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1;
    int i, j, eq, indx, size, flag, intValue;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    double value;

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        indx = s->regionNodalNumbers->at( toRecvMap->at(i) );
        // toRecvMap contains all dofmans shared with the remote partition;
        // one has to check whether the contribution received for a particular shared node is valid for the given region
        result &= pcbuff->unpackInt(flag);
        if ( flag ) {
            // "1" indicates that this is a valid contribution for the given shared node
            result &= pcbuff->unpackInt(intValue);
            // now check if we have a valid number
            if ( indx ) {
                s->regionDofMansConnectivity->at(indx) += intValue;
            }

            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->unpackDouble(value);
                if ( indx ) {
                    s->lhs->at(eq + j) += value;
                }
            }
        }
    }

    return result;
}
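This is the receive side of a flag-prefixed protocol: the sender (packSharedDofManData, Example 3) writes a 0/1 flag for every shared node and, only when the flag is 1, follows it with the connectivity count and the s->regionValSize nodal values, so both partitions always consume the same number of buffer entries per node.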
Example 2: pcDataStream
int NonlocalMaterialWTP :: unpackMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // unpack element data
    do {
        pcbuff->unpackInt(_globnum);
        if ( _globnum == NonlocalMaterialWTP_END_DATA ) {
            break;
        }

        pcbuff->unpackIntArray(nonlocElementDependencyMap [ _globnum ]);
    } while ( 1 );

    return 1;
}
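Here the number of incoming records is not known in advance, so the stream is terminated by the sentinel NonlocalMaterialWTP_END_DATA. A hedged sketch of what the matching sender side could look like follows; it assumes packInt/packIntArray counterparts to the unpack calls above and that the dependency map is iterable as (global number, IntArray) pairs, neither of which is shown in this example.

// Hypothetical sender-side sketch for Example 2 (assumptions noted above).
ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
for ( auto &entry : nonlocElementDependencyMap ) {
    pcbuff->packInt(entry.first);       // global element number
    pcbuff->packIntArray(entry.second); // its nonlocal dependency list
}
pcbuff->packInt(NonlocalMaterialWTP_END_DATA); // terminate the stream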
Example 3: packSharedDofManData
int
NodalAveragingRecoveryModel :: packSharedDofManData(parallelStruct *s, ProcessCommunicator &processComm)
{
    int result = 1, i, j, indx, eq, size;
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    IntArray const *toSendMap = processComm.giveToSendMap();

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        // toSendMap contains all dofmans shared with the remote partition;
        // one has to check whether the value of a particular shared node is available for the given region
        indx = s->regionNodalNumbers->at( toSendMap->at(i) );
        if ( indx ) {
            // pack "1" to indicate that this is a valid contribution for the given shared node
            result &= pcbuff->packInt(1);
            result &= pcbuff->packInt( s->regionDofMansConnectivity->at(indx) );
            eq = ( indx - 1 ) * s->regionValSize;
            for ( j = 1; j <= s->regionValSize; j++ ) {
                result &= pcbuff->packDouble( s->lhs->at(eq + j) );
            }
        } else {
            // the shared node is not in the active region (determined by s->regionNodalNumbers)
            result &= pcbuff->packInt(0);
        }
    }

    return result;
}
Example 4: packSharedDmanPartitions
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
             ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
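Note that this pack routine and its unpack counterpart in Example 8 use the buffer's write()/read() and storeYourself()/restoreYourself() interface rather than the typed packInt/unpackInt calls seen elsewhere, but the protocol is the same: a stream of (global number, partition list) records closed by the PARMETISLB_END_DATA sentinel.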
Example 5: packDofManagers
int
StructuralEngngModel :: packDofManagers(FloatArray *src, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    IntArray const *toSendMap = processComm.giveToSendMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toSendMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }

            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->packDouble( src->at(eqNum) );
            }
        }
    }

    return result;
}
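Only primary DOFs with a nonzero (prescribed or free) equation number are packed; the matching unpackDofManagers in Example 7 applies exactly the same filter, so the two loops stay in lockstep over the buffer without needing any per-DOF flags.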
Example 6: packRemoteElementData
int
StructuralEngngModel :: packRemoteElementData(ProcessCommunicator &processComm)
{
    int result = 1;
    int i, size;
    IntArray const *toSendMap = processComm.giveToSendMap();
    CommunicationBuffer *send_buff = processComm.giveProcessCommunicatorBuff()->giveSendBuff();
    Domain *domain = this->giveDomain(1);

    size = toSendMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        result &= domain->giveElement( toSendMap->at(i) )->packUnknowns( * send_buff, this->giveCurrentStep() );
    }

    return result;
}
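In this variant giveProcessCommunicatorBuff is used only to obtain the raw send buffer (giveSendBuff), which is then handed to each element's packUnknowns so the element decides what to serialize. Example 9 is the receive-side counterpart, pulling giveRecvBuff from the same buffer object.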
Example 7: unpackDofManagers
int
StructuralEngngModel :: unpackDofManagers(FloatArray *dest, ProcessCommunicator &processComm, bool prescribedEquations)
{
    int result = 1;
    int i, size;
    int j, ndofs, eqNum;
    Domain *domain = this->giveDomain(1);
    dofManagerParallelMode dofmanmode;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    ProcessCommunicatorBuff *pcbuff = processComm.giveProcessCommunicatorBuff();
    DofManager *dman;
    Dof *jdof;
    double value;

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        dman = domain->giveDofManager( toRecvMap->at(i) );
        ndofs = dman->giveNumberOfDofs();
        dofmanmode = dman->giveParallelMode();
        for ( j = 1; j <= ndofs; j++ ) {
            jdof = dman->giveDof(j);
            if ( prescribedEquations ) {
                eqNum = jdof->__givePrescribedEquationNumber();
            } else {
                eqNum = jdof->__giveEquationNumber();
            }

            if ( jdof->isPrimaryDof() && eqNum ) {
                result &= pcbuff->unpackDouble(value);
                if ( dofmanmode == DofManager_shared ) {
                    dest->at(eqNum) += value;
                } else if ( dofmanmode == DofManager_remote ) {
                    dest->at(eqNum) = value;
                } else {
                    _error("unpackReactions: unknown dof manager parallel mode");
                }
            }
        }
    }

    return result;
}
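The branch on the parallel mode reflects the ownership model: a shared DOF manager accumulates contributions from every partition that owns a piece of it (+=), whereas a remote DOF manager is merely a mirror of a value computed elsewhere, so its entry is simply overwritten (=).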
Example 8: unpackSharedDmanPartitions
int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _globnum, _locnum;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // init domain global2local map
    domain->initGlobalDofManMap();

    pcbuff->read(_globnum);
    // unpack dofman data
    while ( _globnum != PARMETISLB_END_DATA ) {
        _partitions.restoreYourself(*pcbuff);
        if ( ( _locnum = domain->dofmanGlobal2Local(_globnum) ) ) {
            this->addSharedDofmanPartitions(_locnum, _partitions);
        } else {
            OOFEM_ERROR("internal error, unknown global dofman %d", _globnum);
        }

        /*
         * fprintf (stderr,"[%d] Received shared plist of %d ", myrank, _globnum);
         * for (int _i=1; _i<=dofManPartitions[_locnum-1].giveSize(); _i++)
         *     fprintf (stderr,"%d ", dofManPartitions[_locnum-1].at(_i));
         * fprintf (stderr,"\n");
         */
        pcbuff->read(_globnum);
    }

    return 1;
}
Example 9: unpackRemoteElementData
int
StructuralEngngModel :: unpackRemoteElementData(ProcessCommunicator &processComm)
{
    int result = 1;
    int i, size;
    IntArray const *toRecvMap = processComm.giveToRecvMap();
    CommunicationBuffer *recv_buff = processComm.giveProcessCommunicatorBuff()->giveRecvBuff();
    Element *element;
    Domain *domain = this->giveDomain(1);

    size = toRecvMap->giveSize();
    for ( i = 1; i <= size; i++ ) {
        element = domain->giveElement( toRecvMap->at(i) );
        if ( element->giveParallelMode() == Element_remote ) {
            result &= element->unpackAndUpdateUnknowns( * recv_buff, this->giveCurrentStep() );
        } else {
            _error("unpackRemoteElementData: element is not remote");
        }
    }

    return result;
}
Example 10: pcDataStream
int
LoadBalancer :: unpackMigratingData(Domain *d, ProcessCommunicator &pc)
{
    // create temp space for dofManagers and elements
    // merging should be made by domain ?
    // maps of new dofmanagers and elements indexed by global number
    // we can put local dofManagers and elements into maps (should be done before unpacking)
    // int nproc=this->giveEngngModel()->giveNumberOfProcesses();
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int _mode, _globnum, _type;
    bool _newentry;
    classType _etype;
    IntArray _partitions, local_partitions;
    //LoadBalancer::DofManMode dmode;
    DofManager *dofman;
    DomainTransactionManager *dtm = d->giveTransactionManager();

    // **************************************************
    // Unpack migrating data to remote partition
    // **************************************************
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    pcbuff->unpackInt(_type);
    // unpack dofman data
    while ( _type != LOADBALANCER_END_DATA ) {
        _etype = ( classType ) _type;
        pcbuff->unpackInt(_mode);
        switch ( _mode ) {
        case LoadBalancer :: DM_Remote:
            // receiving new local dofManager
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *     // data not available -> create a new one
             *     _newentry = true;
             *     dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);

            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_local);
            // add transaction if new entry allocated; otherwise existing one has been modified via returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }

            //dmanMap[_globnum] = dofman;
            break;

        case LoadBalancer :: DM_Shared:
            // receiving new shared dofManager, which was local on the sending partition;
            // should be received only once (from the partition where it was local)
            pcbuff->unpackInt(_globnum);
            /*
             * _newentry = false;
             * if ( ( dofman = dtm->giveDofManager(_globnum) ) == NULL ) {
             *     // data not available -> mode should be SharedUpdate
             *     _newentry = true;
             *     dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);
             * }
             */
            _newentry = true;
            dofman = CreateUsrDefDofManagerOfType(_etype, 0, d);

            dofman->setGlobalNumber(_globnum);
            // unpack dofman state (this is the local dofman, not available on remote)
            dofman->restoreContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // unpack list of new partitions
            pcbuff->unpackIntArray(_partitions);
            dofman->setPartitionList(& _partitions);
            dofman->setParallelMode(DofManager_shared);
#ifdef __VERBOSE_PARALLEL
            fprintf(stderr, "[%d] received Shared new dofman [%d]\n", myrank, _globnum);
#endif
            // add transaction if new entry allocated; otherwise existing one has been modified via returned dofman
            if ( _newentry ) {
                dtm->addTransaction(DomainTransactionManager :: DTT_ADD, DomainTransactionManager :: DCT_DofManager, _globnum, dofman);
            }

            //dmanMap[_globnum] = dofman;
            break;
//......... the rest of this code is omitted here .........