This article collects typical usage examples of the C++ method ProcessCommunicatorBuff::packIntArray. If you are unsure what ProcessCommunicatorBuff::packIntArray does or how to use it, the selected code examples below may help. You can also explore further usage examples of its containing class, ProcessCommunicatorBuff.
The following presents 3 code examples of ProcessCommunicatorBuff::packIntArray, sorted by popularity by default.
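All three examples share the same call pattern: obtain the per-process send buffer from the ProcessCommunicator, pack an integer tag with packInt, pack the payload with packIntArray, and terminate the stream with an end-of-data marker. Here is a minimal sketch of that pattern; the function name, the IntArray argument, and the endMarker parameter are illustrative only and not taken from the examples below.

// Minimal sketch (illustrative): pack one tagged IntArray into the
// per-process send buffer, then close the record stream.
int packOneArray(ProcessCommunicator &pc, int tag, IntArray &data, int endMarker)
{
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    pcbuff->packInt(tag);        // record tag / global number
    pcbuff->packIntArray(data);  // the integer-array payload
    pcbuff->packInt(endMarker);  // terminate the record stream
    return 1;
}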
Example 1: packSharedDmanPartitions
int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
             ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf(stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->packInt( dofman->giveGlobalNumber() );
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    pcbuff->packInt(PARMETISLB_END_DATA);
    return 1;
}
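The receive side must mirror this record layout exactly: a global dofman number followed by its new partition list, repeated until PARMETISLB_END_DATA appears. Below is a hedged sketch of such a counterpart, assuming the buffer exposes unpackInt/unpackIntArray calls symmetric to the pack calls above; the merge step is elided, so this is an outline, not the library's actual implementation.

// Hedged sketch of the matching receive side: drain (global number,
// partition list) records until the end marker packed by the sender.
int
ParmetisLoadBalancer :: unpackSharedDmanPartitions(ProcessCommunicator &pc)
{
    int _globnum;
    IntArray _partitions;
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    pcbuff->unpackInt(_globnum);
    while ( _globnum != PARMETISLB_END_DATA ) {
        pcbuff->unpackIntArray(_partitions);
        // ... merge _partitions into the local record for dofman _globnum ...
        pcbuff->unpackInt(_globnum);
    }

    return 1;
}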
Example 2: pcDataStream
int NonlocalMaterialWTP :: packMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();

    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    int ielem, nelem = d->giveNumberOfElements();
    int _globnum;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( lb->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!!!)
            // pack type
            _globnum = elem->giveGlobalNumber();
            pcbuff->packInt(_globnum);
            pcbuff->packIntArray(nonlocElementDependencyMap [ _globnum ]);
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(NonlocalMaterialWTP_END_DATA);
    return 1;
}
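The corresponding receiver drains (global element number, dependency array) pairs until it hits NonlocalMaterialWTP_END_DATA. A hedged sketch follows, again assuming symmetric unpackInt/unpackIntArray calls on the buffer; the body is illustrative rather than the library's actual code.

// Hedged sketch of the matching receiver: read each migrated element's
// global number and its nonlocal dependency array until the end marker.
int NonlocalMaterialWTP :: unpackMigratingElementDependencies(Domain *d, ProcessCommunicator &pc)
{
    int _globnum;
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();

    pcbuff->unpackInt(_globnum);
    while ( _globnum != NonlocalMaterialWTP_END_DATA ) {
        pcbuff->unpackIntArray(nonlocElementDependencyMap [ _globnum ]);
        pcbuff->unpackInt(_globnum);
    }

    return 1;
}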
Example 3: pcDataStream
int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    // **************************************************
    // Pack migrating data to remote partition
    // **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1; // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition;
        // if dofman is already present on the remote partition, there is no need to sync
        //if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) ) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
             ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* this is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending its context (it is
             * already there); here, for simplicity, it is always sent */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;
    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
             ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!!!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global !!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);
    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);
    return 1;
}
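The receiver of this stream must parse the same two-section layout: (dtype, dmode, globnum, dofman context, partition list) records terminated by LOADBALANCER_END_DATA, followed by (classID, element context) records with the same terminator. Below is a hedged outline of the dofman section only; the function name unpackMigratingDataSketch is hypothetical, and object creation plus context restoration are left as comments because the exact factory call varies between OOFEM versions.

// Hedged outline of the dofman-unpacking loop on the receiving rank.
int
LoadBalancer :: unpackMigratingDataSketch(Domain *d, ProcessCommunicator &pc)
{
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    int _type;
    pcbuff->unpackInt(_type);
    while ( _type != LOADBALANCER_END_DATA ) {
        int _mode, _globnum;
        pcbuff->unpackInt(_mode);
        pcbuff->unpackInt(_globnum);
        // create or look up the DofManager for _globnum based on _type
        // (factory call omitted -- it differs between OOFEM versions),
        // then restore its state with flags matching the sender:
        // dofman->restoreContext(& pcDataStream,
        //     CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
        IntArray _partitions;
        pcbuff->unpackIntArray(_partitions);  // new partition list
        pcbuff->unpackInt(_type);             // next record or end marker
    }

    // the element records would be drained analogously, terminated by a
    // second LOADBALANCER_END_DATA marker
    return 1;
}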