This article collects typical usage examples of the C++ method DofManager::givePartitionsConnectivitySize. If you are unsure what DofManager::givePartitionsConnectivitySize does, how to call it, or what real-world usage looks like, the selected code samples below may help. You can also read more about the enclosing class, DofManager.
Below, 1 code example of DofManager::givePartitionsConnectivitySize is shown, ordered by popularity by default.
Example 1: solveYourselfAt
void NlDEIDynamic :: solveYourselfAt(TimeStep *tStep)
{
    //
    // Creates system of governing eq's and solves them at given time step.
    //
    Domain *domain = this->giveDomain(1);
    int neq = this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() );
    int nman = domain->giveNumberOfDofManagers();
    DofManager *node;

    int i, k, j, jj;
    double coeff, maxDt, maxOm = 0.;
    double prevIncrOfDisplacement, incrOfDisplacement;

    if ( initFlag ) {
#ifdef VERBOSE
        OOFEM_LOG_DEBUG("Assembling mass matrix\n");
#endif
        //
        // Assemble mass matrix.
        //
        this->computeMassMtrx(massMatrix, maxOm, tStep);

        if ( drFlag ) {
            // If dynamic relaxation: Assemble amplitude load vector.
            loadRefVector.resize(neq);
            loadRefVector.zero();
            this->computeLoadVector(loadRefVector, VM_Total, tStep);

#ifdef __PARALLEL_MODE
            // Compute the processor part of load vector norm pMp
            this->pMp = 0.0;
            double my_pMp = 0.0, coeff = 1.0;
            int eqNum, ndofman = domain->giveNumberOfDofManagers();
            dofManagerParallelMode dofmanmode;
            DofManager *dman;
            for ( int dm = 1; dm <= ndofman; dm++ ) {
                dman = domain->giveDofManager(dm);
                dofmanmode = dman->giveParallelMode();
                // Skip all remote and null dofmanagers
                coeff = 1.0;
                if ( ( dofmanmode == DofManager_remote ) || ( dofmanmode == DofManager_null ) ) {
                    continue;
                } else if ( dofmanmode == DofManager_shared ) {
                    coeff = 1. / dman->givePartitionsConnectivitySize();
                }

                // For shared nodes we add locally an average = 1/givePartitionsConnectivitySize()*contribution,
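                // givePartitionsConnectivitySize() being the number of partitions sharing this node,
                // so that the subsequent global summation counts each shared equation exactly once.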
                for ( Dof *dof: *dman ) {
                    if ( dof->isPrimaryDof() && ( eqNum = dof->__giveEquationNumber() ) ) {
                        my_pMp += coeff * loadRefVector.at(eqNum) * loadRefVector.at(eqNum) / massMatrix.at(eqNum);
                    }
                }
            }

            // Sum up the contributions from processors.
            MPI_Allreduce(& my_pMp, & pMp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#else
            this->pMp = 0.0;
            for ( i = 1; i <= neq; i++ ) {
                pMp += loadRefVector.at(i) * loadRefVector.at(i) / massMatrix.at(i);
            }
#endif
            // Solve for rate of loading process (parameter "c") (undamped system assumed),
            if ( dumpingCoef < 1.e-3 ) {
                c = 3.0 * this->pyEstimate / pMp / Tau / Tau;
            } else {
                c = this->pyEstimate * Tau * dumpingCoef * dumpingCoef * dumpingCoef / pMp /
                    ( -3.0 / 2.0 + dumpingCoef * Tau + 2.0 * exp(-dumpingCoef * Tau) - 0.5 * exp(-2.0 * dumpingCoef * Tau) );
            }
        }

        initFlag = 0;
    }

    if ( tStep->isTheFirstStep() ) {
        //
        // Special init step - Compute displacements at tstep 0.
        //
        displacementVector.resize(neq);
        displacementVector.zero();
        previousIncrementOfDisplacementVector.resize(neq);
        previousIncrementOfDisplacementVector.zero();
        velocityVector.resize(neq);
        velocityVector.zero();
        accelerationVector.resize(neq);
        accelerationVector.zero();

        for ( j = 1; j <= nman; j++ ) {
            node = domain->giveDofManager(j);

            for ( Dof *dof: *node ) {
                // Ask for initial values obtained from
                // bc (boundary conditions) and ic (initial conditions)
//......... part of the code is omitted here .........
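The part of the example that actually uses DofManager::givePartitionsConnectivitySize is the weighted accumulation over dof managers in the parallel branch. Below is a minimal sketch, distilled from the example above, of that pattern in isolation: summing the squared entries of a distributed vector so that equations on shared nodes are counted exactly once after the MPI reduction. The helper name weightedSquaredNorm and its parameters are hypothetical; the OOFEM and MPI calls are the ones appearing in the example, and the listed headers are assumptions about the surrounding build (a parallel OOFEM build with __PARALLEL_MODE is assumed).

// Minimal sketch (hypothetical helper, not part of OOFEM).
// Assumed headers from the OOFEM source tree plus MPI; adjust to your build.
#include <mpi.h>
#include "domain.h"
#include "dofmanager.h"
#include "dof.h"
#include "floatarray.h"

using namespace oofem;

double weightedSquaredNorm(Domain *domain, const FloatArray &vec)
{
    double my_sum = 0.0;
    int ndofman = domain->giveNumberOfDofManagers();

    for ( int dm = 1; dm <= ndofman; dm++ ) {
        DofManager *dman = domain->giveDofManager(dm);
        dofManagerParallelMode mode = dman->giveParallelMode();

        // Remote and null dof managers are handled by another partition; skip them.
        if ( mode == DofManager_remote || mode == DofManager_null ) {
            continue;
        }

        // Shared nodes exist on several partitions; each partition adds
        // 1/givePartitionsConnectivitySize() of the contribution so the
        // global sum counts every shared equation once.
        double coeff = 1.0;
        if ( mode == DofManager_shared ) {
            coeff = 1. / dman->givePartitionsConnectivitySize();
        }

        for ( Dof *dof: *dman ) {
            int eqNum = dof->__giveEquationNumber();
            if ( dof->isPrimaryDof() && eqNum ) {
                my_sum += coeff * vec.at(eqNum) * vec.at(eqNum);
            }
        }
    }

    // Combine the per-processor partial sums.
    double sum = 0.0;
    MPI_Allreduce(& my_sum, & sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    return sum;
}

In the example above, the same loop additionally divides each term by the corresponding lumped mass matrix entry, which yields the pMp value used to calibrate the dynamic-relaxation loading rate c.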