

C++ DofManager::giveGlobalNumber Method Code Examples

This article collects typical usage examples of the C++ method DofManager::giveGlobalNumber. If you are unsure what DofManager::giveGlobalNumber does, how to call it, or where it is used, the hand-picked examples below may help. You can also explore further usage examples of the DofManager class that the method belongs to.


The following presents 8 code examples of the DofManager::giveGlobalNumber method, drawn from open-source projects and ordered roughly by popularity.
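Before the individual examples, a minimal sketch of the pattern most of them share may be useful: a DofManager is obtained from its Domain, its partition-independent global number is queried with giveGlobalNumber(), and that number is then used as a key for global-to-local lookups or packed into inter-partition communication buffers. The fragment below is only an illustration; it assumes the usual OOFEM headers and an already populated Domain *d, and the function name printGlobalNumbers is invented for this sketch rather than taken from the examples that follow.

#include <cstdio>
#include "domain.h"        // assumed OOFEM header names
#include "dofmanager.h"

// using namespace oofem;  // newer OOFEM versions wrap the library in this namespace

void printGlobalNumbers(Domain *d)
{
    int ndofman = d->giveNumberOfDofManagers();
    for ( int i = 1; i <= ndofman; i++ ) {            // OOFEM containers are 1-based
        DofManager *dman = d->giveDofManager(i);
        int globnum = dman->giveGlobalNumber();       // partition-independent number
        int locnum  = d->dofmanGlobal2Local(globnum); // back to a local index (0 if not present locally)
        printf("local %d -> global %d -> local %d\n", i, globnum, locnum);
    }
}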

Example 1: pcDataStream

int NonlocalMaterialWTP :: unpackRemoteElements(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    std :: string _type;
    DofManager *dofman;
    IntArray _partitions;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);

    // unpack dofman data
    do {
        pcbuff->unpackString(_type);
        if ( _type.size() == 0 ) {
            break;
        }
        dofman = classFactory.createDofManager(_type.c_str(), 0, d);
        dofman->restoreContext(& pcDataStream, CM_Definition | CM_State | CM_UnknownDictState);
        dofman->setParallelMode(DofManager_null);
        if ( d->dofmanGlobal2Local( dofman->giveGlobalNumber() ) ) {
            // record already exists
            delete dofman;
        } else {
            d->giveTransactionManager()->addDofManTransaction(DomainTransactionManager :: DTT_ADD,
                                                              dofman->giveGlobalNumber(),
                                                              dofman);
        }
    } while ( 1 );


    // unpack element data
    Element *elem;
    _partitions.resize(1);
    _partitions.at(1) = iproc;
    do {
        pcbuff->unpackString(_type);
        if ( _type.size() == 0 ) {
            break;
        }

        elem = classFactory.createElement(_type.c_str(), 0, d);
        elem->restoreContext(& pcDataStream, CM_Definition | CM_State);
        elem->setParallelMode(Element_remote);
        elem->setPartitionList(_partitions);
        d->giveTransactionManager()->addElementTransaction(DomainTransactionManager :: DTT_ADD,
                                                           elem->giveGlobalNumber(), elem);
    } while ( 1 );

    return 1;
}
Developer: vivianyw; Project: oofem; Lines: 56; Source: nonlocalmatwtp.C

Example 2: giveNumDofManEnrichments

int EnrichmentItem :: giveNumDofManEnrichments(const DofManager &iDMan) const
{
    int nodeInd     = iDMan.giveGlobalNumber();
    auto res = mNodeEnrMarkerMap.find(nodeInd);

    if ( res != mNodeEnrMarkerMap.end() ) {
        switch ( res->second ) {
        case NodeEnr_NONE:
            return 0;
        case NodeEnr_BULK:
            return 1;
        case NodeEnr_START_TIP:
            return mpEnrichmentFrontStart->giveNumEnrichments(iDMan);
        case NodeEnr_END_TIP:
            return mpEnrichmentFrontEnd->giveNumEnrichments(iDMan);
        case NodeEnr_START_AND_END_TIP:
            return mpEnrichmentFrontStart->giveNumEnrichments(iDMan) + mpEnrichmentFrontEnd->giveNumEnrichments(iDMan);
        }
    }

    return 0;
}
Developer: framby; Project: OOFEM_Johannes; Lines: 32; Source: enrichmentitem.C

Example 3: packSharedDmanPartitions

int
ParmetisLoadBalancer :: packSharedDmanPartitions(ProcessCommunicator &pc)
{
    int myrank = domain->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int ndofman, idofman;
    DofManager *dofman;

    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    // loop over dofManagers and pack shared dofMan data
    ndofman = domain->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = domain->giveDofManager(idofman);
        // test if iproc is in list of existing shared partitions
        if ( ( dofman->giveParallelMode() == DofManager_shared ) &&
            ( dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            // send new partitions to remote representation
            // fprintf (stderr, "[%d] sending shared plist of %d to [%d]\n", myrank, dofman->giveGlobalNumber(), iproc);
            pcbuff->write( dofman->giveGlobalNumber() );
            this->giveDofManPartitions(idofman)->storeYourself(*pcbuff);
        }
    }

    pcbuff->write((int)PARMETISLB_END_DATA);
    return 1;
}
Developer: Benjamin-git; Project: OOFEM_Jim; Lines: 31; Source: parmetisloadbalancer.C

Example 4: giveDofManDofIDMask

void
TrPlaneStress2dXFEM :: giveDofManDofIDMask(int inode, IntArray &answer) const
{
    // Continuous part
    TrPlaneStress2d :: giveDofManDofIDMask(inode, answer);

    // Discontinuous part
    if( this->giveDomain()->hasXfemManager() ) {
        DofManager *dMan = giveDofManager(inode);
        XfemManager *xMan = giveDomain()->giveXfemManager();

        const std::vector<int> &nodeEiIndices = xMan->giveNodeEnrichmentItemIndices( dMan->giveGlobalNumber() );
        for ( size_t i = 0; i < nodeEiIndices.size(); i++ ) {
            EnrichmentItem *ei = xMan->giveEnrichmentItem(nodeEiIndices[i]);
            if ( ei->isDofManEnriched(* dMan) ) {
                IntArray eiDofIdArray;
                ei->computeEnrichedDofManDofIdArray(eiDofIdArray, *dMan);
                answer.followedBy(eiDofIdArray);
            }
        }
    }
}
Developer: rreissnerr; Project: oofem; Lines: 22; Source: trplanstrssxfem.C

Example 5: sizeToSend


//......... part of the code omitted here .........
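// (visible continuation) The loop tail below counts, for each remote partition, how many shared-dofman
// records and how many integers this rank will send or receive; the pass that follows builds a
// global-to-local map keyed by giveGlobalNumber() and assigns new global equation numbers, starting
// at 'offset', to the dofs owned by this partition.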
            }

            if ( minrank == myrank ) { // count to send
                for ( j = 1; j <= psize; j++ ) {
#ifdef __VERBOSE_PARALLEL
                    nrecToSend( plist->at(j) )++;
#endif
                    sizeToSend( plist->at(j) ) += ( 1 + n );  // ndofs+dofman number
                }
            } else {
                nrecToReceive(minrank)++;
                sizeToRecv(minrank) += ( 1 + n );      // ndofs+dofman number
            }
        }
    }

#ifdef __VERBOSE_PARALLEL
    for ( i = 0; i < nproc; i++ ) {
        OOFEM_LOG_INFO("[%d] Record Statistics: Sending %d Receiving %d to %d\n",
                       myrank, nrecToSend(i), nrecToReceive(i), i);
    }

#endif



    std :: map< int, int >globloc; //  global->local mapping for shared
    // number local guys
    int globeq = offset;
    for ( i = 1; i <= ndofman; i++ ) {
        dman = d->giveDofManager(i);
        //if (dman->giveParallelMode() == DofManager_shared) {
        if ( isShared(dman) ) {
            globloc [ dman->giveGlobalNumber() ] = i; // build global->local mapping for shared

            plist = dman->givePartitionList();
            psize = plist->giveSize();
            int minrank = myrank;
            for ( j = 1; j <= psize; j++ ) {
                minrank = min( minrank, plist->at(j) );
            }

            if ( minrank == myrank ) { // local
                ndofs = dman->giveNumberOfDofs();
                for ( j = 1; j <= ndofs; j++ ) {
                    if ( dman->giveDof(j)->isPrimaryDof() ) {
                        int eq;
                        if ( et == et_standard ) {
                            eq = dman->giveDof(j)->giveEquationNumber(dn);
                        } else {
                            eq = dman->giveDof(j)->giveEquationNumber(dpn);
                        }

                        if ( eq ) {
                            locGlobMap.at(eq) = globeq++;
                        }
                    }
                }
            }

            //} else if (dman->giveParallelMode() == DofManager_local) {
        } else {
            ndofs = dman->giveNumberOfDofs();
            for ( j = 1; j <= ndofs; j++ ) {
                if ( dman->giveDof(j)->isPrimaryDof() ) {
                    int eq;
Developer: JimBrouzoulis; Project: oofem-1; Lines: 67; Source: petscordering.C

Example 6: domainNodeRecvCount

void
ProblemCommunicator :: setUpCommunicationMapsForElementCut(EngngModel *pm,
                                                           bool excludeSelfCommFlag)
{
    Domain *domain = pm->giveDomain(1);
    int nnodes = domain->giveNumberOfDofManagers();
    int i, j, partition;

    if ( this->mode == ProblemCommMode__ELEMENT_CUT ) {
        /*
         * Initially, each partition knows for which nodes a receive
         * is needed (and can therefore compute easily the recv map),
         * but does not know for which nodes it should send data to which
         * partition. Hence, the communication setup is performed by
         * broadcasting "send request" lists of nodes for which
         * a partition expects to receive data (i.e. of those nodes
         * which the partition uses, but does not own) to all
         * collaborating processes. The "send request" lists are then
         * converted into send maps.
         */

        // receive maps can be built locally,
        // but send maps should be assembled from broadcasted lists (containing
        // expected receive nodes) of remote partitions.

        // first build local receive map
        IntArray domainNodeRecvCount(size);
        const IntArray *partitionList;
        DofManager *dofMan;
        //Element    *element;
        int domainRecvListSize = 0, domainRecvListPos = 0;
        //int nelems;
        int result = 1;

        for ( i = 1; i <= nnodes; i++ ) {
            partitionList = domain->giveDofManager(i)->givePartitionList();
            if ( domain->giveDofManager(i)->giveParallelMode() == DofManager_remote ) {
                // size of partitionList should be 1 <== only one master
                for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                    if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                        domainRecvListSize++;
                        domainNodeRecvCount.at(partitionList->at(j) + 1)++;
                    }
                }
            }
        }

        // build maps simultaneously
        IntArray pos(size);
        IntArray **maps = new IntArray * [ size ];
        for ( i = 0; i < size; i++ ) {
            maps [ i ] = new IntArray( domainNodeRecvCount.at(i + 1) );
        }

        // allocate also domain receive list to be broadcasted
        IntArray domainRecvList(domainRecvListSize);

        if ( domainRecvListSize ) {
            for ( i = 1; i <= nnodes; i++ ) {
                // test if node is remote DofMan
                dofMan = domain->giveDofManager(i);
                if ( dofMan->giveParallelMode() == DofManager_remote ) {
                    domainRecvList.at(++domainRecvListPos) = dofMan->giveGlobalNumber();

                    partitionList = domain->giveDofManager(i)->givePartitionList();
                    // size of partitionList should be 1 <== only one master
                    for ( j = 1; j <= partitionList->giveSize(); j++ ) {
                        if ( !( excludeSelfCommFlag && ( this->rank == partitionList->at(j) ) ) ) {
                            partition = partitionList->at(j);
                            maps [ partition ]->at( ++pos.at(partition + 1) ) = i;
                        }
                    }
                }
            }
        }

        // set up process recv communicator maps
        for ( i = 0; i < size; i++ ) {
            this->setProcessCommunicatorToRecvArry(this->giveProcessCommunicator(i), * maps [ i ]);
            //this->giveDomainCommunicator(i)->setToRecvArry (this->engngModel, *maps[i]);
        }

        // delete local maps
        for ( i = 0; i < size; i++ ) {
            delete maps [ i ];
        }

        delete [] maps; // array form matches the 'new IntArray * [ size ]' above

        // to assemble send maps, we must analyze broadcasted remote domain send lists
        // and we must also broadcast our send list.

#ifdef __VERBOSE_PARALLEL
        VERBOSEPARALLEL_PRINT("ProblemCommunicator::setUpCommunicationMaps", "Element-cut broadcasting started", rank);
#endif


        StaticCommunicationBuffer commBuff(MPI_COMM_WORLD);
        IntArray remoteDomainRecvList;
        IntArray toSendMap;
//......... part of the code omitted here .........
Developer: JimBrouzoulis; Project: oofem-1; Lines: 101; Source: problemcomm.C

Example 7: pcDataStream

int
LoadBalancer :: packMigratingData(Domain *d, ProcessCommunicator &pc)
{
    int myrank = d->giveEngngModel()->giveRank();
    int iproc = pc.giveRank();
    int idofman, ndofman;
    classType dtype;
    DofManager *dofman;
    LoadBalancer :: DofManMode dmode;

    //  **************************************************
    //  Pack migrating data to remote partition
    //  **************************************************

    // pack dofManagers
    if ( iproc == myrank ) {
        return 1;                // skip local partition
    }

    // query process communicator to use
    ProcessCommunicatorBuff *pcbuff = pc.giveProcessCommunicatorBuff();
    ProcessCommDataStream pcDataStream(pcbuff);
    // loop over dofManagers
    ndofman = d->giveNumberOfDofManagers();
    for ( idofman = 1; idofman <= ndofman; idofman++ ) {
        dofman = d->giveDofManager(idofman);
        dmode = this->giveDofManState(idofman);
        dtype = dofman->giveClassID();
        // sync data to remote partition
        // if dofman already present on remote partition then there is no need to sync
        //if ((this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc))) {
        if ( ( this->giveDofManPartitions(idofman)->findFirstIndexOf(iproc) ) &&
            ( !dofman->givePartitionList()->findFirstIndexOf(iproc) ) ) {
            pcbuff->packInt(dtype);
            pcbuff->packInt(dmode);
            pcbuff->packInt( dofman->giveGlobalNumber() );

            // pack dofman state (this is the local dofman, not available on remote)
            /* This is a potential performance leak: sending a shared dofman to a partition
             * on which it is already shared does not require sending its context (it is already there).
             * Here, for simplicity, it is always sent. */
            dofman->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State | CM_UnknownDictState);
            // send list of new partitions
            pcbuff->packIntArray( * ( this->giveDofManPartitions(idofman) ) );
        }
    }

    // pack end-of-dofman-section record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    int ielem, nelem = d->giveNumberOfElements(), nsend = 0;

    Element *elem;

    for ( ielem = 1; ielem <= nelem; ielem++ ) { // begin loop over elements
        elem = d->giveElement(ielem);
        if ( ( elem->giveParallelMode() == Element_local ) &&
            ( this->giveElementPartition(ielem) == iproc ) ) {
            // pack local element (node numbers should be global ones!)
            // pack type
            pcbuff->packInt( elem->giveClassID() );
            // nodal numbers should be packed as global!
            elem->saveContext(& pcDataStream, CM_Definition | CM_DefinitionGlobal | CM_State);
            nsend++;
        }
    } // end loop over elements

    // pack end-of-element-record
    pcbuff->packInt(LOADBALANCER_END_DATA);

    OOFEM_LOG_RELEVANT("[%d] LoadBalancer:: sending %d migrating elements to %d\n", myrank, nsend, iproc);

    return 1;
}
Developer: MartinFagerstrom; Project: oofem; Lines: 74; Source: loadbalancer.C

Example 8: giveCompositeExportData


//......... part of the code omitted here .........
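// (visible continuation) The code below stores an integration-point average as a VTK cell variable and,
// when an XfemManager is present, evaluates level set values (phi, gamma) and node enrichment markers at
// the composite-export nodes by interpolating the nodal values, looked up via dMan->giveGlobalNumber(),
// with the element shape functions N.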
            FloatArray average;
            std :: unique_ptr< IntegrationRule > &iRule = integrationRulesArray [ 0 ];
            VTKXMLExportModule :: computeIPAverage(average, iRule.get(), this, type, tStep);

            FloatArray averageV9(9);
            averageV9.at(1) = average.at(1);
            averageV9.at(5) = average.at(2);
            averageV9.at(9) = average.at(3);
            averageV9.at(6) = averageV9.at(8) = average.at(4);
            averageV9.at(3) = averageV9.at(7) = average.at(5);
            averageV9.at(2) = averageV9.at(4) = average.at(6);

            vtkPieces[0].setCellVar( i, 1, averageV9 );
        }


        // Export of XFEM related quantities
        if ( domain->hasXfemManager() ) {
            XfemManager *xMan = domain->giveXfemManager();

            int nEnrIt = xMan->giveNumberOfEnrichmentItems();
            vtkPieces[0].setNumberOfInternalXFEMVarsToExport(xMan->vtkExportFields.giveSize(), nEnrIt, numTotalNodes);

            const int nDofMan = giveNumberOfDofManagers();


            for ( int field = 1; field <= xMan->vtkExportFields.giveSize(); field++ ) {
                XFEMStateType xfemstype = ( XFEMStateType ) xMan->vtkExportFields [ field - 1 ];

                for ( int enrItIndex = 1; enrItIndex <= nEnrIt; enrItIndex++ ) {
                    EnrichmentItem *ei = xMan->giveEnrichmentItem(enrItIndex);
                    for ( int nodeInd = 1; nodeInd <= numTotalNodes; nodeInd++ ) {

                        const FloatArray &x = nodeCoords[nodeInd-1];
                        FloatArray locCoord;
                        computeLocalCoordinates(locCoord, x);

                        FloatArray N;
                        FEInterpolation *interp = giveInterpolation();
                        interp->evalN( N, locCoord, FEIElementGeometryWrapper(this) );


                        if ( xfemstype == XFEMST_LevelSetPhi ) {
                            double levelSet = 0.0, levelSetInNode = 0.0;

                            for(int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++) {
                                DofManager *dMan = giveDofManager(elNodeInd);
                                ei->evalLevelSetNormalInNode(levelSetInNode, dMan->giveGlobalNumber(), *(dMan->giveCoordinates()) );

                                levelSet += N.at(elNodeInd)*levelSetInNode;
                            }


                            FloatArray valueArray = {levelSet};
                            vtkPieces[0].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray);

                        } else if ( xfemstype == XFEMST_LevelSetGamma ) {
                            double levelSet = 0.0, levelSetInNode = 0.0;

                            for(int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++) {
                                DofManager *dMan = giveDofManager(elNodeInd);
                                ei->evalLevelSetTangInNode(levelSetInNode, dMan->giveGlobalNumber(), *(dMan->giveCoordinates()) );

                                levelSet += N.at(elNodeInd)*levelSetInNode;
                            }


                            FloatArray valueArray = {levelSet};
                            vtkPieces[0].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray);

                        } else if ( xfemstype == XFEMST_NodeEnrMarker ) {
                            double nodeEnrMarker = 0.0, nodeEnrMarkerInNode = 0.0;

                            for(int elNodeInd = 1; elNodeInd <= nDofMan; elNodeInd++) {
                                DofManager *dMan = giveDofManager(elNodeInd);
                                ei->evalNodeEnrMarkerInNode(nodeEnrMarkerInNode, dMan->giveGlobalNumber() );

                                nodeEnrMarker += N.at(elNodeInd)*nodeEnrMarkerInNode;
                            }


                            FloatArray valueArray = {nodeEnrMarker};
                            vtkPieces[0].setInternalXFEMVarInNode(field, enrItIndex, nodeInd, valueArray);
                        }

                    }
                }
            }
        }

    }
    else {
        // Enriched and cut element

        XfemStructuralElementInterface::giveSubtriangulationCompositeExportData(vtkPieces, primaryVarsToExport, internalVarsToExport, cellVarsToExport, tStep);


    }

}
Developer: rreissnerr; Project: oofem; Lines: 101; Source: trplanstrssxfem.C


Note: the DofManager::giveGlobalNumber method examples in this article were collected from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from projects contributed by open-source authors, and copyright remains with the original authors. Consult the corresponding project's license before distributing or reusing the code.