C++ Epetra_Comm::MinAll Method Code Examples

This article collects typical usage examples of the C++ method Epetra_Comm::MinAll. If you are wondering what exactly Epetra_Comm::MinAll does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples for the containing class, Epetra_Comm.


Three code examples of Epetra_Comm::MinAll are shown below, sorted by popularity by default.
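Before the project examples, here is a minimal, self-contained sketch of the call pattern, written for this page as an illustration. It assumes an MPI build of Trilinos; with a serial build you would construct an Epetra_SerialComm instead of an Epetra_MpiComm.

#include <mpi.h>
#include <iostream>
#include "Epetra_MpiComm.h"

int main (int argc, char* argv[]) {
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);

  // Each process contributes one partial value; MinAll computes the
  // element-wise minimum across all processes and returns it to
  // every process (an all-reduce, like MPI_Allreduce with MPI_MIN).
  double localValue = static_cast<double> (comm.MyPID () + 1);
  double globalMin = 0.0;
  comm.MinAll (&localValue, &globalMin, 1); // Count = 1

  if (comm.MyPID () == 0)
    std::cout << "global min = " << globalMin << std::endl; // prints 1

  MPI_Finalize ();
  return 0;
}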

Example 1: checkmap

int checkmap(Epetra_Map & Map, int NumGlobalElements, int NumMyElements, 
	     int *MyGlobalElements, int IndexBase, Epetra_Comm& Comm,
	     bool DistributedGlobal)
{
  int i, ierr=0, forierr = 0;

  EPETRA_TEST_ERR(!Map.ConstantElementSize(),ierr);

  EPETRA_TEST_ERR(DistributedGlobal!=Map.DistributedGlobal(),ierr);


  EPETRA_TEST_ERR(Map.ElementSize()!=1,ierr);
  int *MyElementSizeList = new int[NumMyElements];

  EPETRA_TEST_ERR(Map.ElementSizeList(MyElementSizeList)!=0,ierr);

  forierr = 0;
  for (i=0; i<NumMyElements; i++) forierr += MyElementSizeList[i]!=1;
  EPETRA_TEST_ERR(forierr,ierr);

  delete [] MyElementSizeList;

  const Epetra_Comm & Comm1 = Map.Comm();

  EPETRA_TEST_ERR(Comm1.NumProc()!=Comm.NumProc(),ierr);

  EPETRA_TEST_ERR(Comm1.MyPID()!=Comm.MyPID(),ierr);

  EPETRA_TEST_ERR(Map.IndexBase()!=IndexBase,ierr);

  EPETRA_TEST_ERR(!Map.LinearMap() && MyGlobalElements==0,ierr);

  EPETRA_TEST_ERR(Map.LinearMap() && MyGlobalElements!=0,ierr);

  EPETRA_TEST_ERR(Map.MaxAllGID()!=NumGlobalElements-1+IndexBase,ierr);

  EPETRA_TEST_ERR(Map.MaxElementSize()!=1,ierr);

  int MaxLID = Map.MaxLID();
  EPETRA_TEST_ERR(MaxLID!=NumMyElements-1,ierr);

  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  if (!DistributedGlobal) MaxMyGID = NumMyElements-1+IndexBase;
  EPETRA_TEST_ERR(Map.MaxMyGID()!=MaxMyGID,ierr);

  EPETRA_TEST_ERR(Map.MinAllGID()!=IndexBase,ierr);

  EPETRA_TEST_ERR(Map.MinElementSize()!=1,ierr);

  EPETRA_TEST_ERR(Map.MinLID()!=0,ierr);

  int MinMyGID = Comm.MyPID()*NumMyElements+IndexBase;
  if (Comm.MyPID()>2) MinMyGID+=3;
  if (!DistributedGlobal) MinMyGID = 0;
  EPETRA_TEST_ERR(Map.MinMyGID()!=MinMyGID,ierr);
  
  int * MyGlobalElements1 = new int[NumMyElements];
  EPETRA_TEST_ERR(Map.MyGlobalElements(MyGlobalElements1)!=0,ierr);

  forierr = 0;
  if (MyGlobalElements==0)
    {
      for (i=0; i<NumMyElements; i++) 
	forierr += MyGlobalElements1[i]!=MinMyGID+i;
      EPETRA_TEST_ERR(forierr,ierr);
    }
  else {
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements[i]!=MyGlobalElements1[i];
    EPETRA_TEST_ERR(forierr,ierr);
  }
  EPETRA_TEST_ERR(Map.NumGlobalElements()!=NumGlobalElements,ierr);
  
  EPETRA_TEST_ERR(Map.NumGlobalPoints()!=NumGlobalElements,ierr);
  
  EPETRA_TEST_ERR(Map.NumMyElements()!=NumMyElements,ierr);  

  EPETRA_TEST_ERR(Map.NumMyPoints()!=NumMyElements,ierr);

  int MaxMyGID2 = Map.GID(Map.LID(MaxMyGID));
  EPETRA_TEST_ERR(MaxMyGID2 != MaxMyGID,ierr);
  int MaxLID2 = Map.LID(Map.GID(MaxLID));
  EPETRA_TEST_ERR(MaxLID2 != MaxLID,ierr);

  EPETRA_TEST_ERR(Map.GID(MaxLID+1) != IndexBase-1,ierr);// MaxLID+1 doesn't exist
  EPETRA_TEST_ERR(Map.LID(MaxMyGID+1) != -1,ierr);// MaxMyGID+1 doesn't exist or is on a different processor

  EPETRA_TEST_ERR(!Map.MyGID(MaxMyGID),ierr);
  EPETRA_TEST_ERR(Map.MyGID(MaxMyGID+1),ierr);

  EPETRA_TEST_ERR(!Map.MyLID(MaxLID),ierr);
  EPETRA_TEST_ERR(Map.MyLID(MaxLID+1),ierr);

  EPETRA_TEST_ERR(!Map.MyGID(Map.GID(MaxLID)),ierr);
  EPETRA_TEST_ERR(Map.MyGID(Map.GID(MaxLID+1)),ierr);

  EPETRA_TEST_ERR(!Map.MyLID(Map.LID(MaxMyGID)),ierr);
  EPETRA_TEST_ERR(Map.MyLID(Map.LID(MaxMyGID+1)),ierr);

//......... (part of the code omitted here) .........
Developer: cakeisalie, Project: oomphlib_003, Lines of code: 101, Source file: checkmap.cpp
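A note on the testing macro: checkmap reports failures through EPETRA_TEST_ERR, which comes from the Epetra test harness (epetra_test_err.h). The sketch below approximates its behavior for readers without the Trilinos sources at hand; it is an illustration, not the verbatim definition:

#include <iostream>

// Approximation of EPETRA_TEST_ERR(a, b): if the tested expression
// `a` is nonzero (i.e., the check failed), print a diagnostic with
// the file and line, and increment the running error counter `b`.
#define EPETRA_TEST_ERR(a, b)                                      \
  do {                                                             \
    int epetra_testing_err = (a);                                  \
    if (epetra_testing_err != 0) {                                 \
      std::cerr << "Non-zero error code " << epetra_testing_err    \
                << ", file: " << __FILE__                          \
                << ", line: " << __LINE__ << std::endl;            \
      (b) += 1;                                                    \
    }                                                              \
  } while (false)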

Example 2: ComputeCriticalTimeStep

double PeridigmNS::ComputeCriticalTimeStep(const Epetra_Comm& comm, PeridigmNS::Block& block){

  Teuchos::RCP<PeridigmNS::NeighborhoodData> neighborhoodData = block.getNeighborhoodData();
  const int numOwnedPoints = neighborhoodData->NumOwnedPoints();
  const int* ownedIDs = neighborhoodData->OwnedIDs();
  const int* neighborhoodList = neighborhoodData->NeighborhoodList();
  Teuchos::RCP<const PeridigmNS::Material> materialModel = block.getMaterialModel();

  double density = materialModel()->Density();
  double bulkModulus = materialModel()->BulkModulus();

  double horizon(0.0);
  std::string blockName = block.getName();
  PeridigmNS::HorizonManager& horizonManager = PeridigmNS::HorizonManager::self();
  bool blockHasConstantHorizon = horizonManager.blockHasConstantHorizon(blockName);
  if(blockHasConstantHorizon)
    horizon = horizonManager.getBlockConstantHorizonValue(blockName);

  double *cellVolume, *x;
  PeridigmNS::FieldManager& fieldManager = PeridigmNS::FieldManager::self();
  block.getData(fieldManager.getFieldId("Volume"), PeridigmField::STEP_NONE)->ExtractView(&cellVolume);
  block.getData(fieldManager.getFieldId("Model_Coordinates"), PeridigmField::STEP_NONE)->ExtractView(&x);

  const double pi = boost::math::constants::pi<double>();
  double springConstant(0.0);
  if(blockHasConstantHorizon)
    springConstant = 18.0*bulkModulus/(pi*horizon*horizon*horizon*horizon);

  double minCriticalTimeStep = 1.0e50;

  int neighborhoodListIndex = 0;
  for(int iID=0 ; iID<numOwnedPoints ; ++iID){

    double timestepDenominator = 0.0;
    int nodeID = ownedIDs[iID];
    double X[3] = { x[nodeID*3], x[nodeID*3+1], x[nodeID*3+2] };
    int numNeighbors = neighborhoodList[neighborhoodListIndex++];

    if(!blockHasConstantHorizon){
      double delta = horizonManager.evaluateHorizon(blockName, X[0], X[1], X[2]);
      springConstant = 18.0*bulkModulus/(pi*delta*delta*delta*delta);
    }

    for(int iNID=0 ; iNID<numNeighbors ; ++iNID){
      int neighborID = neighborhoodList[neighborhoodListIndex++];
      double neighborVolume = cellVolume[neighborID];
      double initialDistance = sqrt( (X[0] - x[neighborID*3  ])*(X[0] - x[neighborID*3  ]) +
                                     (X[1] - x[neighborID*3+1])*(X[1] - x[neighborID*3+1]) +
                                     (X[2] - x[neighborID*3+2])*(X[2] - x[neighborID*3+2]) );

      // Issue a warning if the bond length is extremely small (effectively zero)
      static bool warningGiven = false;
      if(!warningGiven && initialDistance < 1.0e-50){
        cout << "\nWarning:  Possible zero length bond detected (length = " << initialDistance << ")." << endl;
        cout << "            Bonds of length zero are not valid, the input mesh may contain coincident nodes.\n" << endl;
        warningGiven = true;
      }

      timestepDenominator += neighborVolume*springConstant/initialDistance;
    }

    double criticalTimeStep = 1.0e50;
    if(numNeighbors > 0)
      criticalTimeStep = sqrt(2.0*density/timestepDenominator);
    if(criticalTimeStep < minCriticalTimeStep)
      minCriticalTimeStep = criticalTimeStep;
  }

  // Find the minimum time step for this block across all processors
  double globalMinCriticalTimeStep;
  comm.MinAll(&minCriticalTimeStep, &globalMinCriticalTimeStep, 1);

  return globalMinCriticalTimeStep;
}
Developer: DrRokkam, Project: peridigm, Lines of code: 74, Source file: Peridigm_CriticalTimeStep.cpp
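In effect, the loop above evaluates the standard bond-based peridynamic stability estimate. With the spring constant (micromodulus) C = 18K/(πδ⁴), where K is the bulk modulus and δ the horizon, the per-node critical time step computed by the code is

\[
  \Delta t_c \;=\; \sqrt{\frac{2\rho}{\sum_j C\,V_j / \xi_j}}
\]

where ρ is the material density, V_j the volume of neighbor cell j, and ξ_j the initial bond length. Each processor minimizes Δt_c over its owned nodes, and the final comm.MinAll reduces those per-processor minima to a single global value, so every rank returns the same time step.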

Example 3: procZeroMap

void
example (const Epetra_Comm& comm)
{
  // The global number of rows in the matrix A to create.  We scale
  // this relative to the number of (MPI) processes, so that no matter
  // how many MPI processes you run, every process will have 10 rows.
  const global_ordinal_type numGblElts = 10 * comm.NumProc ();
  // The global min global index in all the Maps here.
  const global_ordinal_type indexBase = 0;

  // Local error code for use below.
  //
  // In the ideal case, we would use this to emulate behavior like
  // that of Haskell's Maybe in the context of MPI.  That is, if one
  // process experiences an error, we don't want to abort early and
  // cause the other processes to deadlock on MPI communication
  // operators.  Rather, we want to chain along the local error state,
  // until we reach a point where it's natural to pass along that
  // state with other processes.  For example, if one is doing an
  // MPI_Allreduce anyway, it makes sense to pass along one more bit
  // of information: whether the calling process is in a local error
  // state.  Epetra's interface doesn't let one chain the local error
  // state in this way, so we use extra collectives below to propagate
  // that state.  The code below uses very conservative error checks;
  // typical user code would not need to be so conservative and could
  // therefore avoid all the all-reduces.
  int lclerr = 0;

  // Construct a Map that is global (not locally replicated), but puts
  // all the equations on MPI Proc 0.
  const int procZeroMapNumLclElts = (comm.MyPID () == 0) ?
    static_cast<int> (numGblElts) : 0;
  Epetra_Map procZeroMap (numGblElts, procZeroMapNumLclElts, indexBase, comm);

  // Construct a Map that puts approximately the same number of
  // equations on each processor.
  Epetra_Map globalMap (numGblElts, indexBase, comm);

  // Create a sparse matrix using procZeroMap.
  Epetra_CrsMatrix* A = createCrsMatrix (procZeroMap);
  if (A == NULL) {
    lclerr = 1;
  }

  // Make sure that sparse matrix creation succeeded.  Normally you
  // don't have to check this; we are being extra conservative because
  // this example is also a test.  Even though the matrix's rows live
  // entirely on Process 0, the matrix is nonnull on all processes in
  // its Map's communicator.
  int gblerr = 0;
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("createCrsMatrix returned NULL on at least one "
                              "process.");
  }

  //
  // We've created a sparse matrix whose rows live entirely on MPI
  // Process 0.  Now we want to distribute it over all the processes.
  //

  // Redistribute the matrix.  Since both the source and target Maps
  // are one-to-one, we could use either an Import or an Export.  If
  // only the source Map were one-to-one, we would have to use an
  // Import; if only the target Map were one-to-one, we would have to
  // use an Export.  We do not allow redistribution using Import or
  // Export if neither source nor target Map is one-to-one.

  // Make an Export object with procZeroMap as the source Map, and
  // globalMap as the target Map.  Epetra_Export (unlike its Tpetra
  // counterpart) is not templated, and it does not depend on the
  // type of data being redistributed: you can reuse the same Export
  // object to redistribute different Epetra objects (e.g., vectors
  // and matrices) between the same pair of Maps.
  Epetra_Export exporter (procZeroMap, globalMap);

  // Make a new sparse matrix whose row map is the global Map.
  Epetra_CrsMatrix B (Copy, globalMap, 0);

  // Redistribute the data, NOT in place, from matrix A (which lives
  // entirely on Proc 0) to matrix B (which is distributed evenly over
  // the processes).
  //
  // Export() has collective semantics, so we must always call it on
  // all processes collectively.  This is why we don't select on
  // lclerr, as we do for the local operations above.
  lclerr = B.Export (*A, exporter, Insert);

  // Make sure that the Export succeeded.  Normally you don't have to
  // check this; we are being extra conservative because this example
  // is also a test.  We test both min and max, since lclerr may be
  // negative, zero, or positive.
  gblerr = 0;
  (void) comm.MinAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("Export() failed on at least one process.");
  }
//......... (part of the code omitted here) .........
Developer: LaHaine, Project: ohpc, Lines of code: 101, Source file: lesson_epetra_dataredist.cpp
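The error-propagation idiom used in this lesson can be factored into a small helper. The sketch below is hypothetical (anyFailed is not part of Epetra); it reduces a per-process error code with both MaxAll and MinAll, so positive and negative codes are both detected, and every rank agrees on the outcome:

#include "Epetra_Comm.h"

// Hypothetical helper: collectively decide whether any process
// reported a nonzero error code.  Both reductions are collective,
// so all ranks receive the same answer and can throw together
// instead of deadlocking in a later collective call.
bool anyFailed (const Epetra_Comm& comm, int lclerr)
{
  int gblmax = 0;
  int gblmin = 0;
  (void) comm.MaxAll (&lclerr, &gblmax, 1); // catches positive codes
  (void) comm.MinAll (&lclerr, &gblmin, 1); // catches negative codes
  return (gblmax != 0) || (gblmin != 0);
}

With such a helper, both checks above collapse to a single pattern on every rank: if (anyFailed (comm, lclerr)) throw std::runtime_error ("...");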


Note: The Epetra_Comm::MinAll examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Observe each project's license when distributing or using the code, and do not republish this article without permission.