

C++ Epetra_MpiComm::NumProc Method: Code Examples

This article collects typical usage examples of the C++ method Epetra_MpiComm::NumProc from open-source projects. If you are unsure what Epetra_MpiComm::NumProc does or how to call it, the curated examples below may help; you can also explore the other documented uses of Epetra_MpiComm. In short, NumProc() is Epetra's wrapper for MPI_Comm_size: it returns the total number of processes in the communicator.


The 13 code examples of Epetra_MpiComm::NumProc shown below are ordered by popularity.
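Before the full examples, here is a minimal sketch of the method's most common use: sizing a global problem by the number of processes. The 10-rows-per-process figure is an arbitrary illustration, not taken from any project below.

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <iostream>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);

  // NumProc() wraps MPI_Comm_size: it returns the total number of
  // processes, so global problem sizes can scale with the run.
  const int rowsPerProc = 10;  // arbitrary illustrative choice
  const int globalRows  = rowsPerProc * comm.NumProc ();

  if (comm.MyPID () == 0) {
    std::cout << "Processes: " << comm.NumProc ()
              << ", global rows: " << globalRows << std::endl;
  }

  MPI_Finalize ();
  return 0;
}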

Example 1: main

int main(int argc, char* argv[])
{
  using Teuchos::RCP;
  using Teuchos::rcp;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Create a parameter list
  Teuchos::ParameterList GaleriList;

  // Set the number of discretization points in the x and y direction.
  GaleriList.set ("nx", 10 * Comm.NumProc ());
  GaleriList.set ("ny", 10);

  // Create the map and matrix using the parameter list for a 2D Laplacian.
  RCP<Epetra_Map> Map = rcp (CreateMap ("Cartesian2D", Comm, GaleriList));
  RCP<Epetra_CrsMatrix> Matrix = rcp (CreateCrsMatrix ("Laplace2D", &*Map, GaleriList));

  // Print out the map and the matrix.
  Map->Print (std::cout);
  Matrix->Print (std::cout);

#ifdef HAVE_MPI
  MPI_Finalize ();
#endif
  return 0;
}
Developer ID: Blevs, Project: Trilinos_tutorial, Lines: 32, Source: Galeri_Linear_System.cpp

Example 2: myComm

// This constructor is for just one subdomain, so it only adds the
// bookkeeping for multiple time steps on that domain. No two-level
// parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
        Epetra_MpiComm(EpetraMpiComm_),
        Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
        myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
        subComm(0)
{

  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for time domain
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  (void) MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                        &time_split_MPI_Comm);
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
}
Developer ID: EllieGong, Project: trilinos, Lines: 27, Source: EpetraExt_MultiMpiComm.cpp
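In the constructor above, each rank passes its own rank as the color to MPI_Comm_split, so every process ends up in a communicator of size one and numTimeDomains equals Epetra_MpiComm::NumProc(). A standalone sketch of that split pattern (names here are illustrative, not from EpetraExt):

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <iostream>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  Epetra_MpiComm world (MPI_COMM_WORLD);

  // color == rank: every process has a distinct color, so each split
  // communicator contains exactly one process.
  MPI_Comm splitComm;
  MPI_Comm_split (world.Comm (), world.MyPID (), world.MyPID (), &splitComm);

  {
    Epetra_MpiComm timeComm (splitComm);
    std::cout << "world rank " << world.MyPID () << " of " << world.NumProc ()
              << "; split communicator size = " << timeComm.NumProc () << std::endl;
  }

  // Free the split communicator once its Epetra wrapper is gone.
  MPI_Comm_free (&splitComm);
  MPI_Finalize ();
  return 0;
}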

Example 3: comm

//
// The same main() driver routine as in the first Epetra lesson.
//
int
main (int argc, char *argv[])
{
    using std::cout;
    using std::endl;

#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
    Epetra_SerialComm comm;
#endif // HAVE_MPI

    if (comm.MyPID () == 0) {
        cout << "Total number of processes: " << comm.NumProc () << endl;
    }

    // Do something with the new Epetra communicator.
    exampleRoutine (comm, cout);

    // This tells the Trilinos test framework that the test passed.
    if (comm.MyPID () == 0) {
        cout << "End Result: TEST PASSED" << endl;
    }

#ifdef HAVE_MPI
    // Since you called MPI_Init, you are responsible for calling
    // MPI_Finalize after you are done using MPI.
    (void) MPI_Finalize ();
#endif // HAVE_MPI

    return 0;
}
Developer ID: 00liujj, Project: trilinos, Lines: 36, Source: lesson02_init_map_vec.cpp

Example 4: comm

int
main (int argc, char *argv[])
{
  using std::cout;
  using std::endl;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif // HAVE_MPI

  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    // Print out the Epetra software version.
    cout << Epetra_Version () << endl << endl
         << "Total number of processes: " << numProcs << endl;
  }

  example (comm); // Run the whole example.

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

#ifdef HAVE_MPI
  (void) MPI_Finalize ();
#endif // HAVE_MPI

  return 0;
}
Developer ID: LaHaine, Project: ohpc, Lines: 35, Source: lesson_epetra_dataredist.cpp

Example 5: main

int main(int argc, char *argv[]) 
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  if (Comm.NumProc() != 1)
  {
    std::cerr << "To be run with one processor only" << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_SUCCESS);
  }

  Epetra_Map Map(8, 0, Comm);

  Epetra_CrsMatrix A(Copy, Map, 0);

  // For this lower-bidiagonal matrix the exact LU factors have the same
  // sparsity pattern as A (no fill-in), so the incomplete factorization
  // is the exact one, and ILU and ILUT must act as exact solvers.
  for (int row = 0; row < 8; ++row)
  {
    double value = 2.0 + row;
    A.InsertGlobalValues(row, 1, &value, &row);
    if (row)
    {
      int col = row - 1;
      value = 1.0 + row;
      A.InsertGlobalValues(row, 1, &value, &col);
    }
#if 0
    if (row != Map.NumGlobalElements() - 1)
    {
      int col = row + 1;
      value = 0.0;
      A.InsertGlobalValues(row, 1, &value, &col);
    }
#endif
  }

  A.FillComplete();

  Test<Ifpack_ILU>("Ifpack_ILU", A);
  Test<Ifpack_ILUT>("Ifpack_ILUT", A);
  Test<Ifpack_AdditiveSchwarz<Ifpack_ILU> >("AS, Ifpack_ILU", A);
  Test<Ifpack_AdditiveSchwarz<Ifpack_ILUT> >("AS, Ifpack_ILUT", A);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Developer ID: cakeisalie, Project: oomphlib_003, Lines: 57, Source: cxx_main.cpp

Example 6: comm

int
main (int argc, char *argv[])
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name.  This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;

  // We assume that your code calls MPI_Init.  It's bad form
  // to ignore the error codes returned by MPI functions, but
  // we do so here for brevity.
  (void) MPI_Init (&argc, &argv);

  // This code takes the place of whatever you do to get an MPI_Comm.
  MPI_Comm yourComm = MPI_COMM_WORLD;

  // If your code plans to use MPI on its own, as well as through
  // Trilinos, you should strongly consider giving Trilinos a copy
  // of your MPI_Comm (created via MPI_Comm_dup).  Trilinos may in
  // the future duplicate the MPI_Comm automatically, but it does
  // not currently do this.

  // Wrap the MPI_Comm.  You are responsible for calling MPI_Comm_free
  // on your MPI_Comm after use, if necessary.  (It's not necessary or
  // legal to do this for built-in communicators like MPI_COMM_WORLD
  // or MPI_COMM_SELF.)
  Epetra_MpiComm comm (yourComm);

  // Epetra_Comm has methods that wrap basic MPI functionality.
  // MyPID() is equivalent to MPI_Comm_rank; it returns my process'
  // rank.  NumProc() is equivalent to MPI_Comm_size; it returns the
  // total number of processes in the communicator.
  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    cout << "Total number of processes: " << numProcs << endl;
  }

  // Do something with the new Epetra communicator.
  exampleRoutine (comm, cout);

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

  // If you need to call MPI_Comm_free on your MPI_Comm, now would be
  // the time to do so, before calling MPI_Finalize.

  // Since you called MPI_Init, you are responsible for calling
  // MPI_Finalize after you are done using MPI.
  (void) MPI_Finalize ();
  return 0;
}
Developer ID: 00liujj, Project: trilinos, Lines: 57, Source: lesson01_mpi_on_its_own.cpp
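The comments in Example 6 recommend handing Trilinos a duplicate of your communicator if your code also uses MPI on its own. A minimal sketch of that ownership pattern, assuming only the standard MPI_Comm_dup/MPI_Comm_free semantics:

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <iostream>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);

  // Give Trilinos its own duplicate so its messages can never collide
  // with the application's own MPI traffic.
  MPI_Comm trilinosComm;
  MPI_Comm_dup (MPI_COMM_WORLD, &trilinosComm);
  {
    Epetra_MpiComm comm (trilinosComm);
    if (comm.MyPID () == 0) {
      std::cout << "Processes: " << comm.NumProc () << std::endl;
    }
  } // The Epetra_MpiComm is destroyed before the communicator is freed.

  // Unlike MPI_COMM_WORLD, a duplicated communicator must be freed.
  MPI_Comm_free (&trilinosComm);
  MPI_Finalize ();
  return 0;
}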

Example 7: reportAverageTimes

void reportAverageTimes(Epetra_MpiComm &myEpetraComm) {
  double myTime(0.0), globalSumTime(0.0);
  for (const auto& it : timeNames) {
    myTime = accumulatedTimes[it.first];
    globalSumTime = 0.0;
    // Sum this timer over all ranks, then divide by the process count
    // and the iteration count to get a per-iteration average.
    myEpetraComm.SumAll(&myTime, &globalSumTime, 1);
    if (myEpetraComm.MyPID() == 0)
      std::cout << "Average " << timeNames[it.first]
                << " time per iteration, averaged over all processors, was: "
                << (globalSumTime / myEpetraComm.NumProc()) / NUM_ITERATIONS
                << std::endl;
  }
}
Developer ID: MDBrothers, Project: MortonCodeVsReplication, Lines: 12, Source: data_test.cpp

Example 8: Comm

int 
main (int argc, char *argv[])
{
  // These "using" statements make the code a bit more concise.
  using std::cout;
  using std::endl;

  int ierr = 0;

  // If Trilinos was built with MPI, initialize MPI, otherwise
  // initialize the serial "communicator" that stands in for MPI.
#ifdef EPETRA_MPI
  MPI_Init (&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  const int MyPID = Comm.MyPID();
  const int NumProc = Comm.NumProc();
  // We only allow (MPI) Process 0 to write to stdout.
  const bool verbose = (MyPID == 0);
  const int NumGlobalElements = 100;

  if (verbose)
    cout << Epetra_Version() << endl << endl;

  // Asking the Epetra_Comm to print itself is a good test for whether
  // you are running in an MPI environment.  However, it will print
  // something on all MPI processes, so you should remove it for a
  // large-scale parallel run.
  cout << Comm << endl;

  if (NumGlobalElements < NumProc)
    {
      if (verbose)
        cout << "numGlobalBlocks = " << NumGlobalElements 
             << " cannot be < number of processors = " << NumProc << endl;
      std::exit (EXIT_FAILURE);
    }

  // Construct a Map that puts approximately the same number of rows
  // of the matrix A on each processor.
  Epetra_Map Map (NumGlobalElements, 0, Comm);

  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map.NumMyElements();

  std::vector<int> MyGlobalElements(NumMyElements);
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // NumNz[i] is the number of nonzero elements in row i of the sparse
  // matrix on this MPI process.  Epetra_CrsMatrix uses this to figure
  // out how much space to allocate.
  std::vector<int> NumNz (NumMyElements);

  // We are building a tridiagonal matrix where each row contains the
  // nonzero elements (-1 2 -1).  Thus, we need 2 off-diagonal terms,
  // except for the first and last row of the matrix.
  for (int i = 0; i < NumMyElements; ++i)
    if (MyGlobalElements[i] == 0 || MyGlobalElements[i] == NumGlobalElements-1)
      NumNz[i] = 2; // First or last row
    else
      NumNz[i] = 3; // Neither the first nor the last row

  // Create the Epetra_CrsMatrix.
  Epetra_CrsMatrix A (Copy, Map, &NumNz[0]);

  //
  // Add rows to the sparse matrix one at a time.
  //
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  const double two = 2.0;
  int NumEntries;

  for (int i = 0; i < NumMyElements; ++i)
    {
      if (MyGlobalElements[i] == 0)
        { // The first row of the matrix.
          Indices[0] = 1;
          NumEntries = 1;
        }
      else if (MyGlobalElements[i] == NumGlobalElements - 1)
        { // The last row of the matrix.
          Indices[0] = NumGlobalElements-2;
          NumEntries = 1;
        }
      else
        { // Any row of the matrix other than the first or last.
          Indices[0] = MyGlobalElements[i]-1;
          Indices[1] = MyGlobalElements[i]+1;
          NumEntries = 2;
        }
      ierr = A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert (ierr==0);
      // Insert the diagonal entry.
      ierr = A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i]);
      assert(ierr==0);
//......... remainder of code omitted .........
Developer ID: hortonka, Project: Trilinos_tutorial, Lines: 101, Source: Epetra_Power_Method.cpp
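The excerpt above stops before the power iteration itself. As a complement, here is a minimal power-method sketch for an assembled Epetra_CrsMatrix; it is the standard loop, not the omitted code from the tutorial file:

#include <Epetra_CrsMatrix.h>
#include <Epetra_Vector.h>

// Estimate the dominant eigenvalue of A with the power method.
double powerMethod (const Epetra_CrsMatrix& A, const int niters)
{
  Epetra_Vector q (A.RowMap ());
  Epetra_Vector z (A.RowMap ());
  z.Random ();                  // random starting vector
  double lambda = 0.0;
  for (int iter = 0; iter < niters; ++iter) {
    double norm = 0.0;
    z.Norm2 (&norm);
    q.Scale (1.0 / norm, z);    // q := z / ||z||_2
    A.Multiply (false, q, z);   // z := A * q
    q.Dot (z, &lambda);         // Rayleigh quotient: lambda = q^T (A q)
  }
  return lambda;
}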

Example 9: main

int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // The problem is defined on a 2D grid; with ny = nx * NumProc, the
  // global size is nx * nx * NumProc.
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ========================================= //
  // Compare IC preconditioners to no precond. //
  // ----------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning
  // Compute number of iterations, to compare to IC later.

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  //solver.SetPrecOperator(&*PrecDiag);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int Iters = solver.NumIters();
  //cout << "No preconditioner iterations: " << Iters << endl;

#if 0 
  // Not sure how to use Ifpack_CrsRick - leave out for now.
  //
  // I wanna test funky values to be sure that they have the same
  // influence on the algorithms, both old and new
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;
  
  Teuchos::RefCountPtr<Ifpack_CrsRick> IC;
  Ifpack_IlukGraph mygraph (A->Graph(), 0, 0);
  IC = Teuchos::rcp( new Ifpack_CrsRick(*A, mygraph) );
  IC->SetAbsoluteThreshold(0.00123);
  IC->SetRelativeThreshold(0.9876);
  // Init values from A
  IC->InitValues(*A);
  // compute the factors
  IC->Factor();
  // and now estimate the condition number
  IC->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  std::string label = "Ifpack_CrsRick Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  IC->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*IC);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int RickIters = solver.NumIters();
  //cout << "Ifpack_Rick iterations: " << RickIters << endl;

  // Compare to no preconditioning
  if (RickIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#endif

  //////////////////////////////////////////////////////
//......... remainder of code omitted .........
Developer ID: KineticTheory, Project: Trilinos, Lines: 101, Source: cxx_main.cpp
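The file's actual IC comparison is omitted above. A sketch of how such a comparison could be set up with the Ifpack factory, reusing A, LHS, RHS, maxIter, and tol from the example; this is illustrative, not the file's omitted code:

#include <Ifpack.h>
#include <Ifpack_Preconditioner.h>

  // Build an IC preconditioner via the Ifpack factory and rerun the
  // same CG solve, to compare iteration counts against the baseline.
  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec =
    Teuchos::rcp (Factory.Create ("IC", &*A, /*overlap=*/0));
  Prec->Initialize ();
  Prec->Compute ();

  LHS->PutScalar (0.0);
  AztecOO icSolver;
  icSolver.SetUserMatrix (&*A);
  icSolver.SetLHS (&*LHS);
  icSolver.SetRHS (&*RHS);
  icSolver.SetAztecOption (AZ_solver, AZ_cg);
  icSolver.SetPrecOperator (&*Prec);
  icSolver.SetAztecOption (AZ_output, 16);
  icSolver.Iterate (maxIter, tol);

  int ICIters = icSolver.NumIters ();  // compare against the baseline Iters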

Example 10: main

int main(int argc, char *argv[]) {

// standard Epetra MPI/Serial Comm startup	
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif
  
  int MyPID = comm.MyPID();
  int ierr = 0;
  bool verbose = (0 == MyPID);
  bool reportErrors = (0 == MyPID);
  // setup MatlabEngine
  if (verbose) cout << "going to startup a matlab process...\n";
  EpetraExt::EpetraExt_MatlabEngine engine (comm);
  if (verbose) cout << "matlab started\n";
  
  // setup an array of doubles to be used for the examples
  int M = 20;
  int numGlobalElements = M * comm.NumProc();
  int N = 3;
  int numMyEntries = M * N;
  double* A = new double[numMyEntries];
  double* Aptr = A;
  int startValue = numMyEntries * MyPID;

  for (int col = 0; col < N; col++) {
    for (int row = 0; row < M; row++) {
      *Aptr++ = startValue++;
    }
  }

  // setup an array of ints to be used for the examples
  int* intA = new int[numMyEntries];
  int* intAptr = intA;
  int intStartValue = numMyEntries * MyPID;
  for(int i=0; i < M*N; i++) {
      *intAptr++ = intStartValue++;
  }
  
  // construct a map to be used by distributed objects
  Epetra_Map map (numGlobalElements, 0, comm);
  
  // CrsMatrix example
  // constructs a globally distributed CrsMatrix and then puts it into Matlab
  if (verbose) cout << " constructing CrsMatrix...\n";
  Epetra_CrsMatrix crsMatrix (Copy, map, N);
  int* indices = new int[N];
  for (int col=0; col < N; col++) {
    indices[col] = col;	  
  }
  
  double value = startValue;
  double* values = new double[numMyEntries];
  int minMyGID = map.MinMyGID();
  for (int row=0; row < M; row++) {
    for (int col=0; col < N; col++) {
      values[col] = value++;
    }
      
    crsMatrix.InsertGlobalValues(minMyGID + row, N, values, indices);
  }
  
  crsMatrix.FillComplete();
  if (verbose) cout << " CrsMatrix constructed\n";
  if (verbose) cout << " putting CrsMatrix into Matlab as CRSM\n";
  ierr = engine.PutRowMatrix(crsMatrix, "CRSM", false);
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutRowMatrix(crsMatrix, \"CRSM\", false): " << ierr << endl;
  }
  
  // BlockMap example
  // puts a map into Matlab
  if (verbose) cout << " putting Map into Matlab as MAP\n";
  ierr = engine.PutBlockMap(map, "MAP", false);
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutBlockMap(map, \"MAP\", false);: " << ierr << endl;
  }
  
  // MultiVector example
  // constructs a globally distributed MultiVector and then puts it into Matlab
  if (verbose) cout << " constructing MultiVector...\n";
  Epetra_MultiVector multiVector (Copy, map, A, M, N);
  if (verbose) cout << " MultiVector constructed\n";
  if (verbose) cout << " putting MultiVector into Matlab as MV\n";
  ierr = engine.PutMultiVector(multiVector, "MV");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutMultiVector(multiVector, \"MV\"): " << ierr << endl;
  }
  
  // SerialDenseMatrix example
  // constructs a SerialDenseMatrix on every PE
  if (verbose) cout << " constructing a SerialDenseMatrix...\n";
  Epetra_SerialDenseMatrix sdMatrix (Copy, A, M, M, N);
  if (verbose) cout << " SerialDenseMatrix constructed\n";
  if (verbose) cout << " putting SerialDenseMatrix from PE0 into Matlab as SDM_PE0\n";
  // since the third parameter is left out, the SerialDenseMatrix from PE0 is used by default
  ierr = engine.PutSerialDenseMatrix(sdMatrix, "SDM_PE0");
//......... remainder of code omitted .........
Developer ID: 00liujj, Project: trilinos, Lines: 101, Source: cxx_main.cpp

Example 11: build_test_matrix

void build_test_matrix(Epetra_MpiComm & Comm, int test_number, Epetra_CrsMatrix *&A){
  int NumProc = Comm.NumProc();
  int MyPID   = Comm.MyPID();

  if(test_number==1){
    // Case 1: Tridiagonal
    int NumMyEquations = 100;

    int NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
    if(MyPID < 3)  NumMyEquations++;

    // Construct a Map that puts approximately the same Number of equations on each processor
    Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0, Comm);

    // Get update list and number of local equations from newly created Map
    int* MyGlobalElements = new int[Map.NumMyElements()];
    Map.MyGlobalElements(MyGlobalElements);

    // Create an integer vector NumNz that is used to build the Petra matrix.
    // NumNz[i] is the number of OFF-DIAGONAL terms in the ith global equation on this processor.

    int* NumNz = new int[NumMyEquations];

    // We are building a tridiagonal matrix where each row has (-1 2 -1)
    // So we need 2 off-diagonal terms (except for the first and last equation)

    for (int i = 0; i < NumMyEquations; i++)
      if ((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
        NumNz[i] = 1;
      else
        NumNz[i] = 2;

    // Create a Epetra_Matrix
    A=new Epetra_CrsMatrix(Copy, Map, NumNz);

    // Add  rows one-at-a-time
    // Need some vectors to help
    // Off diagonal Values will always be -1

    double* Values = new double[2];
    Values[0] = -1.0;
    Values[1] = -1.0;
    int* Indices = new int[2];
    double two = 2.0;
    int NumEntries;

    for (int i = 0; i < NumMyEquations; i++) {
      if (MyGlobalElements[i] == 0) {
        Indices[0] = 1;
        NumEntries = 1;
      }
      else if (MyGlobalElements[i] == NumGlobalEquations-1) {
        Indices[0] = NumGlobalEquations-2;
        NumEntries = 1;
      }
      else {
        Indices[0] = MyGlobalElements[i]-1;
        Indices[1] = MyGlobalElements[i]+1;
        NumEntries = 2;
      }
      A->InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices);
      A->InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i);
    }

    A->FillComplete();

    // Cleanup
    delete [] MyGlobalElements;
    delete [] NumNz;
    delete [] Values;
    delete [] Indices;

  }
}
Developer ID: KineticTheory, Project: Trilinos, Lines: 74, Source: cxx_main.cpp

Example 12: Epetra_Map

//=============================================================================
Epetra_Map * Epetra_Map::RemoveEmptyProcesses() const
{
#ifdef HAVE_MPI
  const Epetra_MpiComm * MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());

  // If the Comm isn't MPI, just treat this as a copy constructor
  if(!MpiComm) return new Epetra_Map(*this);

  MPI_Comm NewComm,MyMPIComm = MpiComm->Comm();

  // Create the new communicator.  MPI_Comm_split returns a valid
  // communicator on all processes.  On processes where color == MPI_UNDEFINED,
  // ignore the result.  Passing key == 0 tells MPI to order the
  // processes in the new communicator by their rank in the old
  // communicator.
  const int color = (NumMyElements() == 0) ? MPI_UNDEFINED : 1;

  // MPI_Comm_split must be called collectively over the original
  // communicator.  We can't just call it on processes with color
  // one, even though we will ignore its result on processes with
  // color zero.
  int rv = MPI_Comm_split(MyMPIComm,color,0,&NewComm);
  if(rv!=MPI_SUCCESS) throw ReportError("Epetra_Map::RemoveEmptyProcesses: MPI_Comm_split failed.",-1);

  if(color == MPI_UNDEFINED)
    return 0; // We're not in the new map
  else {
    Epetra_MpiComm * NewEpetraComm = new Epetra_MpiComm(NewComm);

    // Create the new map via the copy constructor; its data is thrown
    // away and rebuilt from scratch below.
    Epetra_Map * NewMap = new Epetra_Map(*this);

    // Get rid of the old BlockMapData, now make a new one from scratch...
    NewMap->CleanupData();
    if(GlobalIndicesInt()) {
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
      NewMap->BlockMapData_ = new Epetra_BlockMapData(NumGlobalElements(),0,IndexBase(),*NewEpetraComm,false);
#endif
    }
    else {
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
      NewMap->BlockMapData_ = new Epetra_BlockMapData(NumGlobalElements64(),0,IndexBase64(),*NewEpetraComm,true);
#endif
    }

    // Now copy all of the relevant bits of BlockMapData...
    //    NewMap->BlockMapData_->Comm_                    = NewEpetraComm;
    NewMap->BlockMapData_->LID_                     = BlockMapData_->LID_;
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
    NewMap->BlockMapData_->MyGlobalElements_int_    = BlockMapData_->MyGlobalElements_int_;
#endif
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
    NewMap->BlockMapData_->MyGlobalElements_LL_     = BlockMapData_->MyGlobalElements_LL_;
#endif
    NewMap->BlockMapData_->FirstPointInElementList_ = BlockMapData_->FirstPointInElementList_;
    NewMap->BlockMapData_->ElementSizeList_         = BlockMapData_->ElementSizeList_;
    NewMap->BlockMapData_->PointToElementList_      = BlockMapData_->PointToElementList_;

    NewMap->BlockMapData_->NumGlobalElements_       = BlockMapData_->NumGlobalElements_;
    NewMap->BlockMapData_->NumMyElements_           = BlockMapData_->NumMyElements_;
    NewMap->BlockMapData_->IndexBase_               = BlockMapData_->IndexBase_;
    NewMap->BlockMapData_->ElementSize_             = BlockMapData_->ElementSize_;
    NewMap->BlockMapData_->MinMyElementSize_        = BlockMapData_->MinMyElementSize_;
    NewMap->BlockMapData_->MaxMyElementSize_        = BlockMapData_->MaxMyElementSize_;
    NewMap->BlockMapData_->MinElementSize_          = BlockMapData_->MinElementSize_;
    NewMap->BlockMapData_->MaxElementSize_          = BlockMapData_->MaxElementSize_;
    NewMap->BlockMapData_->MinAllGID_               = BlockMapData_->MinAllGID_;
    NewMap->BlockMapData_->MaxAllGID_               = BlockMapData_->MaxAllGID_;
    NewMap->BlockMapData_->MinMyGID_                = BlockMapData_->MinMyGID_;
    NewMap->BlockMapData_->MaxMyGID_                = BlockMapData_->MaxMyGID_;
    NewMap->BlockMapData_->MinLID_                  = BlockMapData_->MinLID_;
    NewMap->BlockMapData_->MaxLID_                  = BlockMapData_->MaxLID_;
    NewMap->BlockMapData_->NumGlobalPoints_         = BlockMapData_->NumGlobalPoints_;
    NewMap->BlockMapData_->NumMyPoints_             = BlockMapData_->NumMyPoints_;
    NewMap->BlockMapData_->ConstantElementSize_     = BlockMapData_->ConstantElementSize_;
    NewMap->BlockMapData_->LinearMap_               = BlockMapData_->LinearMap_;
    NewMap->BlockMapData_->DistributedGlobal_       = NewEpetraComm->NumProc()==1 ? false : BlockMapData_->DistributedGlobal_;
    NewMap->BlockMapData_->OneToOneIsDetermined_    = BlockMapData_->OneToOneIsDetermined_;
    NewMap->BlockMapData_->OneToOne_                = BlockMapData_->OneToOne_;
    NewMap->BlockMapData_->GlobalIndicesInt_        = BlockMapData_->GlobalIndicesInt_;
    NewMap->BlockMapData_->GlobalIndicesLongLong_   = BlockMapData_->GlobalIndicesLongLong_;
    NewMap->BlockMapData_->LastContiguousGID_       = BlockMapData_->LastContiguousGID_;
    NewMap->BlockMapData_->LastContiguousGIDLoc_    = BlockMapData_->LastContiguousGIDLoc_;
    NewMap->BlockMapData_->LIDHash_                 = BlockMapData_->LIDHash_ ? new Epetra_HashTable<int>(*BlockMapData_->LIDHash_) : 0;

    // Delay directory construction
    NewMap->BlockMapData_->Directory_               = 0;

    // Cleanup
    delete NewEpetraComm;

    return NewMap;
  }
#else
    // MPI isn't compiled, so just treat this as a copy constructor
    return new Epetra_Map(*this);
#endif
}
Developer ID: EllieGong, Project: trilinos, Lines: 99, Source: Epetra_Map.cpp
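RemoveEmptyProcesses hinges on MPI_Comm_split returning MPI_COMM_NULL on ranks whose color is MPI_UNDEFINED. A standalone sketch of that pattern, with an even/odd predicate standing in for "this rank still owns elements":

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <iostream>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  Epetra_MpiComm world (MPI_COMM_WORLD);

  // Ranks with color == MPI_UNDEFINED are excluded from the split and
  // receive MPI_COMM_NULL.  Even ranks stand in for processes that
  // would still own elements after RemoveEmptyProcesses.
  const int color = (world.MyPID () % 2 == 0) ? 1 : MPI_UNDEFINED;
  MPI_Comm newComm;
  MPI_Comm_split (world.Comm (), color, 0, &newComm);

  if (newComm == MPI_COMM_NULL) {
    std::cout << "rank " << world.MyPID () << ": not in the new communicator\n";
  } else {
    {
      Epetra_MpiComm reduced (newComm);
      std::cout << "rank " << world.MyPID ()
                << ": new communicator size = " << reduced.NumProc () << std::endl;
    }
    MPI_Comm_free (&newComm);  // only valid on ranks that got a communicator
  }

  MPI_Finalize ();
  return 0;
}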

Example 13: main

int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int MyPID = comm.MyPID();

  bool verbose = false;
  bool verbose1 = false; 
  // Check if we should print results to standard out
  if (argc > 1) {
    if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
      verbose1 = true;
      if (MyPID==0) verbose = true;
    }
  }
  if (verbose)
    std::cout << EpetraExt::EpetraExt_Version() << std::endl << std::endl;

  if (verbose1) std::cout << comm << std::endl;


  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //comm.Barrier();

  Epetra_Map * map;
  Epetra_CrsMatrix * A; 
  Epetra_Vector * x; 
  Epetra_Vector * b;
  Epetra_Vector * xexact;

  int nx = 20*comm.NumProc();
  int ny = 30;
  int npoints = 7;
  int xoff[] = {-1,  0,  1, -1,  0,  1,  0};
  int yoff[] = {-1, -1, -1,  0,  0,  0,  1};

   
  int ierr = 0;
  // Call routine to read in HB problem 0-base
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  // Call routine to read in HB problem 1-base
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact, 1);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  // Call routine to read in HB problem -1-base
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact, -1);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  int nx1 = 5;
  int ny1 = 4;
  Poisson2dOperator Op(nx1, ny1, comm);
  ierr += runOperatorTests(Op, verbose);

  generateHyprePrintOut("MyMatrixFile", comm);

  EPETRA_CHK_ERR(EpetraExt::HypreFileToCrsMatrix("MyMatrixFile", comm, A));
  
  runHypreTest(*A);
  delete A;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return(ierr);
}
Developer ID: 00liujj, Project: trilinos, Lines: 96, Source: cxx_main.cpp


Note: The Epetra_MpiComm::NumProc examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various authors; copyright remains with the original authors, and distribution and use are governed by each project's license.