

C++ Epetra_MpiComm::MyPID Method Code Examples

This article collects typical usage examples of the C++ method Epetra_MpiComm::MyPID. If you are wondering what Epetra_MpiComm::MyPID does, how to call it, or what real uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, Epetra_MpiComm.


The following presents 15 code examples of the Epetra_MpiComm::MyPID method, sorted by popularity by default.
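
Before the examples, some context: as the Epetra documentation quoted in Example 5 below states, MyPID() wraps MPI_Comm_rank and returns the calling process' rank within the communicator, while NumProc() wraps MPI_Comm_size. The following is a minimal sketch of that equivalence (assuming an MPI build of Trilinos; it is not taken from any of the examples below):

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <cassert>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);

  // MyPID() should agree with the rank reported by raw MPI.
  int mpiRank = -1;
  MPI_Comm_rank (MPI_COMM_WORLD, &mpiRank);
  assert (comm.MyPID () == mpiRank);

  MPI_Finalize ();
  return 0;
}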

Example 1: comm

//
// The same main() driver routine as in the first Epetra lesson.
//
int
main (int argc, char *argv[])
{
    using std::cout;
    using std::endl;

#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
    Epetra_SerialComm comm;
#endif // HAVE_MPI

    if (comm.MyPID () == 0) {
        cout << "Total number of processes: " << comm.NumProc () << endl;
    }

    // Do something with the new Epetra communicator.
    exampleRoutine (comm, cout);

    // This tells the Trilinos test framework that the test passed.
    if (comm.MyPID () == 0) {
        cout << "End Result: TEST PASSED" << endl;
    }

#ifdef HAVE_MPI
    // Since you called MPI_Init, you are responsible for calling
    // MPI_Finalize after you are done using MPI.
    (void) MPI_Finalize ();
#endif // HAVE_MPI

    return 0;
}
Developer: 00liujj, Project: trilinos, Lines: 36, Source: lesson02_init_map_vec.cpp

Example 2: myComm

// This constructor is for just one subdomain, so only adds the info
// for multiple time steps on the domain. No two-level parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
        Epetra_MpiComm(EpetraMpiComm_),
        Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
        myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
        subComm(0)
{

  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for time domain
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  (void) MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                        &time_split_MPI_Comm);
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
}
Developer: EllieGong, Project: trilinos, Lines: 27, Source: EpetraExt_MultiMpiComm.cpp
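
In the constructor above, MPI_Comm_split is called with color equal to the rank, so every process lands in its own single-process time communicator. As a hedged illustration of the color/key semantics (a hypothetical variation, not the EpetraExt code), here is a sketch that splits by even and odd ranks instead:

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <iostream>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  int worldRank = 0;
  MPI_Comm_rank (MPI_COMM_WORLD, &worldRank);

  // color selects the subgroup; key orders the ranks within it.
  MPI_Comm halfComm;
  MPI_Comm_split (MPI_COMM_WORLD, worldRank % 2, worldRank, &halfComm);
  Epetra_MpiComm subComm (halfComm);

  // MyPID() now reports the rank within the half, not within MPI_COMM_WORLD.
  std::cout << "world rank " << worldRank
            << " -> sub rank " << subComm.MyPID () << std::endl;

  MPI_Comm_free (&halfComm);  // split communicators must be freed
  MPI_Finalize ();
  return 0;
}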

Example 3: Comm

int
main ( int argc, char** argv )
{

#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm Comm (MPI_COMM_WORLD);
    if ( Comm.MyPID() == 0 )
    {
        std::cout << "% using MPI" << std::endl;
    }
#else
    Epetra_SerialComm Comm;
    cout << "% using serial Version" << endl;
#endif

    //**************** cylinder
    //    MPI_Init(&argc,&argv);

    EnsightToHdf5 es ( argc, argv );
    es.run();

#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return ( EXIT_SUCCESS );
}
Developer: Danniel-UCAS, Project: lifev, Lines: 27, Source: main.cpp

Example 4: comm

int
main (int argc, char *argv[])
{
  using std::cout;
  using std::endl;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif // HAVE_MPI

  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    // Print out the Epetra software version.
    cout << Epetra_Version () << endl << endl
         << "Total number of processes: " << numProcs << endl;
  }

  example (comm); // Run the whole example.

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

#ifdef HAVE_MPI
  (void) MPI_Finalize ();
#endif // HAVE_MPI

  return 0;
}
Developer: LaHaine, Project: ohpc, Lines: 35, Source: lesson_epetra_dataredist.cpp

Example 5: comm

int
main (int argc, char *argv[])
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name.  This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;

  // We assume that your code calls MPI_Init.  It's bad form
  // to ignore the error codes returned by MPI functions, but
  // we do so here for brevity.
  (void) MPI_Init (&argc, &argv);

  // This code takes the place of whatever you do to get an MPI_Comm.
  MPI_Comm yourComm = MPI_COMM_WORLD;

  // If your code plans to use MPI on its own, as well as through
  // Trilinos, you should strongly consider giving Trilinos a copy
  // of your MPI_Comm (created via MPI_Comm_dup).  Trilinos may in
  // the future duplicate the MPI_Comm automatically, but it does
  // not currently do this.

  // Wrap the MPI_Comm.  You are responsible for calling MPI_Comm_free
  // on your MPI_Comm after use, if necessary.  (It's not necessary or
  // legal to do this for built-in communicators like MPI_COMM_WORLD
  // or MPI_COMM_SELF.)
  Epetra_MpiComm comm (yourComm);

  // Epetra_Comm has methods that wrap basic MPI functionality.
  // MyPID() is equivalent to MPI_Comm_rank; it returns my process'
  // rank.  NumProc() is equivalent to MPI_Comm_size; it returns the
  // total number of processes in the communicator.
  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    cout << "Total number of processes: " << numProcs << endl;
  }

  // Do something with the new Epetra communicator.
  exampleRoutine (comm, cout);

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

  // If you need to call MPI_Comm_free on your MPI_Comm, now would be
  // the time to do so, before calling MPI_Finalize.

  // Since you called MPI_Init, you are responsible for calling
  // MPI_Finalize after you are done using MPI.
  (void) MPI_Finalize ();
  return 0;
}
Developer: 00liujj, Project: trilinos, Lines: 57, Source: lesson01_mpi_on_its_own.cpp
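
Following the advice in the comments above, a cautious pattern (a sketch, not part of the lesson's code) is to hand Trilinos a duplicate of the application's communicator and free it afterwards:

#include <mpi.h>
#include <Epetra_MpiComm.h>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);

  // Duplicate the communicator so Trilinos' messages cannot collide
  // with the application's own MPI traffic.
  MPI_Comm trilinosComm;
  MPI_Comm_dup (MPI_COMM_WORLD, &trilinosComm);

  {
    Epetra_MpiComm comm (trilinosComm);
    // ... use comm with Epetra objects here ...
  } // the Epetra_MpiComm goes away before the duplicate is freed

  // Unlike MPI_COMM_WORLD, a duplicated communicator must be freed.
  MPI_Comm_free (&trilinosComm);
  MPI_Finalize ();
  return 0;
}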

Example 6: reportAverageTimes

void reportAverageTimes(Epetra_MpiComm &myEpetraComm){
  double myTime(0.0), globalSumTime(0.0);
  for(const auto& it: timeNames){
    myTime = accumulatedTimes[it.first];
    globalSumTime = 0.0;
    myEpetraComm.SumAll(&myTime, &globalSumTime, 1);
    if(myEpetraComm.MyPID() == 0)
      std::cout << "Average " << it.second
                << " time per iteration, averaged over all processors, was: "
                << (globalSumTime/myEpetraComm.NumProc())/NUM_ITERATIONS << std::endl;
  }
}
Developer: MDBrothers, Project: MortonCodeVsReplication, Lines: 12, Source: data_test.cpp
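
The reduction above relies on Epetra_Comm::SumAll, which behaves like MPI_Allreduce with MPI_SUM: every rank receives the global sum. A minimal self-contained sketch of the same averaging idiom (the timing value is a stand-in, not the project's data):

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <iostream>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);

  double myTime = 1.0 + comm.MyPID ();  // stand-in for a measured local time
  double globalSumTime = 0.0;
  comm.SumAll (&myTime, &globalSumTime, 1);  // all ranks receive the sum

  if (comm.MyPID () == 0) {
    std::cout << "average over " << comm.NumProc () << " processes: "
              << globalSumTime / comm.NumProc () << std::endl;
  }

  MPI_Finalize ();
  return 0;
}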

Example 7: main

Int main ( Int argc, char** argv )
{
    //! Initializing Epetra communicator
    MPI_Init (&argc, &argv);
    Epetra_MpiComm Comm (MPI_COMM_WORLD);
    if ( Comm.MyPID() == 0 )
    {
        cout << "% using MPI" << endl;
    }
    Heart heart ( argc, argv );
    heart.run();

    //! Finalizing Epetra communicator
    MPI_Finalize();
    return ( EXIT_SUCCESS );
}
Developer: erianthus, Project: lifev, Lines: 16, Source: main.cpp

Example 8: main

int main(int argc, char **argv)
{
  //
  //  If --enable-mpi, an MPI communicator is used, otherwise a serial
  //  stub communicator is used.  
  //
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  //
  //  Print out a summary line followed by a "Hello" line from each process
  //

  if (Comm.MyPID()==0)
    cout << New_Package_Version() << endl << endl;

  Newp_Hello Hello( Comm ) ; 
  Hello.Print( cout );


  //
  //  If --enable-newp_swahili is set, HAVE_NEWP_SWAHILI is set in 
  //    New_Package_config.h which is included by Newp_Hello.h and hence:
  //      Print out a summary line followed by a "Jambo" line from each process
  //
#ifdef HAVE_NEWP_SWAHILI
  Newp_Jambo Jambo( Comm ) ; 
  Jambo.Print( cout );
#endif

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif
  return 0;
}
Developer: gitter-badger, Project: quinoa, Lines: 39, Source: cxx_main.cpp

Example 9: verbose

int
main ( int argc, char** argv )
{

    bool verbose (false);
#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm Comm (MPI_COMM_WORLD);
    if ( Comm.MyPID() == 0 )
    {
        verbose = true;
    }
#else
    Epetra_SerialComm Comm;
    verbose = true;
#endif

    NavierStokes<RegionMesh<LinearTriangle>, KimMoin >
    ns ( argc, argv, "dataKimMoin", "kimMoin" );

    ns.run();

    if (verbose)
    {
        std::cout << "End Result: TEST PASSED" << std::endl;
    }

#ifdef HAVE_MPI
    if (verbose)
    {
        std::cout << "MPI Finalization" << std::endl;
    }
    MPI_Finalize();
#endif
    return ( EXIT_SUCCESS );
}
Developer: Danniel-UCAS, Project: lifev, Lines: 36, Source: mainKimMoin.cpp

Example 10: Comm

int 
main (int argc, char *argv[])
{
  // These "using" statements make the code a bit more concise.
  using std::cout;
  using std::endl;

  int ierr = 0;

  // If Trilinos was built with MPI, initialize MPI, otherwise
  // initialize the serial "communicator" that stands in for MPI.
#ifdef EPETRA_MPI
  MPI_Init (&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  const int MyPID = Comm.MyPID();
  const int NumProc = Comm.NumProc();
  // We only allow (MPI) Process 0 to write to stdout.
  const bool verbose = (MyPID == 0);
  const int NumGlobalElements = 100;

  if (verbose)
    cout << Epetra_Version() << endl << endl;

  // Asking the Epetra_Comm to print itself is a good test for whether
  // you are running in an MPI environment.  However, it will print
  // something on all MPI processes, so you should remove it for a
  // large-scale parallel run.
  cout << Comm << endl;

  if (NumGlobalElements < NumProc)
    {
      if (verbose)
        cout << "numGlobalBlocks = " << NumGlobalElements 
             << " cannot be < number of processors = " << NumProc << endl;
      std::exit (EXIT_FAILURE);
    }

  // Construct a Map that puts approximately the same number of rows
  // of the matrix A on each processor.
  Epetra_Map Map (NumGlobalElements, 0, Comm);

  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map.NumMyElements();

  std::vector<int> MyGlobalElements(NumMyElements);
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // NumNz[i] is the number of nonzero elements in row i of the sparse
  // matrix on this MPI process.  Epetra_CrsMatrix uses this to figure
  // out how much space to allocate.
  std::vector<int> NumNz (NumMyElements);

  // We are building a tridiagonal matrix where each row contains the
  // nonzero elements (-1 2 -1).  Thus, we need 2 off-diagonal terms,
  // except for the first and last row of the matrix.
  for (int i = 0; i < NumMyElements; ++i)
    if (MyGlobalElements[i] == 0 || MyGlobalElements[i] == NumGlobalElements-1)
      NumNz[i] = 2; // First or last row
    else
      NumNz[i] = 3; // Not the (first or last row)

  // Create the Epetra_CrsMatrix.
  Epetra_CrsMatrix A (Copy, Map, &NumNz[0]);

  //
  // Add rows to the sparse matrix one at a time.
  //
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  const double two = 2.0;
  int NumEntries;

  for (int i = 0; i < NumMyElements; ++i)
    {
      if (MyGlobalElements[i] == 0)
        { // The first row of the matrix.
          Indices[0] = 1;
          NumEntries = 1;
        }
      else if (MyGlobalElements[i] == NumGlobalElements - 1)
        { // The last row of the matrix.
          Indices[0] = NumGlobalElements-2;
          NumEntries = 1;
        }
      else
        { // Any row of the matrix other than the first or last.
          Indices[0] = MyGlobalElements[i]-1;
          Indices[1] = MyGlobalElements[i]+1;
          NumEntries = 2;
        }
      ierr = A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert (ierr==0);
      // Insert the diagonal entry.
      ierr = A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i]);
      assert(ierr==0);
//......... remainder of the code omitted .........
Developer: hortonka, Project: Trilinos_tutorial, Lines: 101, Source: Epetra_Power_Method.cpp
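
The listing above stops before the matrix is finalized and the power method runs. Independent of the omitted code, the step that must follow the InsertGlobalValues loop is FillComplete(), which builds the parallel communication pattern; only then can the matrix be applied. A minimal sketch of that step on a toy diagonal matrix (assuming an MPI build; this is not the tutorial's omitted code):

#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>
#include <Epetra_CrsMatrix.h>
#include <Epetra_Vector.h>

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);

  // A tiny diagonal matrix, just to show the finalize-and-apply step.
  Epetra_Map Map (10, 0, Comm);
  Epetra_CrsMatrix A (Copy, Map, 1);
  const double two = 2.0;
  for (int i = 0; i < Map.NumMyElements (); ++i) {
    int gid = Map.GID (i);
    A.InsertGlobalValues (gid, 1, &two, &gid);
  }

  // No entries may be inserted after FillComplete(); it builds the
  // import/export pattern needed for parallel A*x.
  A.FillComplete ();

  Epetra_Vector x (Map), y (Map);
  x.PutScalar (1.0);
  A.Multiply (false, x, y);  // y = A*x

  MPI_Finalize ();
  return 0;
}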

Example 11: main

// *************************************************************
// main program - This benchmark code reads a Harwell-Boeing data
//                set and finds the minimal eigenvalue of the matrix
//                using inverse iteration.
// *************************************************************
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  cout << Comm << endl;

  int MyPID = Comm.MyPID();

  bool verbose = false;
  if (MyPID==0) verbose = true; // Print out detailed results (turn off for best performance)

  if(argc != 2) {
    if (verbose) cerr << "Usage: " << argv[0] << " HB_data_file" << endl;
    exit(1); // Error
  }

  // Define pointers that will be set by HB read function

  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA;
  Epetra_Vector * readx;
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;

  // Call function to read in HB problem
  Trilinos_Util_ReadHb2Epetra(argv[1], Comm, readMap, readA, readx, readb, readxexact);

  // Not interested in x, b or xexact for an eigenvalue problem
  delete readx;
  delete readb;
  delete readxexact;

#ifdef EPETRA_MPI // If running in parallel, we need to distribute matrix across all PEs.

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);

  A.Export(*readA, exporter, Add);
  assert(A.FillComplete()==0);

  delete readA;
  delete readMap;

#else // If not running in parallel, we do not need to distribute the matrix
  Epetra_CrsMatrix & A = *readA;
#endif

  // Create flop counter to collect all FLOPS
  Epetra_Flops counter;
  A.SetFlopCounter(counter);

  double lambda = 0; // Minimal eigenvalue returned here
  // Call inverse iteration solver
  Epetra_Time timer(Comm);
  invIteration(A, lambda, verbose);
  double elapsedTime = timer.ElapsedTime();
  double totalFlops = counter.Flops();
  double MFLOPS = totalFlops/elapsedTime/1000000.0;


  cout << endl
       << "*************************************************" << endl
       << " Approximate smallest eigenvalue = " << lambda << endl
       << "    Total Time    = " << elapsedTime << endl
       << "    Total FLOPS   = " << totalFlops << endl
       << "    Total MFLOPS  = " << MFLOPS << endl
       << "*************************************************" << endl;

  // All done
#ifdef EPETRA_MPI
  MPI_Finalize();
#else
  delete readA;
  delete readMap;
#endif

return (0);
}
Developer: 00liujj, Project: trilinos, Lines: 93, Source: cxx_main.cpp

Example 12: main

int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ========================================= //
  // Compare IC preconditioners to no precond. //
  // ----------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning
  // Compute number of iterations, to compare to IC later.

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  //solver.SetPrecOperator(&*PrecDiag);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int Iters = solver.NumIters();
  //cout << "No preconditioner iterations: " << Iters << endl;

#if 0 
  // Not sure how to use Ifpack_CrsRick - leave out for now.
  //
  // I wanna test funky values to be sure that they have the same
  // influence on the algorithms, both old and new
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;
  
  Teuchos::RefCountPtr<Ifpack_CrsRick> IC;
  Ifpack_IlukGraph mygraph (A->Graph(), 0, 0);
  IC = Teuchos::rcp( new Ifpack_CrsRick(*A, mygraph) );
  IC->SetAbsoluteThreshold(0.00123);
  IC->SetRelativeThreshold(0.9876);
  // Init values from A
  IC->InitValues(*A);
  // compute the factors
  IC->Factor();
  // and now estimate the condition number
  IC->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  std::string label = "Ifpack_CrsRick Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  IC->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*IC);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int RickIters = solver.NumIters();
  //cout << "Ifpack_Rick iterations: " << RickIters << endl;

  // Compare to no preconditioning
  if (RickIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#endif

  //////////////////////////////////////////////////////
//......... remainder of the code omitted .........
Developer: KineticTheory, Project: Trilinos, Lines: 101, Source: cxx_main.cpp

Example 13: main

int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  bool verbose = false;
  if (MyPID==0) verbose = true;

  if (verbose)
    cout << EpetraExt::EpetraExt_Version() << endl << endl;

  cout << Comm << endl;

  if(argc < 2) {
    if (verbose)
      cerr << "Usage: " << argv[0]
	   << " HB_filename" << endl;
    return(1);
  }

  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //Comm.Barrier();

  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  // Call routine to read in HB problem
  Trilinos_Util_ReadHb2Epetra(argv[1], Comm, readMap, readA, readx, readb, readxexact);

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);
  Epetra_Vector x(map);
  Epetra_Vector b(map);
  Epetra_Vector xexact(map);

  Epetra_Time FillTimer(Comm);
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  Comm.Barrier();
  double vectorRedistributeTime = FillTimer.ElapsedTime();
  A.Export(*readA, exporter, Add);
  Comm.Barrier();
  double matrixRedistributeTime = FillTimer.ElapsedTime() - vectorRedistributeTime;
  assert(A.FillComplete()==0);    
  Comm.Barrier();
  double fillCompleteTime = FillTimer.ElapsedTime() - matrixRedistributeTime;
  if (Comm.MyPID()==0)	{
    cout << "\n\n****************************************************" << endl;
    cout << "\n Vector redistribute  time (sec) = " << vectorRedistributeTime<< endl;
    cout << "    Matrix redistribute time (sec) = " << matrixRedistributeTime << endl;
    cout << "    Transform to Local  time (sec) = " << fillCompleteTime << endl<< endl;
  }
  Epetra_Vector tmp1(*readMap);
  Epetra_Vector tmp2(map);
  readA->Multiply(false, *readxexact, tmp1);

  A.Multiply(false, xexact, tmp2);
  double residual;
  tmp1.Norm2(&residual);
  if (verbose) cout << "Norm of Ax from file            = " << residual << endl;
  tmp2.Norm2(&residual);
  if (verbose) cout << "Norm of Ax after redistribution = " << residual << endl << endl << endl;

  //cout << "A from file = " << *readA << endl << endl << endl;

  //cout << "A after dist = " << A << endl << endl << endl;

  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;

  Comm.Barrier();

  EpetraExt::RowMatrixToMatrixMarketFile("test.mm", A, "test matrix", "This is a test matrix");
				       
#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

return 0 ;
}
Developer: haripandey, Project: trilinos, Lines: 100, Source: MatrixMarket_IO.cpp

Example 14: main

int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int MyPID = comm.MyPID();

  bool verbose = false;
  bool verbose1 = false; 
  // Check if we should print results to standard out
  if (argc > 1) {
    if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
      verbose1 = true;
      if (MyPID==0) verbose = true;
    }
  }

  if (verbose1) cout << comm << endl;


  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //comm.Barrier();

  Epetra_CrsMatrix * A; 
  EPETRA_CHK_ERR(EpetraExt::MatlabFileToCrsMatrix("A.dat", comm, A));

  Epetra_Vector  x(A->OperatorDomainMap()); 
  Epetra_Vector  b(A->OperatorRangeMap());
  x.Random();
  A->Apply(x,b); // Generate RHS from x
  Epetra_Vector xx(x); // Copy x to xx for later use

  Epetra_LinearProblem problem(A, &x, &b);
  // Construct a solver object for this problem

  AztecOO solver(problem);
  solver.SetAztecOption(AZ_precond, AZ_none);
  if (!verbose1) solver.SetAztecOption(AZ_output, AZ_none);
  solver.SetAztecOption(AZ_kspace, A->NumGlobalRows());
  AztecOO_Operator AOpInv(&solver, A->NumGlobalRows());
  Epetra_InvOperator AInvOp(&AOpInv);

  EPETRA_CHK_ERR(EpetraExt::OperatorToMatlabFile("Ainv.dat", AInvOp));

  comm.Barrier();

  Epetra_CrsMatrix * AInv; 
  EPETRA_CHK_ERR(EpetraExt::MatlabFileToCrsMatrix("Ainv.dat", comm, AInv));

  EPETRA_CHK_ERR(AInv->Apply(b,x));

  EPETRA_CHK_ERR(x.Update(1.0, xx, -1.0));
  double residual = 0.0;
  EPETRA_CHK_ERR(x.Norm2(&residual));
  if (verbose) cout << "Norm of difference between computed x and exact x = " << residual << endl;
  int ierr = checkValues(residual,0.0,"Norm of difference between computed x and exact x", verbose);

  
  delete A;
  delete AInv;


  #ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(ierr);
}
Developer: haripandey, Project: trilinos, Lines: 74, Source: cxx_main.cpp

Example 15: main

int main(int argc, char *argv[]) {
cout << "going to setup MPI...\n";

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif
  cout << "mpit setup complete\n";

  int MyPID = comm.MyPID();

  char s [BUFSIZE] ;
  char matlabBuffer [MATLABBUF];
  cout << "going to init matlab\n";
  EpetraExt::EpetraExt_MatlabEngine * enginePtr = new EpetraExt::EpetraExt_MatlabEngine(comm);
  EpetraExt::EpetraExt_MatlabEngine & engine = *enginePtr;
  cout << "matlab started\n";
  
  /* GetCrsMatrix test
  engine.EvalString("CRSM=sparse(eye(8,10))", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int myM=4;
  int M = myM * comm.NumProc();
  int N = 10;  
  Epetra_Map getMap (M, 0, comm);
  Epetra_Map colMap(N, N, 0, comm);
  Epetra_CrsMatrix getCRSM (Copy, getMap, colMap, N);
  double colValue = 0;
  for(int row=myM*MyPID; row < myM*(MyPID+1); row++) {
    getCRSM.InsertGlobalValues(row, 1, &colValue, &row);
  }
  getCRSM.FillComplete(colMap, getMap);
  //getCRSM.FillComplete();
  int ierr = engine.GetCrsMatrix("CRSM", getCRSM, false);
  if (ierr) {
    cout << "engine.GetCrsMatrix(\"CRSM\", getCRSM, false) failed" << endl;
  }
  
  cout << getCRSM << endl;
  
  engine.EvalString("whos", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  */
  
  /* GetIntSerialDenseMatrix test
  engine.EvalString("ISDM=rand(8,2)*100", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int procToGet = 1;
  int M = 8;
  int N = 2;  
  int* A = new int[M*N];
  Epetra_IntSerialDenseMatrix getISDM (View, A, M, M, N);
  int ierr = engine.GetIntSerialDenseMatrix("ISDM", getISDM, procToGet);
  if (ierr) {
    cout << "engine.GetIntSerialDenseMatrix(\"ISDM\", getISDM, procToGet) failed" << endl;
  }
  
  if (MyPID == 1) cout << getISDM << endl;
  */
  
  /* GetSerialDenseMatrix test
  engine.EvalString("SDM=rand(8,2)", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int procToGet = 1;
  int M = 8;
  int N = 2;  
  double* A = new double[M*N];
  Epetra_SerialDenseMatrix getSDM (View, A, M, M, N);
  int ierr = engine.GetSerialDenseMatrix("SDM", getSDM, procToGet);
  if (ierr) {
    cout << "engine.GetSerialDenseMatrix(\"SDM\", getSDM, procToGet) failed" << endl;
  }
  
  if (MyPID == 1) cout << getSDM << endl;
  */
  
  /* GetMultiVector test
  if (comm.NumProc() != 2) {
    if (MyPID == 0) cout << "Error: this test must be run with exactly two PE." << endl;
    delete &engine;
    #ifdef EPETRA_MPI
    MPI_Finalize();
    #endif
    return(-1);
  }
  engine.EvalString("MV=rand(8,2)", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int myM = 4;
  int M = myM * comm.NumProc();
  int N = 2;
  Epetra_Map getMap (M, 0, comm);
  double* A = new double[myM*N];
  Epetra_MultiVector getMV (View, getMap, A, myM, N);
  cout << "MultiVector created" << endl;
  int ierr = engine.GetMultiVector("MV", getMV);
  if (ierr) {
    cout << "engine.GetMultiVector(\"MV\", getMV) failed" << endl;
  }
//......... remainder of the code omitted .........
Developer: haripandey, Project: trilinos, Lines: 101, Source: cxx_main.cpp


Note: The Epetra_MpiComm::MyPID examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by many developers. The code remains under the copyright of its original authors; consult each project's License before redistributing or reusing it, and do not republish without permission.