This article collects typical usage examples of the C++ method Epetra_SerialComm::SumAll. If you are wondering what Epetra_SerialComm::SumAll does, how to use it, or want to see it in context, the hand-picked code examples below may help. You can also explore further usage examples of its containing class, Epetra_SerialComm.
The following presents 4 code examples of the Epetra_SerialComm::SumAll method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
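Before the full examples, here is a minimal, self-contained sketch of the call itself (illustrative values only). SumAll performs a global element-wise sum, the Epetra analogue of MPI_Allreduce with MPI_SUM; with Epetra_SerialComm there is only one process, so the result simply equals the input.

#include "Epetra_SerialComm.h"
#include <cstdio>

int main() {
  Epetra_SerialComm Comm;
  double local = 3.0, global = 0.0;
  // Sum 'local' across all processes (just one here) into 'global'.
  Comm.SumAll(&local, &global, 1);
  std::printf("global sum = %g\n", global); // prints 3
  return 0;
}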
Example 1: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
int mypid = Comm.MyPID();
#else
Epetra_SerialComm Comm;
int mypid = 0;
#endif
// Read XML input deck
ParameterList masterList;
if (argc > 1) {
if (strncmp("-h",argv[1],2) == 0) {
cout << "help" << endl;
ML_Print_Help();
ML_Exit(mypid,0,EXIT_SUCCESS);
}
else {
int i=0,j;
FILE* fid = fopen(argv[1],"r");
if (fid) {
i++;
fclose(fid);
}
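// Sum the per-rank success flags: if every rank opened the file,
// the global sum j equals the number of processes.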
Comm.SumAll(&i, &j, 1);
if (j!=Comm.NumProc()) {
cout << "Could not open input file." << endl;
ML_Print_Help();
ML_Exit(mypid,0,EXIT_FAILURE);
}
FileInputSource fileSrc(argv[1]);
XMLObject fileXML = fileSrc.getObject();
XMLParameterListReader ListReader;
masterList = ListReader.toParameterList(fileXML);
}
} else {
cout << "No input file specified." << endl;
ML_Print_Help();
ML_Exit(mypid,0,EXIT_SUCCESS);
}
ParameterList *fileList, *AztecOOList;
try {fileList = &(masterList.sublist("data files",true));}
catch(...) {ML_Exit(mypid,"Missing \"data files\" sublist.",EXIT_FAILURE);}
try {AztecOOList = &(masterList.sublist("AztecOO"));}
catch(...) {ML_Exit(mypid,"Missing \"AztecOO\" sublist.",EXIT_FAILURE);}
#ifdef ML_SCALING
const int ntimers=4;
enum {total, probBuild, precBuild, solve};
ml_DblLoc timeVec[ntimers], maxTime[ntimers], minTime[ntimers];
for (int i=0; i<ntimers; i++) timeVec[i].rank = Comm.MyPID();
timeVec[total].value = MPI_Wtime();
#endif
string matrixfile = fileList->get("matrix input file","A.dat");
const char *datafile = matrixfile.c_str();
int numGlobalRows;
ML_Read_Matrix_Dimensions(datafile, &numGlobalRows, Comm);
#ifdef ML_SCALING
timeVec[probBuild].value = MPI_Wtime();
#endif
// ===================================================== //
// READ IN MATRICES FROM FILE //
// ===================================================== //
if (!mypid) printf("reading %s\n",datafile); fflush(stdout);
Epetra_CrsMatrix *Amat=NULL;
//Epetra_Map *RowMap=NULL;
int errCode=0;
//if (RowMap) errCode=EpetraExt::MatrixMarketFileToCrsMatrix(datafile, *RowMap, Amat);
//else errCode=EpetraExt::MatrixMarketFileToCrsMatrix(datafile, Comm, Amat);
errCode=EpetraExt::MatrixMarketFileToCrsMatrix(datafile, Comm, Amat);
if (errCode) ML_Exit(mypid,"error reading matrix", EXIT_FAILURE);
Amat->OptimizeStorage();
Epetra_Vector LHS(Amat->RowMap()); LHS.Random();
Epetra_Vector RHS(Amat->RowMap()); RHS.PutScalar(0.0);
Epetra_LinearProblem Problem(Amat, &LHS, &RHS);
#ifdef ML_SCALING
timeVec[probBuild].value = MPI_Wtime() - timeVec[probBuild].value;
#endif
// =========================== build preconditioner ===========================
#ifdef ML_SCALING
timeVec[precBuild].value = MPI_Wtime();
#endif
// no preconditioner right now
#ifdef ML_SCALING
timeVec[precBuild].value = MPI_Wtime() - timeVec[precBuild].value;
#endif
//......... portions of this code omitted for brevity .........
Example 2: main
//......... portions of this code omitted for brevity .........
SolverType.push_back("Amesos_Superlu");
SolverType.push_back("Amesos_Superludist");
SolverType.push_back("Amesos_Mumps");
SolverType.push_back("Amesos_Dscpack");
SolverType.push_back("Amesos_Scalapack");
#endif
Epetra_Time Time(Comm);
// this is the Amesos factory object that will create
// a specific Amesos solver.
Amesos Factory;
// Cycle over all solvers.
// Only installed solvers will be tested.
for (unsigned int i = 0 ; i < SolverType.size() ; ++i)
{
// Check whether the solver is available or not
if (Factory.Query(SolverType[i]))
{
// 1.- set exact solution (constant vector)
LHS.PutScalar(1.0);
// 2.- create corresponding rhs
Matrix->Multiply(false, LHS, RHS);
// 3.- randomize solution vector
LHS.Random();
// 4.- create the amesos solver object
Amesos_BaseSolver* Solver = Factory.Create(SolverType[i], Problem);
assert (Solver != 0);
Solver->SetParameters(List);
Solver->SetUseTranspose( true) ;
// 5.- factorize and solve
Comm.Barrier() ;
if (verbose)
std::cout << std::endl
<< "Solver " << SolverType[i]
<< ", verbose = " << verbose << std::endl ;
Comm.Barrier() ;
Time.ResetStartTime();
AMESOS_CHK_ERR(Solver->SymbolicFactorization());
if (verbose)
std::cout << std::endl
<< "Solver " << SolverType[i]
<< ", symbolic factorization time = "
<< Time.ElapsedTime() << std::endl;
Comm.Barrier() ;
AMESOS_CHK_ERR(Solver->NumericFactorization());
if (verbose)
std::cout << "Solver " << SolverType[i]
<< ", numeric factorization time = "
<< Time.ElapsedTime() << std::endl;
Comm.Barrier() ;
AMESOS_CHK_ERR(Solver->Solve());
if (verbose)
std::cout << "Solver " << SolverType[i]
<< ", solve time = "
<< Time.ElapsedTime() << std::endl;
Comm.Barrier() ;
// 6.- compute difference between exact solution and Amesos one
// (there are other ways of doing this in Epetra; see the
// sketch after this example, but let's keep it simple here)
double d = 0.0, d_tot = 0.0;
for (int j = 0 ; j< LHS.Map().NumMyElements() ; ++j)
d += (LHS[0][j] - 1.0) * (LHS[0][j] - 1.0);
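// reduce the per-process squared errors into the global squared error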
Comm.SumAll(&d,&d_tot,1);
if (verbose)
std::cout << "Solver " << SolverType[i] << ", ||x - x_exact||_2 = "
<< sqrt(d_tot) << std::endl;
// 7.- delete the object
delete Solver;
TotalResidual += d_tot;
}
}
delete Matrix;
delete Map;
if (TotalResidual > 1e-9)
exit(EXIT_FAILURE);
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return(EXIT_SUCCESS);
} // end of main()
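Step 6 in the example above accumulates the squared error with an explicit loop and a SumAll reduction. As its comment notes, Epetra offers other ways; one sketch, assuming a hypothetical Exact multivector holding the known constant solution, lets Norm2 perform the global reduction internally:

Epetra_MultiVector Exact(LHS.Map(), 1);
Exact.PutScalar(1.0);            // the exact solution is the constant vector
Epetra_MultiVector Diff(LHS);    // copy of the computed solution
Diff.Update(-1.0, Exact, 1.0);   // Diff = LHS - Exact
double norm2;
Diff.Norm2(&norm2);              // global 2-norm; the reduction happens inside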
Example 3: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
#define ML_SCALING
#ifdef ML_SCALING
const int ntimers=4;
enum {total, probBuild, precBuild, solve};
ml_DblLoc timeVec[ntimers], maxTime[ntimers], minTime[ntimers];
for (int i=0; i<ntimers; i++) timeVec[i].rank = Comm.MyPID();
timeVec[total].value = MPI_Wtime();
#endif
int nx;
if (argc > 1) nx = (int) strtol(argv[1],NULL,10);
else nx = 256;
if (nx < 1) nx = 256; // input a nonpositive integer if you want to specify
// the XML input file name.
nx = nx*(int)sqrt((double)Comm.NumProc());
int ny = nx;
printf("nx = %d\nny = %d\n",nx,ny);
fflush(stdout);
char xmlFile[80];
bool readXML=false;
if (argc > 2) {strcpy(xmlFile,argv[2]); readXML = true;}
else sprintf(xmlFile,"%s","params.xml");
ParameterList GaleriList;
GaleriList.set("nx", nx);
GaleriList.set("ny", ny);
#ifdef ML_SCALING
timeVec[probBuild].value = MPI_Wtime();
#endif
Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList);
if (!Comm.MyPID()) printf("nx = %d, ny = %d, mx = %d, my = %d\n",nx,ny,GaleriList.get("mx",-1),GaleriList.get("my",-1));
fflush(stdout);
// Sum row counts as doubles to avoid potential 32-bit integer overflow for large problems.
double numMyRows = A->NumMyRows();
double numGlobalRows;
Comm.SumAll(&numMyRows,&numGlobalRows,1);
if (!Comm.MyPID()) printf("# global rows = %1.0f\n",numGlobalRows);
//printf("pid %d: #rows = %d\n",Comm.MyPID(),A->NumMyRows());
fflush(stdout);
Epetra_MultiVector *coords = CreateCartesianCoordinates("2D", Map,GaleriList);
double *x_coord=0,*y_coord=0,*z_coord=0;
double **ttt;
if (!coords->ExtractView(&ttt)) {
x_coord = ttt[0];
y_coord = ttt[1];
} else {
if (!Comm.MyPID()) printf("Error extracting coordinate vectors\n");
#ifdef HAVE_MPI
MPI_Finalize();
#endif
exit(EXIT_FAILURE);
}
Epetra_Vector LHS(*Map); LHS.Random();
Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);
Epetra_LinearProblem Problem(A, &LHS, &RHS);
AztecOO solver(Problem);
#ifdef ML_SCALING
timeVec[probBuild].value = MPI_Wtime() - timeVec[probBuild].value;
#endif
// =========================== begin of ML part ===========================
#ifdef ML_SCALING
timeVec[precBuild].value = MPI_Wtime();
#endif
ParameterList MLList;
if (readXML) {
MLList.set("read XML",true);
MLList.set("XML input file",xmlFile);
}
else {
cout << "here" << endl;
ML_Epetra::SetDefaults("SA",MLList);
MLList.set("smoother: type","Chebyshev");
MLList.set("smoother: sweeps",3);
MLList.set("coarse: max size",1);
}
MLList.set("x-coordinates", x_coord);
MLList.set("y-coordinates", y_coord);
MLList.set("z-coordinates", z_coord);
//......... portions of this code omitted for brevity .........
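The double-valued SumAll above sidesteps 32-bit integer overflow when counting global rows. In Trilinos builds that expose 64-bit global counts (an assumption about the installed version), a sketch of the direct alternative:

long long numGlobalRows64 = A->NumGlobalRows64(); // 64-bit global row count
if (!Comm.MyPID())
  printf("# global rows = %lld\n", numGlobalRows64);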
Example 4: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
// define an Epetra communicator
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// get the proc ID of this process
int MyPID = Comm.MyPID();
// get the total number of processes
int NumProc = Comm.NumProc();
// output some information to standard output
cout << Comm << endl;
// ======================== //
// now some basic MPI calls //
// ------------------------ //
int ivalue;
double dvalue, dvalue2;
double* dvalues = new double[NumProc];
double* dvalues2 = new double[NumProc];
int root = 0;
// equivalent to MPI_Barrier
Comm.Barrier();
if (MyPID == root) dvalue = 12.0;
// On input, the root processor contains the list of values
// (in this case, a single value). On exit, all processes will
// have the same list of values. Note that all values must be
// allocated before the broadcast.
// equivalent to MPI_Bcast
Comm.Broadcast(&dvalue, 1, root);
// as before, but with integer values; since C++ binds to the
// appropriate interface based on argument types, the data type
// need not be specified explicitly.
Comm.Broadcast(&ivalue, 1, root);
// equivalent to MPI_Allgather: each process contributes dvalue,
// and every process receives the full list in dvalues
Comm.GatherAll(&dvalue, dvalues, 1);
// equivalent to MPI_Allreduce with MPI_SUM
dvalue = 1.0*MyPID;
Comm.SumAll( &dvalue, dvalues, 1);
// equivalent to MPI_Allreduce with MPI_MAX
Comm.MaxAll( &dvalue, dvalues, 1);
// equivalent to MPI_Scan with MPI_SUM
dvalue = 1.0 * MyPID;
Comm.ScanSum(&dvalue, &dvalue2, 1);
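// On process p, dvalue2 now holds 0 + 1 + ... + p = p*(p+1)/2.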
cout << "On proc " << MyPID << " dvalue2 = " << dvalue2 << endl;
delete[] dvalues;
delete[] dvalues2;
// ======================= //
// Finalize MPI and return //
// ----------------------- //
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return( EXIT_SUCCESS );
} /* main */
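Every reduction in this example passes a single value, but the final Count argument lets SumAll and its siblings reduce whole buffers element-wise. A minimal sketch with illustrative values:

double myVals[3] = {1.0 * MyPID, 2.0, 3.0};
double globalVals[3];
// Element-wise: globalVals[k] becomes the sum of myVals[k] over all processes.
Comm.SumAll(myVals, globalVals, 3);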