This page collects typical usage examples of the C++ class Epetra_SerialComm. If you are wondering what Epetra_SerialComm is for, or how to use it in your own code, the selected class examples below may help.
15 code examples of the Epetra_SerialComm class are shown below, sorted by popularity.
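Before the numbered examples, here is a minimal self-contained sketch of the basic pattern. Epetra_SerialComm implements the Epetra_Comm interface for a single process, so it can be handed to any Epetra object that expects a communicator; the map size and values below are arbitrary, chosen only for illustration.

#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Epetra_Vector.h"
#include <iostream>

int main()
{
  // On a serial communicator, MyPID() is always 0 and NumProc() is always 1.
  Epetra_SerialComm Comm;
  std::cout << "PID " << Comm.MyPID() << " of " << Comm.NumProc() << std::endl;
  // Any Epetra object that needs a communicator accepts it, e.g. a map
  // with 10 global elements (index base 0) and a vector on that map.
  Epetra_Map Map(10, 0, Comm);
  Epetra_Vector v(Map);
  v.PutScalar(1.0);
  double norm;
  v.Norm2(&norm); // 2-norm of the all-ones vector: sqrt(10)
  std::cout << "||v||_2 = " << norm << std::endl;
  return 0;
}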
Example 1: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// this example is in serial only
if (Comm.NumProc()>1) exit(0);
FileGrid Grid(Comm, "Hex_3D.grid");
// create a list of all nodes that are linked to a face
// we have 4 interfaces here, each with 2 sides:
// with tags 1/2, 11/12, 21/22, 31/32
const int ninter = 4;
vector<map<int,int> > nodes(ninter*2);
for (int i=0; i<Grid.NumMyBoundaryFaces(); ++i)
{
int tag;
int nodeids[4];
Grid.FaceVertices(i,tag,nodeids);
if (tag==1)
{
for (int j=0; j<4; ++j)
nodes[0][nodeids[j]] = nodeids[j];
}
else if (tag==2)
{
for (int j=0; j<4; ++j)
nodes[1][nodeids[j]] = nodeids[j];
}
else if (tag==11)
{
for (int j=0; j<4; ++j)
nodes[2][nodeids[j]] = nodeids[j];
}
else if (tag==12)
{
for (int j=0; j<4; ++j)
nodes[3][nodeids[j]] = nodeids[j];
}
else if (tag==21)
{
for (int j=0; j<4; ++j)
nodes[4][nodeids[j]] = nodeids[j];
}
else if (tag==22)
{
for (int j=0; j<4; ++j)
nodes[5][nodeids[j]] = nodeids[j];
}
else if (tag==31)
{
for (int j=0; j<4; ++j)
nodes[6][nodeids[j]] = nodeids[j];
}
else if (tag==32)
{
for (int j=0; j<4; ++j)
nodes[7][nodeids[j]] = nodeids[j];
}
else
continue;
}
// ------------------------------------------------------------- //
// create 4 empty MOERTEL::Interface instances
// ------------------------------------------------------------- //
int printlevel = 3; // ( moertel takes values 0 - 10 )
//int printlevel = 8; // ( moertel takes values 0 - 10 ) // GAH gives info about intersection root finding
vector<RefCountPtr<MOERTEL::Interface> > interfaces(ninter);
for (int i=0; i<ninter; ++i)
interfaces[i] = rcp(new MOERTEL::Interface(i,false,Comm,printlevel));
// ------------------------------------------------------------- //
// Add nodes on both sides of interface to interfaces
// loop over all nodes in the maps and add them
// to the interface with unique ids
// ------------------------------------------------------------- //
for (int i=0; i<ninter; ++i)
{
map<int,int>::iterator curr;
for (int j=0; j<2; ++j)
for (curr = nodes[i*2+j].begin(); curr != nodes[i*2+j].end(); ++curr)
{
// get unique node id
int nodeid = curr->second;
// get node coordinates
double coord[3];
Grid.VertexCoord(nodeid,coord);
// create a moertel node
MOERTEL::Node node(nodeid,coord,1,&nodeid,false,printlevel);
// add node to interface i on side j
interfaces[i]->AddNode(node,j);
}
}
//......... (the rest of the code is omitted here) .........
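The eight-branch if/else chain in Example 1 does nothing more than map the face tags {1, 2, 11, 12, 21, 22, 31, 32} to the indices 0 through 7 of the nodes vector. Below is a standalone sketch of a table-driven alternative; the sample face tags and node ids are made up purely for illustration.

#include <cstdio>
#include <map>
#include <vector>

int main()
{
  // Build the tag -> side-index table once, instead of chaining if/else.
  const int tags[] = {1, 2, 11, 12, 21, 22, 31, 32};
  std::map<int,int> tagToSide;
  for (int k = 0; k < 8; ++k) tagToSide[tags[k]] = k;
  std::vector<std::map<int,int> > nodes(8);
  // Pretend we visited three faces; tag 99 lies on no interface.
  const int faceTags[] = {11, 32, 99};
  const int nodeids[4] = {10, 11, 12, 13};
  for (int f = 0; f < 3; ++f) {
    std::map<int,int>::const_iterator it = tagToSide.find(faceTags[f]);
    if (it == tagToSide.end()) continue; // skip faces not on an interface
    for (int j = 0; j < 4; ++j)
      nodes[it->second][nodeids[j]] = nodeids[j];
  }
  std::printf("side 2 now holds %d nodes\n", (int)nodes[2].size());
  return 0;
}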
Example 2: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int nProcs, myPID ;
Teuchos::ParameterList pLUList ; // ParaLU parameters
Teuchos::ParameterList isoList ; // Isorropia parameters
Teuchos::ParameterList shyLUList ; // shyLU parameters
Teuchos::ParameterList ifpackList ; // Ifpack parameters
string ipFileName = "ShyLU.xml"; // TODO : Accept as i/p
nProcs = mpiSession.getNProc();
myPID = Comm.MyPID();
if (myPID == 0)
{
cout <<"Parallel execution: nProcs="<< nProcs << endl;
}
// =================== Read input xml file =============================
Teuchos::updateParametersFromXmlFile(ipFileName, &pLUList);
isoList = pLUList.sublist("Isorropia Input");
shyLUList = pLUList.sublist("ShyLU Input");
shyLUList.set("Outer Solver Library", "AztecOO");
// Get matrix market file name
string MMFileName = Teuchos::getParameter<string>(pLUList, "mm_file");
string prec_type = Teuchos::getParameter<string>(pLUList, "preconditioner");
int maxiters = Teuchos::getParameter<int>(pLUList, "Outer Solver MaxIters");
double tol = Teuchos::getParameter<double>(pLUList, "Outer Solver Tolerance");
string rhsFileName = pLUList.get<string>("rhs_file", "");
if (myPID == 0)
{
cout << "Input :" << endl;
cout << "ParaLU params " << endl;
pLUList.print(std::cout, 2, true, true);
cout << "Matrix market file name: " << MMFileName << endl;
}
// ==================== Read input Matrix ==============================
Epetra_CrsMatrix *A;
Epetra_MultiVector *b1;
int err = EpetraExt::MatrixMarketFileToCrsMatrix(MMFileName.c_str(), Comm,
A);
//EpetraExt::MatlabFileToCrsMatrix(MMFileName.c_str(), Comm, A);
//assert(err != 0);
//cout <<"Done reading the matrix"<< endl;
int n = A->NumGlobalRows();
//cout <<"n="<< n << endl;
// Create input vectors
Epetra_Map vecMap(n, 0, Comm);
if (rhsFileName != "")
{
err = EpetraExt::MatrixMarketFileToMultiVector(rhsFileName.c_str(),
vecMap, b1);
}
else
{
b1 = new Epetra_MultiVector(vecMap, 1, false);
b1->PutScalar(1.0);
}
Epetra_MultiVector x(vecMap, 1);
//cout << "Created the vectors" << endl;
// Partition the matrix with hypergraph partitioning and redistribute
Isorropia::Epetra::Partitioner *partitioner = new
Isorropia::Epetra::Partitioner(A, isoList, false);
partitioner->partition();
Isorropia::Epetra::Redistributor rd(partitioner);
Epetra_CrsMatrix *newA;
Epetra_MultiVector *newX, *newB;
rd.redistribute(*A, newA);
delete A;
A = newA;
rd.redistribute(x, newX);
rd.redistribute(*b1, newB);
Epetra_LinearProblem problem(A, newX, newB);
AztecOO solver(problem);
Ifpack_Preconditioner *prec;
ML_Epetra::MultiLevelPreconditioner *MLprec;
if (prec_type.compare("ShyLU") == 0)
{
prec = new Ifpack_ShyLU(A);
prec->SetParameters(shyLUList);
prec->Initialize();
prec->Compute();
//(dynamic_cast<Ifpack_ShyLU *>(prec))->JustTryIt();
//......... (the rest of the code is omitted here) .........
Example 3: main
// ------------------------------------------------------------------------
// --------------------------- Main Program -----------------------------
// ------------------------------------------------------------------------
int main(int argc, char *argv[])
{
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
bool verbose = false;
// Check for verbose output
if (argc>1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 101;
bool success = false;
try {
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
std::cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << std::endl;
throw "NOX Error";
}
// Create the interface between NOX and the application
// This object is derived from NOX::Epetra::Interface
Teuchos::RCP<TransientInterface> interface =
Teuchos::rcp(new TransientInterface(NumGlobalElements, Comm, -20.0, 20.0));
double dt = 0.10;
interface->setdt(dt);
// Set the PDE nonlinear coefficient for this problem
interface->setPDEfactor(1.0);
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::Error);
else
printParams.set("Output Information", NOX::Utils::Error);
// Create a print class for controlling output below
NOX::Utils utils(printParams);
// Sublist for line search
Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
searchParams.set("Method", "Full Step");
// Sublist for direction
Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
dirParams.set("Method", "Newton");
Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
//......... (the rest of the code is omitted here) .........
Example 4: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm comm;
#endif
Galeri::core::Workspace::setNumDimensions(3);
Galeri::grid::Loadable domain, boundary;
int numGlobalElementsX = 2 * comm.NumProc();
int numGlobalElementsY = 2;
int numGlobalElementsZ = 2;
int mx = comm.NumProc();
int my = 1;
int mz = 1;
Galeri::grid::Generator::
getCubeWithHexs(comm, numGlobalElementsX, numGlobalElementsY, numGlobalElementsZ,
mx, my, mz, domain, boundary);
Epetra_Map matrixMap(domain.getNumGlobalVertices(), 0, comm);
Epetra_FECrsMatrix A(Copy, matrixMap, 0);
Epetra_FEVector LHS(matrixMap);
Epetra_FEVector RHS(matrixMap);
Galeri::problem::ScalarLaplacian<Laplacian> problem("Hex", 1, 8);
problem.integrate(domain, A, RHS);
LHS.PutScalar(0.0);
problem.imposeDirichletBoundaryConditions(boundary, A, RHS, LHS);
// ============================================================ //
// Solving the linear system is the next step. This is done //
// by using the IFPACK factory to create an IC preconditioner //
// and setting a few parameters via a Teuchos::ParameterList. //
// ============================================================ //
Ifpack Factory;
Ifpack_Preconditioner* Prec = Factory.Create("IC", &A, 0);
Teuchos::ParameterList list;
list.set("fact: level-of-fill", 1);
IFPACK_CHK_ERR(Prec->SetParameters(list));
IFPACK_CHK_ERR(Prec->Initialize());
IFPACK_CHK_ERR(Prec->Compute());
Epetra_LinearProblem linearProblem(&A, &LHS, &RHS);
AztecOO solver(linearProblem);
solver.SetAztecOption(AZ_solver, AZ_cg);
solver.SetPrecOperator(Prec);
solver.Iterate(1550, 1e-9);
// visualization using MEDIT -- a VTK module is available as well
Galeri::viz::MEDIT::write(domain, "sol", LHS);
// now compute the norm of the solution
problem.computeNorms(domain, LHS);
#ifdef HAVE_MPI
MPI_Finalize();
#endif
}
Example 5: TEUCHOS_UNIT_TEST
TEUCHOS_UNIT_TEST(Thyra_NonlinearSolver, WithResetModel)
{
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Problem size only supports 1 mpi process
TEST_EQUALITY(Comm.NumProc(), 1);
// Create the model evaluator object
double d = 10.0;
double p0 = 2.0;
double p1 = 0.0;
double x00 = 0.0;
double x01 = 1.0;
Teuchos::RCP<ModelEvaluator2DSim<double> > thyraModel =
Teuchos::rcp(new ModelEvaluator2DSim<double>(Teuchos::rcp(&Comm,false),
d,p0,p1,x00,x01));
::Stratimikos::DefaultLinearSolverBuilder builder;
Teuchos::RCP<Teuchos::ParameterList> p =
Teuchos::rcp(new Teuchos::ParameterList);
p->set("Linear Solver Type", "AztecOO");
p->set("Preconditioner Type", "Ifpack");
builder.setParameterList(p);
Teuchos::RCP< ::Thyra::LinearOpWithSolveFactoryBase<double> >
lowsFactory = builder.createLinearSolveStrategy("");
thyraModel->set_W_factory(lowsFactory);
// Create nox parameter list
Teuchos::RCP<Teuchos::ParameterList> nl_params =
Teuchos::rcp(new Teuchos::ParameterList);
nl_params->set("Nonlinear Solver", "Line Search Based");
// Create a Thyra nonlinear solver
Teuchos::RCP< ::Thyra::NonlinearSolverBase<double> > solver =
Teuchos::rcp(new ::Thyra::NOXNonlinearSolver);
solver->setParameterList(nl_params);
solver->setModel(thyraModel);
Teuchos::RCP< ::Thyra::VectorBase<double> >
initial_guess = thyraModel->getNominalValues().get_x()->clone_v();
::Thyra::SolveCriteria<double> solve_criteria;
::Thyra::SolveStatus<double> solve_status;
solve_status = solver->solve(initial_guess.get(), &solve_criteria);
TEST_ASSERT(solve_status.extraParameters->isType<int>("Number of Iterations"));
TEST_EQUALITY(solve_status.extraParameters->get<int>("Number of Iterations"), 7);
TEST_EQUALITY(solve_status.solveStatus, ::Thyra::SOLVE_STATUS_CONVERGED);
// Test the reset capability for using a new model evaluator with a
// different set of parameters
p1 = 2.0;
thyraModel =
Teuchos::rcp(new ModelEvaluator2DSim<double>(Teuchos::rcp(&Comm,false),
d,p0,p1,x00,x01));
thyraModel->set_W_factory(lowsFactory);
solver->setModel(thyraModel);
initial_guess = thyraModel->getNominalValues().get_x()->clone_v();
solve_status = solver->solve(initial_guess.get(), &solve_criteria);
TEST_ASSERT(solve_status.extraParameters->isType<int>("Number of Iterations"));
TEST_EQUALITY(solve_status.extraParameters->get<int>("Number of Iterations"), 9);
TEST_EQUALITY(solve_status.solveStatus, ::Thyra::SOLVE_STATUS_CONVERGED);
Teuchos::TimeMonitor::summarize();
}
Example 6: main
int main(int argc, char *argv[])
{
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
// Check verbosity level
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 101;
bool success = false;
try {
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
std::cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << std::endl;
std::cout << "Test failed!" << std::endl;
throw "NOX Error";
}
// Create the interface between NOX and the application
// This object is derived from NOX::Epetra::Interface
Teuchos::RCP<Interface> interface =
Teuchos::rcp(new Interface(NumGlobalElements, Comm));
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
Teuchos::RCP<NOX::Epetra::Vector> noxSoln =
Teuchos::rcp(new NOX::Epetra::Vector(soln,
NOX::Epetra::Vector::CreateView));
// Set the PDE factor (for nonlinear forcing term). This could be specified
// via user input.
interface->setPDEfactor(1000.0);
// Set the initial guess
soln->PutScalar(1.0);
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::TestDetails +
NOX::Utils::Error);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
// Create a print class for controlling output below
NOX::Utils printing(printParams);
// Sublist for line search
Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
searchParams.set("Method", "Full Step");
//......... (the rest of the code is omitted here) .........
Example 7: main
int main(int narg, char *arg[])
{
using std::cout;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&narg,&arg);
Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm comm;
#endif
int me = comm.MyPID();
int np = comm.NumProc();
ITYPE nGlobalRows = 10;
if (narg > 1)
nGlobalRows = (ITYPE) atol(arg[1]);
bool verbose = (nGlobalRows < 20);
// Linear map similar to Trilinos default,
// but want to allow adding OFFSET_EPETRA64 to the indices.
int nMyRows = (int) (nGlobalRows / np + (nGlobalRows % np > me));
ITYPE myFirstRow = (ITYPE)(me * (nGlobalRows / np) + MIN(nGlobalRows%np, me));
ITYPE *myGlobalRows = new ITYPE[nMyRows];
for (int i = 0; i < nMyRows; i++)
myGlobalRows[i] = (ITYPE)i + myFirstRow + OFFSET_EPETRA64;
Epetra_Map *rowMap = new Epetra_Map(-1, nMyRows, &myGlobalRows[0], 0, comm);
if (verbose) rowMap->Print(std::cout);
// Create an integer vector nnzPerRow that is used to build the Epetra Matrix.
// nnzPerRow[i] is the number of entries for the ith local equation
std::vector<int> nnzPerRow(nMyRows+1, 0);
// Also create lists of the nonzeros to be assigned to processors.
// To save programming time and complexity, these vectors are allocated
// bigger than they may actually be needed.
std::vector<ITYPE> iv(3*nMyRows+1);
std::vector<ITYPE> jv(3*nMyRows+1);
std::vector<double> vv(3*nMyRows+1);
// Generate the nonzeros for the Laplacian matrix.
ITYPE nMyNonzeros = 0;
for (ITYPE i = 0, myrowcnt = 0; i < nGlobalRows; i++) {
if (rowMap->MyGID(i+OFFSET_EPETRA64)) {
// This processor owns this row; add nonzeros.
if (i > 0) {
iv[nMyNonzeros] = i + OFFSET_EPETRA64;
jv[nMyNonzeros] = i-1 + OFFSET_EPETRA64;
vv[nMyNonzeros] = -1;
if (verbose)
std::cout << "(" << iv[nMyNonzeros] << "," << jv[nMyNonzeros] << ")="
<< vv[nMyNonzeros] << " on processor " << me
<< " in " << myrowcnt << std::endl;
nMyNonzeros++;
nnzPerRow[myrowcnt]++;
}
iv[nMyNonzeros] = i + OFFSET_EPETRA64;
jv[nMyNonzeros] = i + OFFSET_EPETRA64;
vv[nMyNonzeros] = ((i == 0 || i == nGlobalRows-1) ? 1. : 2.);
if (verbose)
std::cout << "(" << iv[nMyNonzeros] << "," << jv[nMyNonzeros] << ")="
<< vv[nMyNonzeros] << " on processor " << me
<< " in " << myrowcnt << std::endl;
nMyNonzeros++;
nnzPerRow[myrowcnt]++;
if (i < nGlobalRows - 1) {
iv[nMyNonzeros] = i + OFFSET_EPETRA64;
jv[nMyNonzeros] = i+1 + OFFSET_EPETRA64;
vv[nMyNonzeros] = -1;
if (verbose)
std::cout << "(" << iv[nMyNonzeros] << "," << jv[nMyNonzeros] << ")="
<< vv[nMyNonzeros] << " on processor " << me
<< " in " << myrowcnt << std::endl;
nMyNonzeros++;
nnzPerRow[myrowcnt]++;
}
myrowcnt++;
}
}
// Create an Epetra_Matrix
Epetra_CrsMatrix *A = new Epetra_CrsMatrix(Copy, *rowMap, &nnzPerRow[0], false);
// Insert the nonzeros.
int info;
ITYPE sum = 0;
for (int i=0; i < nMyRows; i++) {
if (nnzPerRow[i]) {
if (verbose) {
std::cout << "InsertGlobalValus row " << iv[sum]
<< " count " << nnzPerRow[i]
<< " cols " << jv[sum] << " " << jv[sum+1] << " ";
if (nnzPerRow[i] == 3) std::cout << jv[sum+2];
std::cout << std::endl;
}
info = A->InsertGlobalValues(iv[sum],nnzPerRow[i],&vv[sum],&jv[sum]);
//......... (the rest of the code is omitted here) .........
Example 8: testTwoPts
void testTwoPts()
{
Epetra_SerialComm comm;
// set up a hard-coded layout for two points
int numCells = 2;
int numBonds = 2;
// set up overlap maps, which include ghosted nodes
// in this case we're on a single processor, so these
// maps are essentially the identity map
int numGlobalElements = numCells;
int numMyElements = numGlobalElements;
int* myGlobalElements = new int[numMyElements];
int elementSize = 1;
for(int i=0; i<numMyElements ; ++i){
myGlobalElements[i] = i;
}
int indexBase = 0;
// oneDimensionalOverlapMap
// used for cell volumes and scalar constitutive data
Epetra_BlockMap oneDimensionalOverlapMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, comm);
// used for positions, displacements, velocities and vector constitutive data
elementSize = 3;
Epetra_BlockMap threeDimensionalOverlapMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, comm);
delete[] myGlobalElements;
// bondMap
// used for bond damage and bond constitutive data
numGlobalElements = numBonds;
numMyElements = numGlobalElements;
myGlobalElements = new int[numMyElements];
elementSize = 1;
// assign global IDs to the bonds (left uninitialized in the original listing)
for(int i=0; i<numMyElements ; ++i){
myGlobalElements[i] = i;
}
Epetra_BlockMap bondMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, comm);
delete[] myGlobalElements;
// create a linear elastic isotropic peridynamic solid material model
// could also use a MaterialFactory object to create the material here
Teuchos::ParameterList params;
params.set("Density", 7800.0);
params.set("Bulk Modulus", 130.0e9);
params.set("Shear Modulus", 78.0e9);
PeridigmNS::LinearElasticIsotropicMaterial mat(params);
// create the NeighborhoodData
// both points are neighbors of each other
PeridigmNS::NeighborhoodData neighborhoodData;
neighborhoodData.SetNumOwned(2);
neighborhoodData.SetNeighborhoodListSize(4);
int* const ownedIDs = neighborhoodData.OwnedIDs();
ownedIDs[0] = 0;
ownedIDs[1] = 1;
int* const neighborhoodList = neighborhoodData.NeighborhoodList();
neighborhoodList[0] = 1;
neighborhoodList[1] = 1;
neighborhoodList[2] = 1;
neighborhoodList[3] = 0;
int* const neighborhoodPtr = neighborhoodData.NeighborhoodPtr();
neighborhoodPtr[0] = 0;
neighborhoodPtr[1] = 2;
// create a block and load the material model
PeridigmNS::Block block("test", 1);
block.setMaterialModel(Teuchos::rcp(&mat, false));
// create the blockID vector
Epetra_Vector blockIDs(oneDimensionalOverlapMap);
blockIDs.PutScalar(1.0);
// initialize the block
// in serial, the overlap and non-overlap maps are the same
block.initialize(Teuchos::rcp(&oneDimensionalOverlapMap, false),
Teuchos::rcp(&oneDimensionalOverlapMap, false),
Teuchos::rcp(&threeDimensionalOverlapMap, false),
Teuchos::rcp(&threeDimensionalOverlapMap, false),
Teuchos::rcp(&bondMap, false),
Teuchos::rcp(&blockIDs, false),
Teuchos::rcp(&neighborhoodData, false));
// time step
double dt = 1.0;
// create a workset with rcps to the relevant data
PHAL::Workset workset;
workset.timeStep = Teuchos::RCP<double>(&dt, false);
workset.jacobian = Teuchos::RCP<PeridigmNS::SerialMatrix>(); // null rcp, not used in this test
workset.blocks = Teuchos::rcp(new std::vector<PeridigmNS::Block>() );
workset.myPID = comm.MyPID();
workset.blocks->push_back(block);
// set the data for the two-point discretization
Epetra_Vector& x = *block.getData(Field_NS::COORD3D, Field_ENUM::STEP_NONE);
Epetra_Vector& y = *block.getData(Field_NS::CURCOORD3D, Field_ENUM::STEP_NP1);
x[0] = 0.0; x[1] = 0.0; x[2] = 0.0;
x[3] = 1.0; x[4] = 0.0; x[5] = 0.0;
y[0] = 0.0; y[1] = 0.0; y[2] = 0.0;
y[3] = 2.0; y[4] = 0.0; y[5] = 0.0;
block.getData(Field_NS::VOLUME, Field_ENUM::STEP_NONE)->PutScalar(1.0);
//......... (the rest of the code is omitted here) .........
Example 9: main
int main( int argc, char* argv[] )
{
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Create command line processor
Teuchos::CommandLineProcessor RBGen_CLP;
RBGen_CLP.recogniseAllOptions( false );
RBGen_CLP.throwExceptions( false );
// Generate list of acceptable command line options
bool verbose = false;
std::string xml_file = "";
RBGen_CLP.setOption("verbose", "quiet", &verbose, "Print messages and results.");
RBGen_CLP.setOption("xml-file", &xml_file, "XML Input File");
// Process command line.
Teuchos::CommandLineProcessor::EParseCommandLineReturn
parseReturn= RBGen_CLP.parse( argc, argv );
if( parseReturn == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED ) {
return 0;
}
if( parseReturn != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) {
#ifdef EPETRA_MPI
MPI_Finalize();
#endif
return -1; // Error!
}
// Check to make sure an XML input file was provided
TEUCHOS_TEST_FOR_EXCEPTION(xml_file == "", std::invalid_argument, "ERROR: An XML file was not provided; use --xml-file to provide an XML input file for this RBGen driver.");
Teuchos::Array<Teuchos::RCP<Teuchos::Time> > timersRBGen;
//
// ---------------------------------------------------------------
// CREATE THE INITIAL PARAMETER LIST FROM THE INPUT XML FILE
// ---------------------------------------------------------------
//
Teuchos::RCP<Teuchos::ParameterList> BasisParams = RBGen::createParams( xml_file );
if (verbose && Comm.MyPID() == 0)
{
std::cout<<"-------------------------------------------------------"<<std::endl;
std::cout<<"Input Parameter List: "<<std::endl;
std::cout<<"-------------------------------------------------------"<<std::endl;
BasisParams->print();
}
//
// ---------------------------------------------------------------
// CREATE THE FILE I/O HANDLER
// ---------------------------------------------------------------
//
// - First create the abstract factory for the file i/o handler.
//
RBGen::EpetraMVFileIOFactory fio_factory;
//
// - Then use the abstract factory to create the file i/o handler specified in the parameter list.
//
Teuchos::RCP<Teuchos::Time> timerFileIO = Teuchos::rcp( new Teuchos::Time("Create File I/O Handler") );
timersRBGen.push_back( timerFileIO );
//
Teuchos::RCP< RBGen::FileIOHandler<Epetra_MultiVector> > mvFileIO;
Teuchos::RCP< RBGen::FileIOHandler<Epetra_Operator> > opFileIO =
Teuchos::rcp( new RBGen::EpetraCrsMatrixFileIOHandler() );
{
Teuchos::TimeMonitor lcltimer( *timerFileIO );
mvFileIO = fio_factory.create( *BasisParams );
//
// Initialize file IO handlers
//
mvFileIO->Initialize( BasisParams );
opFileIO->Initialize( BasisParams );
}
if (verbose && Comm.MyPID() == 0)
{
std::cout<<"-------------------------------------------------------"<<std::endl;
std::cout<<"File I/O Handlers Generated"<<std::endl;
std::cout<<"-------------------------------------------------------"<<std::endl;
}
//
// ---------------------------------------------------------------
// READ IN THE DATA SET / SNAPSHOT SET & PREPROCESS
// ( this will be a separate abstract class type )
// ---------------------------------------------------------------
//
Teuchos::RCP<std::vector<std::string> > filenames = RBGen::genFileList( *BasisParams );
Teuchos::RCP<Teuchos::Time> timerSnapshotIn = Teuchos::rcp( new Teuchos::Time("Reading in Snapshot Set") );
timersRBGen.push_back( timerSnapshotIn );
//
Teuchos::RCP<Epetra_MultiVector> testMV;
{
Teuchos::TimeMonitor lcltimer( *timerSnapshotIn );
testMV = mvFileIO->Read( *filenames );
}
//......... (the rest of the code is omitted here) .........
Example 10: main
int main(int argc, char *argv[])
{
// initialize MPI and Epetra communicator
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
Teuchos::ParameterList GaleriList;
// The problem is defined on a 2D grid, global size is nx * nx.
int nx = 30;
GaleriList.set("nx", nx);
GaleriList.set("ny", nx * Comm.NumProc());
GaleriList.set("mx", 1);
GaleriList.set("my", Comm.NumProc());
Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Cartesian2D", Comm, GaleriList) );
Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
// =============================================================== //
// B E G I N N I N G O F I F P A C K C O N S T R U C T I O N //
// =============================================================== //
Teuchos::ParameterList List;
// builds an Ifpack_AdditiveSchwarz. This is templated with
// the local solvers, in this case Ifpack_ICT. Note that any
// other Ifpack_Preconditioner-derived class can be used
// instead of Ifpack_ICT.
// In this example the overlap is zero. Use
// Prec(A,OverlapLevel) for the general case.
Ifpack_AdditiveSchwarz<Ifpack_ICT> Prec(&*A);
// `1.0' means that the factorization should approximately
// keep the same number of nonzeros per row of the original matrix.
List.set("fact: ict level-of-fill", 1.0);
// no modifications on the diagonal
List.set("fact: absolute threshold", 0.0);
List.set("fact: relative threshold", 1.0);
List.set("fact: relaxation value", 0.0);
// matrix `laplace_2d_bc' is not symmetric because of the way
// boundary conditions are imposed. We can filter the singletons
// (that is, Dirichlet nodes) and end up with a symmetric
// matrix (as ICT requires).
List.set("schwarz: filter singletons", true);
// sets the parameters
IFPACK_CHK_ERR(Prec.SetParameters(List));
// initialize the preconditioner. At this point the matrix must
// have been FillComplete()'d, but actual values are ignored.
IFPACK_CHK_ERR(Prec.Initialize());
// Builds the preconditioners, by looking for the values of
// the matrix.
IFPACK_CHK_ERR(Prec.Compute());
// =================================================== //
// E N D O F I F P A C K C O N S T R U C T I O N //
// =================================================== //
// At this point, we need some additional objects
// to define and solve the linear system.
// defines LHS and RHS
Epetra_Vector LHS(A->OperatorDomainMap());
Epetra_Vector RHS(A->OperatorDomainMap());
LHS.PutScalar(0.0);
RHS.Random();
// need an Epetra_LinearProblem to define AztecOO solver
Epetra_LinearProblem Problem(&*A,&LHS,&RHS);
// now we can allocate the AztecOO solver
AztecOO Solver(Problem);
// specify solver
Solver.SetAztecOption(AZ_solver,AZ_cg_condnum);
Solver.SetAztecOption(AZ_output,32);
// HERE WE SET THE IFPACK PRECONDITIONER
Solver.SetPrecOperator(&Prec);
// .. and here we solve
// NOTE: with one process, the solver must converge in
// one iteration.
Solver.Iterate(1550,1e-5);
// Prints out some information about the preconditioner
cout << Prec;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
//......... (the rest of the code is omitted here) .........
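Example 10 builds the additive Schwarz preconditioner with zero overlap. As its comments note, a second constructor argument sets the overlap level; here is a one-line, hypothetical variation on the construction above (the overlap value 1 is arbitrary):

// same local solver (Ifpack_ICT), but with one layer of subdomain overlap
Ifpack_AdditiveSchwarz<Ifpack_ICT> PrecOverlap(&*A, 1);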
Example 11: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// set global dimension of the matrix to 5, could be any number
int NumGlobalElements = 5;
// create a map
Epetra_Map Map(NumGlobalElements,0,Comm);
// local number of rows
int NumMyElements = Map.NumMyElements();
// get update list
int * MyGlobalElements = Map.MyGlobalElements( );
// Create an integer vector NumNz that is used to build the Epetra matrix.
// NumNz[i] is the number of nonzero terms for the ith global equation
// on this processor (including the diagonal entry)
int * NumNz = new int[NumMyElements];
// We are building a tridiagonal matrix where each row has (-1 2 -1),
// i.e. 2 off-diagonal terms plus the diagonal (the first and last
// equation have only 1 off-diagonal term)
for ( int i=0; i<NumMyElements; i++)
if (MyGlobalElements[i]==0 || MyGlobalElements[i] == NumGlobalElements-1)
NumNz[i] = 2;
else
NumNz[i] = 3;
// Create a Epetra_Matrix
Epetra_CrsMatrix A(Copy,Map,NumNz);
// (NOTE: constructor `Epetra_CrsMatrix A(Copy,Map,3);' was ok too.)
// Add rows one-at-a-time
// Need some vectors to help
// Off diagonal Values will always be -1, diagonal term 2
double *Values = new double[2];
Values[0] = -1.0; Values[1] = -1.0;
int *Indices = new int[2];
double two = 2.0;
int NumEntries;
for( int i=0 ; i<NumMyElements; ++i ) {
if (MyGlobalElements[i]==0) {
Indices[0] = 1;
NumEntries = 1;
} else if (MyGlobalElements[i] == NumGlobalElements-1) {
Indices[0] = NumGlobalElements-2;
NumEntries = 1;
} else {
Indices[0] = MyGlobalElements[i]-1;
Indices[1] = MyGlobalElements[i]+1;
NumEntries = 2;
}
A.InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices);
// Put in the diagonal entry
A.InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i);
}
// Finish up, transforming the matrix entries into local numbering,
// to optimize data transfer during matrix-vector products
A.FillComplete();
// build up two distributed vectors q and z, and compute
// q = A * z
Epetra_Vector q(A.RowMap());
Epetra_Vector z(A.RowMap());
// Fill z with 1's
z.PutScalar( 1.0 );
A.Multiply(false, z, q); // Compute q = A*z
double dotProduct;
z.Dot( q, &dotProduct );
if( Comm.MyPID() == 0 )
cout << "q dot z = " << dotProduct << endl;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
delete[] NumNz;
return( EXIT_SUCCESS );
} /* main */
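A quick sanity check on Example 11: with z set to all ones, q = A*z holds the row sums of the tridiagonal matrix. Every interior row sums to -1 + 2 - 1 = 0, while the first and last rows (which have a single off-diagonal entry) each sum to 2 - 1 = 1, so the program should print q dot z = 2 for any NumGlobalElements >= 2.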
Example 12: main
int main(int argc, char *argv[]) {
int i, returnierr=0;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// Uncomment to debug in parallel int tmp; if (Comm.MyPID()==0) cin >> tmp; Comm.Barrier();
bool verbose = false;
bool veryVerbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
// Check if we should print lots of results to standard out
if (argc>2) if (argv[2][0]=='-' && argv[2][1]=='v') veryVerbose = true;
if (verbose && Comm.MyPID()==0)
std::cout << Epetra_Version() << std::endl << std::endl;
if (!verbose) Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
if (verbose) std::cout << Comm << std::endl << std::flush;
bool verbose1 = verbose;
if (verbose) verbose = (Comm.MyPID()==0);
bool veryVerbose1 = veryVerbose;
if (veryVerbose) veryVerbose = (Comm.MyPID()==0);
int NumMyElements = 100;
if (veryVerbose1) NumMyElements = 10;
NumMyElements += Comm.MyPID();
int MaxNumMyElements = NumMyElements+Comm.NumProc()-1;
int * ElementSizeList = new int[NumMyElements];
long long * MyGlobalElements = new long long[NumMyElements];
for (i = 0; i<NumMyElements; i++) {
MyGlobalElements[i] = (Comm.MyPID()*MaxNumMyElements+i)*2;
ElementSizeList[i] = i%6 + 2; // elementsizes go from 2 to 7
}
Epetra_BlockMap Map(-1LL, NumMyElements, MyGlobalElements, ElementSizeList,
0, Comm);
delete [] ElementSizeList;
delete [] MyGlobalElements;
Epetra_MapColoring C0(Map);
int * elementColors = new int[NumMyElements];
int maxcolor = 24;
int * colorCount = new int[maxcolor];
int ** colorLIDs = new int*[maxcolor];
for (i=0; i<maxcolor; i++) colorCount[i] = 0;
for (i=0; i<maxcolor; i++) colorLIDs[i] = 0;
int defaultColor = C0.DefaultColor();
for (i=0; i<Map.NumMyElements(); i++) {
assert(C0[i]==defaultColor);
assert(C0(Map.GID64(i))==defaultColor);
if (i%2==0) C0[i] = i%6+5+i%14; // cycle through 5...23 on even elements
else C0(Map.GID64(i)) = i%5+1; // cycle through 1...5 on odd elements
elementColors[i] = C0[i]; // Record color of ith element for use below
colorCount[C0[i]]++; // Count how many of each color for checking below
}
if (veryVerbose)
std::cout << "Original Map Coloring using element-by-element definitions" << std::endl;
if (veryVerbose1)
std::cout << C0 << std::endl;
int numColors = 0;
for (i=0; i<maxcolor; i++)
if (colorCount[i]>0) {
numColors++;
colorLIDs[i] = new int[colorCount[i]];
}
for (i=0; i<maxcolor; i++) colorCount[i] = 0;
for (i=0; i<Map.NumMyElements(); i++) colorLIDs[C0[i]][colorCount[C0[i]]++] = i;
int newDefaultColor = -1;
Epetra_MapColoring C1(Map, elementColors, newDefaultColor);
if (veryVerbose)
std::cout << "Same Map Coloring using one-time construction" << std::endl;
if (veryVerbose1)
std::cout << C1 << std::endl;
assert(C1.DefaultColor()==newDefaultColor);
for (i=0; i<Map.NumMyElements(); i++) assert(C1[i]==C0[i]);
Epetra_MapColoring C2(C1);
//......... (the rest of the code is omitted here) .........
Example 13: main
int main(int argc, char *argv[]) {
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// initialize a Gallery object
CrsMatrixGallery Gallery("laplace_2d", Comm, false); // CJ TODO FIXME: change for Epetra64
Gallery.Set("problem_size", 100); //must be a square number
// get pointers to the linear problem, containing matrix, LHS and RHS.
// if you need to access them, you can for example uncomment the following
// code:
// Epetra_CrsMatrix* Matrix = Gallery.GetMatrix();
// Epetra_MultiVector* LHS = Gallery.GetStartingSolution();
// Epetra_MultiVector* RHS = Gallery.GetRHS();
//
// NOTE: StartingSolution and RHS are pointers to Gallery's internally stored
// vectors. Using StartingSolution and RHS, we can verify the residual
// after the solution of the linear system. However, users may define as well
// their own vectors for solution and RHS.
Epetra_LinearProblem* Problem = Gallery.GetLinearProblem();
// initialize Amesos solver:
// `Solver' is the pointer to the Amesos solver
// (note the use of the base class Amesos_BaseSolver)
Amesos_BaseSolver* Solver;
// Amesos_Factory is the function class used to create the solver.
// This class contains no data.
Amesos Amesos_Factory;
// empty parameter list
Teuchos::ParameterList List;
// may also try: "Amesos_Umfpack", "Amesos_Lapack", ...
string SolverType = "Amesos_Klu";
Solver = Amesos_Factory.Create(SolverType, *Problem);
// Amesos_Factory returns 0 if the selected solver is not
// available
assert (Solver);
// start solving
Solver->SymbolicFactorization();
Solver->NumericFactorization();
Solver->Solve();
// verify that residual is really small
double residual, diff;
Gallery.ComputeResidual(&residual);
Gallery.ComputeDiffBetweenStartingAndExactSolutions(&diff);
if( Comm.MyPID() == 0 ) {
cout << "||b-Ax||_2 = " << residual << endl;
cout << "||x_exact - x||_2 = " << diff << endl;
}
// delete Solver
delete Solver;
if (residual > 1e-5)
exit(EXIT_FAILURE);
#ifdef HAVE_MPI
MPI_Finalize();
#endif
exit(EXIT_SUCCESS);
}
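As the comments in Example 13 point out, Amesos::Create() returns 0 when the requested solver was not compiled into the Trilinos build. The factory also offers Query() to test availability up front; here is a short sketch reusing the Problem object from the example (the fallback solver name is an arbitrary choice):

Amesos Amesos_Factory;
string SolverType = "Amesos_Klu";
if (!Amesos_Factory.Query(SolverType.c_str()))
  SolverType = "Amesos_Lapack"; // fall back to another solver known to be built
Amesos_BaseSolver* Solver = Amesos_Factory.Create(SolverType, *Problem);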
Example 14: TEUCHOS_UNIT_TEST
TEUCHOS_UNIT_TEST(AndersonAcceleration, AA_Rosenbrock)
{
int status = 0;
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
TEST_ASSERT(Comm.NumProc() == 1);
::Stratimikos::DefaultLinearSolverBuilder builder;
Teuchos::RCP<Teuchos::ParameterList> p =
Teuchos::rcp(new Teuchos::ParameterList);
{
p->set("Linear Solver Type", "AztecOO");
//p->set("Preconditioner Type", "Ifpack");
p->set("Preconditioner Type", "None");
Teuchos::ParameterList& az = p->sublist("Linear Solver Types").sublist("AztecOO");
az.sublist("Forward Solve").sublist("AztecOO Settings").set("Output Frequency", 1);
az.sublist("VerboseObject").set("Verbosity Level", "high");
Teuchos::ParameterList& ip = p->sublist("Preconditioner Types").sublist("Ifpack");
ip.sublist("VerboseObject").set("Verbosity Level", "high");
}
builder.setParameterList(p);
Teuchos::RCP< ::Thyra::LinearOpWithSolveFactoryBase<double> >
lowsFactory = builder.createLinearSolveStrategy("");
Teuchos::RCP<RosenbrockModelEvaluator> thyraModel =
Teuchos::rcp(new RosenbrockModelEvaluator(Teuchos::rcp(&Comm,false)));
thyraModel->set_W_factory(lowsFactory);
// Create nox parameter list
Teuchos::RCP<Teuchos::ParameterList> nl_params =
Teuchos::rcp(new Teuchos::ParameterList);
nl_params->set("Nonlinear Solver", "Anderson Accelerated Fixed-Point");
nl_params->sublist("Anderson Parameters").set("Storage Depth", 2);
nl_params->sublist("Anderson Parameters").set("Mixing Parameter", 1.0);
nl_params->sublist("Anderson Parameters").set("Acceleration Start Iteration", 1);
nl_params->sublist("Anderson Parameters").set("Adjust Matrix for Condition Number", false);
nl_params->sublist("Anderson Parameters").sublist("Preconditioning").set("Precondition", false);
Teuchos::ParameterList& printParams = nl_params->sublist("Printing");
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::TestDetails +
NOX::Utils::Error);
nl_params->sublist("Solver Options").set("Status Test Check Type", "Complete");
// Enable row sum scaling
nl_params->sublist("Thyra Group Options").set("Function Scaling", "Row Sum");
// Create Status Tests
{
Teuchos::ParameterList& st = nl_params->sublist("Status Tests");
st.set("Test Type", "Combo");
st.set("Combo Type", "OR");
st.set("Number of Tests", 3);
{
Teuchos::ParameterList& conv = st.sublist("Test 0");
conv.set("Test Type", "Combo");
conv.set("Combo Type", "AND");
conv.set("Number of Tests", 2);
Teuchos::ParameterList& normF_rel = conv.sublist("Test 0");
normF_rel.set("Test Type", "NormF");
normF_rel.set("Tolerance", 1.0e-8);
Teuchos::ParameterList& normWRMS = conv.sublist("Test 1");
normWRMS.set("Test Type", "NormWRMS");
normWRMS.set("Absolute Tolerance", 1.0e-8);
normWRMS.set("Relative Tolerance", 1.0e-5);
normWRMS.set("Tolerance", 1.0);
normWRMS.set("BDF Multiplier", 1.0);
normWRMS.set("Alpha", 1.0);
normWRMS.set("Beta", 0.5);
normWRMS.set("Disable Implicit Weighting", true);
}
{
Teuchos::ParameterList& fv = st.sublist("Test 1");
fv.set("Test Type", "FiniteValue");
fv.set("Vector Type", "F Vector");
fv.set("Norm Type", "Two Norm");
}
//......... (the rest of the code is omitted here) .........
Example 15: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
typedef double ST;
typedef Teuchos::ScalarTraits<ST> SCT;
typedef SCT::magnitudeType MT;
typedef Epetra_MultiVector MV;
typedef Epetra_Operator OP;
typedef Belos::MultiVecTraits<ST,MV> MVT;
typedef Belos::OperatorTraits<ST,MV,OP> OPT;
using Teuchos::RCP;
using Teuchos::rcp;
bool success = true;
string pass = "End Result: TEST PASSED";
string fail = "End Result: TEST FAILED";
bool verbose = false, proc_verbose = true;
bool leftprec = false; // left preconditioning or right.
int frequency = -1; // frequency of status test output.
int blocksize = 1; // blocksize
int numrhs = 1; // number of right-hand sides to solve for
int maxrestarts = 15; // maximum number of restarts allowed
int maxsubspace = 25; // maximum number of blocks the solver can use
// for the subspace
char file_name[100];
int nProcs, myPID ;
Teuchos::RCP <Teuchos::ParameterList> pLUList ; // ParaLU parameters
Teuchos::ParameterList isoList ; // Isorropia parameters
Teuchos::ParameterList shyLUList ; // ShyLU parameters
string ipFileName = "ShyLU.xml"; // TODO : Accept as i/p
#ifdef HAVE_MPI
nProcs = mpiSession.getNProc();
myPID = Comm.MyPID();
#else
nProcs = 1;
myPID = 0;
#endif
if (myPID == 0)
{
cout <<"Parallel execution: nProcs="<< nProcs << endl;
}
// =================== Read input xml file =============================
pLUList = Teuchos::getParametersFromXmlFile(ipFileName);
isoList = pLUList->sublist("Isorropia Input");
shyLUList = pLUList->sublist("ShyLU Input");
shyLUList.set("Outer Solver Library", "Belos");
// Get matrix market file name
string MMFileName = Teuchos::getParameter<string>(*pLUList, "mm_file");
string prec_type = Teuchos::getParameter<string>(*pLUList, "preconditioner");
int maxiters = Teuchos::getParameter<int>(*pLUList, "Outer Solver MaxIters");
MT tol = Teuchos::getParameter<double>(*pLUList, "Outer Solver Tolerance");
string rhsFileName = pLUList->get<string>("rhs_file", "");
int maxFiles = pLUList->get<int>("Maximum number of files to read in", 1);
int startFile = pLUList->get<int>("Number of initial file", 1);
int file_number = startFile;
if (myPID == 0)
{
cout << "Input :" << endl;
cout << "ParaLU params " << endl;
pLUList->print(std::cout, 2, true, true);
cout << "Matrix market file name: " << MMFileName << endl;
}
if (maxFiles > 1)
{
MMFileName += "%d.mm";
sprintf( file_name, MMFileName.c_str(), file_number );
}
else
{
strcpy( file_name, MMFileName.c_str());
}
// ==================== Read input Matrix ==============================
Epetra_CrsMatrix *A;
Epetra_MultiVector *b1;
int err = EpetraExt::MatrixMarketFileToCrsMatrix(file_name, Comm, A);
if (err != 0 && myPID == 0)
{
cout << "Matrix file could not be read in!!!, info = "<< err << endl;
success = false;
}
//......... (the rest of the code is omitted here) .........