This article collects typical usage examples of the C++ method Epetra_SerialComm::MyPID. If you are wondering what Epetra_SerialComm::MyPID does, how to call it, or what it looks like in real code, the curated examples below may help. You can also look further into usage examples of its enclosing class, Epetra_SerialComm.
The following sections present 15 code examples of the Epetra_SerialComm::MyPID method, sorted by popularity.
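Before turning to the collected examples, here is a minimal, self-contained sketch of the canonical pattern (written for this article, not taken from any of the projects below, and assuming a standard Trilinos/Epetra build): construct an Epetra_MpiComm when MPI is enabled, or an Epetra_SerialComm otherwise, then call MyPID() to obtain the rank of the calling process. For a serial communicator, MyPID() always returns 0 and NumProc() always returns 1.

// Minimal sketch of the Epetra communicator / MyPID() idiom.
#include <iostream>
#ifdef HAVE_MPI
#include <mpi.h>
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;        // MyPID() == 0, NumProc() == 1
#endif
  int MyPID = Comm.MyPID();      // rank of this process
  int NumProc = Comm.NumProc();  // total number of processes
  if (MyPID == 0)
    std::cout << "Running on " << NumProc << " process(es)." << std::endl;
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}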
Example 1: Comm
//......... (part of the code omitted here) .........
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::TestDetails +
NOX::Utils::Error);
nl_params->sublist("Solver Options").set("Status Test Check Type", "Complete");
// Function scaling is disabled here; the user-defined right scaling vector created below is used instead
nl_params->sublist("Thyra Group Options").set("Function Scaling", "None");
// Create right scaling vector
Teuchos::RCP<Thyra::VectorBase<double> > values = Thyra::createMember(thyraModel->get_x_space());
Thyra::put_scalar(0.0,values.ptr());
// Values chosen such that WRMS convergence is achieved in 1 step;
// without scaling the problem will not converge in fewer than 20 steps
Thyra::set_ele(0,1.0e14,values.ptr());
Thyra::set_ele(1,1.0e2,values.ptr());
Teuchos::RCP<NOX::Abstract::Vector > right_scaling = Teuchos::rcp<NOX::Abstract::Vector>(new NOX::Thyra::Vector(values));
// Create Status Tests
Teuchos::RCP<NOX::StatusTest::Generic> status_tests;
{
Teuchos::RCP<Teuchos::ParameterList> st =
Teuchos::parameterList("Status Tests");
st->set("Test Type", "Combo");
st->set("Combo Type", "OR");
st->set("Number of Tests", 3);
{
Teuchos::ParameterList& normWRMS = st->sublist("Test 0");
normWRMS.set("Test Type", "NormWRMS");
normWRMS.set("Absolute Tolerance", 1.0e-8);
normWRMS.set("Relative Tolerance", 1.0e-5);
normWRMS.set("Tolerance", 1.0);
normWRMS.set("BDF Multiplier", 1.0);
normWRMS.set("Alpha", 1.0);
normWRMS.set("Beta", 0.5);
normWRMS.set("Disable Implicit Weighting", true);
}
{
Teuchos::ParameterList& fv = st->sublist("Test 1");
fv.set("Test Type", "FiniteValue");
fv.set("Vector Type", "F Vector");
fv.set("Norm Type", "Two Norm");
}
{
Teuchos::ParameterList& maxiters = st->sublist("Test 2");
maxiters.set("Test Type", "MaxIters");
maxiters.set("Maximum Iterations", 20);
}
// Create a print class for controlling output below
nl_params->sublist("Printing").set("MyPID", Comm.MyPID());
nl_params->sublist("Printing").set("Output Precision", 3);
nl_params->sublist("Printing").set("Output Processor", 0);
NOX::Utils printing(nl_params->sublist("Printing"));
NOX::StatusTest::Factory st_factory;
status_tests = st_factory.buildStatusTests(*st,printing);
}
Teuchos::RCP< ::Thyra::VectorBase<double> >
initial_guess = thyraModel->getNominalValues().get_x()->clone_v();
Teuchos::RCP<NOX::Thyra::Vector> noxThyraRightScaleVec =
Teuchos::rcp_dynamic_cast<NOX::Thyra::Vector>(right_scaling);
Teuchos::RCP<NOX::Thyra::Group> nox_group =
Teuchos::rcp(new NOX::Thyra::Group(*initial_guess,
thyraModel,
thyraModel->create_W_op(),
lowsFactory,
thyraModel->create_W_prec(),
lowsFactory->getPreconditionerFactory(),
Teuchos::null,
noxThyraRightScaleVec->getThyraRCPVector()
));
Teuchos::RCP<NOX::Solver::Generic> solver =
NOX::Solver::buildSolver(nox_group, status_tests, nl_params);
NOX::StatusTest::StatusType solveStatus = solver->solve();
TEST_ASSERT(solveStatus == NOX::StatusTest::Converged);
// Test total num iterations
{
// Problem converges in >20 nonlinear iterations with NO scaling
// Problem converges in 1 nonlinear iteration with scaling
TEST_EQUALITY(solver->getNumIterations(), 1);
}
}
Example 2: main
int main(int argc, char *argv[])
{
bool boolret;
int MyPID;
#ifdef HAVE_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
MyPID = Comm.MyPID();
bool testFailed;
bool verbose = false;
bool debug = false;
bool shortrun = false;
bool insitu = false;
std::string which("LM");
CommandLineProcessor cmdp(false,true);
cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
cmdp.setOption("debug","nodebug",&debug,"Print debugging information.");
cmdp.setOption("insitu","exsitu",&insitu,"Perform in situ restarting.");
cmdp.setOption("sort",&which,"Targetted eigenvalues (SM or LM).");
cmdp.setOption("shortrun","longrun",&shortrun,"Allow only a small number of iterations.");
if (cmdp.parse(argc,argv) != CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
if (debug) verbose = true;
typedef double ScalarType;
typedef ScalarTraits<ScalarType> SCT;
typedef SCT::magnitudeType MagnitudeType;
typedef Anasazi::MultiVec<ScalarType> MV;
typedef Anasazi::Operator<ScalarType> OP;
typedef Anasazi::MultiVecTraits<ScalarType,MV> MVT;
typedef Anasazi::OperatorTraits<ScalarType,MV,OP> OPT;
const ScalarType ONE = SCT::one();
if (verbose && MyPID == 0) {
cout << Anasazi::Anasazi_Version() << endl << endl;
}
// Problem information
int space_dim = 1;
std::vector<double> brick_dim( space_dim );
brick_dim[0] = 1.0;
std::vector<int> elements( space_dim );
elements[0] = 100;
// Create problem
RCP<ModalProblem> testCase = rcp( new ModeLaplace1DQ1(Comm, brick_dim[0], elements[0]) );
//
// Get the stiffness and mass matrices
RCP<Epetra_CrsMatrix> K = rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
RCP<Epetra_CrsMatrix> M = rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
//
// Create solver for mass matrix
const int maxIterCG = 100;
const double tolCG = 1e-7;
RCP<BlockPCGSolver> opStiffness = rcp( new BlockPCGSolver(Comm, M.get(), tolCG, maxIterCG, 0) );
opStiffness->setPreconditioner( 0 );
RCP<const Anasazi::EpetraGenOp> InverseOp = rcp( new Anasazi::EpetraGenOp( opStiffness, K ) );
// Create the initial vectors
int blockSize = 3;
RCP<Anasazi::EpetraOpMultiVec> ivec = rcp( new Anasazi::EpetraOpMultiVec(K, K->OperatorDomainMap(), blockSize) );
ivec->MvRandom();
// Create eigenproblem
const int nev = 5;
RCP<Anasazi::BasicEigenproblem<ScalarType,MV,OP> > problem =
rcp( new Anasazi::BasicEigenproblem<ScalarType,MV,OP>(InverseOp, ivec) );
//
// Inform the eigenproblem that the operator InverseOp is Hermitian under an M inner-product
problem->setHermitian(true);
//
// Set the number of eigenvalues requested
problem->setNEV( nev );
//
// Inform the eigenproblem that you are done passing it information
boolret = problem->setProblem();
if (boolret != true) {
if (verbose && MyPID == 0) {
cout << "Anasazi::BasicEigenproblem::SetProblem() returned with error." << endl
<< "End Result: TEST FAILED" << endl;
}
#ifdef HAVE_MPI
MPI_Finalize() ;
#endif
return -1;
}
//......... (part of the code omitted here) .........
Example 3: main
int main(int argc, char *argv[]) {
using std::cout;
using std::endl;
int i;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int MyPID = Comm.MyPID();
// Number of dimension of the domain
int space_dim = 2;
// Size of each of the dimensions of the domain
std::vector<double> brick_dim( space_dim );
brick_dim[0] = 1.0;
brick_dim[1] = 1.0;
// Number of elements in each of the dimensions of the domain
std::vector<int> elements( space_dim );
elements[0] = 10;
elements[1] = 10;
// Create problem
Teuchos::RCP<ModalProblem> testCase = Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );
// Get the stiffness and mass matrices
Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
//
// ************Construct preconditioner*************
//
Teuchos::ParameterList ifpackList;
// allocates an IFPACK factory. No data is associated
// to this object (only method Create()).
Ifpack Factory;
// create the preconditioner. For valid PrecType values,
// please check the documentation
std::string PrecType = "ICT"; // incomplete Cholesky
int OverlapLevel = 0; // must be >= 0. If Comm.NumProc() == 1,
// it is ignored.
Teuchos::RCP<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*K, OverlapLevel) );
assert(Prec != Teuchos::null);
// specify parameters for ICT
ifpackList.set("fact: drop tolerance", 1e-4);
ifpackList.set("fact: ict level-of-fill", 0.);
// the combine mode is one of the following:
// "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
// Their meanings are as defined in file Epetra_CombineMode.h
ifpackList.set("schwarz: combine mode", "Add");
// sets the parameters
IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));
// initialize the preconditioner. At this point the matrix must
// have been FillComplete()'d, but actual values are ignored.
IFPACK_CHK_ERR(Prec->Initialize());
// Builds the preconditioners, by looking for the values of
// the matrix.
IFPACK_CHK_ERR(Prec->Compute());
//
//*******************************************************/
// Set up Belos Block CG operator for inner iteration
//*******************************************************/
//
int blockSize = 3; // block size used by linear solver and eigensolver [ not required to be the same ]
int maxits = K->NumGlobalRows(); // maximum number of iterations to run
//
// Create the Belos::LinearProblem
//
Teuchos::RCP<Belos::LinearProblem<double,Epetra_MultiVector,Epetra_Operator> >
My_LP = Teuchos::rcp( new Belos::LinearProblem<double,Epetra_MultiVector,Epetra_Operator>() );
My_LP->setOperator( K );
// Create the Belos preconditioned operator from the Ifpack preconditioner.
// NOTE: This is necessary because Belos expects an operator to apply the
// preconditioner with Apply() NOT ApplyInverse().
Teuchos::RCP<Epetra_Operator> belosPrec = Teuchos::rcp( new Epetra_InvOperator( Prec.get() ) );
My_LP->setLeftPrec( belosPrec );
//
// Create the ParameterList for the Belos Operator
//
Teuchos::RCP<Teuchos::ParameterList> My_List = Teuchos::rcp( new Teuchos::ParameterList() );
My_List->set( "Solver", "BlockCG" );
My_List->set( "Maximum Iterations", maxits );
My_List->set( "Block Size", 1 );
My_List->set( "Convergence Tolerance", 1e-12 );
//
// Create the Belos::EpetraOperator
//......... (part of the code omitted here) .........
Example 4: main
int main(int argc, char *argv[])
{
using Teuchos::rcp_implicit_cast;
using std::cout;
using std::endl;
bool boolret;
int MyPID;
#ifdef HAVE_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
MyPID = Comm.MyPID();
bool testFailed = false;
bool verbose = false;
bool debug = false;
std::string filename("mhd1280b.cua");
std::string which("LR");
bool skinny = true;
bool success = true;
try {
CommandLineProcessor cmdp(false,true);
cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
cmdp.setOption("debug","nodebug",&debug,"Print debugging information.");
cmdp.setOption("sort",&which,"Targetted eigenvalues (SR or LR).");
cmdp.setOption("skinny","hefty",&skinny,"Use a skinny (low-mem) or hefty (higher-mem) implementation of IRTR.");
if (cmdp.parse(argc,argv) != CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
#ifndef HAVE_EPETRA_THYRA
if (verbose && MyPid == 0) {
cout << "Please configure Anasazi with:" << endl;
cout << "--enable-epetra-thyra" << endl;
cout << "--enable-anasazi-thyra" << endl;
}
return 0;
#endif
typedef double ScalarType;
typedef ScalarTraits<ScalarType> SCT;
typedef SCT::magnitudeType MagnitudeType;
typedef Thyra::MultiVectorBase<double> MV;
typedef Thyra::LinearOpBase<double> OP;
typedef Anasazi::MultiVecTraits<ScalarType,MV> MVT;
typedef Anasazi::OperatorTraits<ScalarType,MV,OP> OPT;
const ScalarType ONE = SCT::one();
if (verbose && MyPID == 0) {
cout << Anasazi::Anasazi_Version() << endl << endl;
}
// Problem information
int space_dim = 1;
std::vector<double> brick_dim( space_dim );
brick_dim[0] = 1.0;
std::vector<int> elements( space_dim );
elements[0] = 100;
// Create problem
RCP<ModalProblem> testCase = rcp( new ModeLaplace1DQ1(Comm, brick_dim[0], elements[0]) );
//
// Get the stiffness and mass matrices
RCP<Epetra_CrsMatrix> K = rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
RCP<Epetra_CrsMatrix> M = rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
//
// Create the initial vectors
int blockSize = 5;
//
// Get a pointer to the Epetra_Map
RCP<const Epetra_Map> Map = rcp( &K->OperatorDomainMap(), false );
//
// create an epetra multivector
RCP<Epetra_MultiVector> ivec =
rcp( new Epetra_MultiVector(K->OperatorDomainMap(), blockSize) );
ivec->Random();
// create a Thyra::VectorSpaceBase
RCP<const Thyra::VectorSpaceBase<double> > epetra_vs =
Thyra::create_VectorSpace(Map);
// create a MultiVectorBase (from the Epetra_MultiVector)
RCP<Thyra::MultiVectorBase<double> > thyra_ivec =
Thyra::create_MultiVector(ivec, epetra_vs);
// Create Thyra LinearOpBase objects from the Epetra_Operator objects
RCP<const Thyra::LinearOpBase<double> > thyra_K =
Thyra::epetraLinearOp(K);
RCP<const Thyra::LinearOpBase<double> > thyra_M =
//......... (part of the code omitted here) .........
Example 5: main
int main(int argc, char *argv[])
{
int ierr = 0;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
int rank; // My process ID
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
int rank = 0;
Epetra_SerialComm Comm;
#endif
bool verbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
int verbose_int = verbose ? 1 : 0;
Comm.Broadcast(&verbose_int, 1, 0);
verbose = verbose_int==1 ? true : false;
// char tmp;
// if (rank==0) cout << "Press any key to continue..."<< std::endl;
// if (rank==0) cin >> tmp;
// Comm.Barrier();
Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
if(verbose && MyPID==0)
cout << Epetra_Version() << std::endl << std::endl;
if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
<< " is alive."<<endl;
//bool verbose1 = verbose; // unused
// Redefine verbose to only print on PE 0
if(verbose && rank!=0) verbose = false;
int NumMyEquations = 1;
long long NumGlobalEquations = NumProc;
// Get update list and number of local equations from newly created Map
long long* MyGlobalElementsLL = new long long[NumMyEquations];
MyGlobalElementsLL[0] = 2000000000+MyPID;
// Construct a Map that puts approximately the same Number of equations on each processor
Epetra_Map MapLL(NumGlobalEquations, NumMyEquations, MyGlobalElementsLL, 0, Comm);
EPETRA_TEST_ERR(MapLL.GlobalIndicesInt(),ierr);
EPETRA_TEST_ERR(!(MapLL.GlobalIndicesLongLong()),ierr);
// Create an integer vector NumNz that is used to build the Petra Matrix.
// NumNz[i] is the number of OFF-DIAGONAL terms for the ith global equation on this processor
int* NumNzLL = new int[NumMyEquations];
NumNzLL[0] = 0;
// Create int-typed global indices to insert into the long long matrix (to test the expected failure)
int* MyIntGlobalElementsLL = new int[NumMyEquations];
MyIntGlobalElementsLL[0] = 20000+MyPID;
// Create a long long Epetra_Matrix
Epetra_CrsMatrix A_LL(Copy, MapLL, NumNzLL);
EPETRA_TEST_ERR(A_LL.IndicesAreGlobal(),ierr);
EPETRA_TEST_ERR(A_LL.IndicesAreLocal(),ierr);
// Insert values
double one = 1.0;
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
// Try to add ints which should fail and be caught as an int
try {
A_LL.InsertGlobalValues(MyIntGlobalElementsLL[0], 1, &one, MyIntGlobalElementsLL+0);
} catch(int i) {
EPETRA_TEST_ERR(!(i==-1),ierr);
}
#endif
// Add long longs which should succeed
EPETRA_TEST_ERR(!(A_LL.InsertGlobalValues(MyGlobalElementsLL[0], 1, &one, MyGlobalElementsLL+0)==0),ierr);
EPETRA_TEST_ERR(!(A_LL.IndicesAreGlobal()),ierr);
EPETRA_TEST_ERR(!(A_LL.FillComplete(false)==0),ierr);
EPETRA_TEST_ERR(!(A_LL.IndicesAreLocal()),ierr);
// Get update list and number of local equations from newly created Map
//......... (part of the code omitted here) .........
Example 6: main
int main(int argc, char *argv[])
{
int ierr = 0;
double elapsed_time;
double total_flops;
double MFLOPs;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm comm;
#endif
bool verbose = false;
bool summary = false;
// Check if we should print verbose results to standard out
if (argc>6) if (argv[6][0]=='-' && argv[6][1]=='v') verbose = true;
// Check if we should print summary results to standard out
if (argc>6) if (argv[6][0]=='-' && argv[6][1]=='s') summary = true;
if(argc < 6) {
cerr << "Usage: " << argv[0]
<< " NumNodesX NumNodesY NumProcX NumProcY NumPoints [-v|-s]" << endl
<< "where:" << endl
<< "NumNodesX - Number of mesh nodes in X direction per processor" << endl
<< "NumNodesY - Number of mesh nodes in Y direction per processor" << endl
<< "NumProcX - Number of processors to use in X direction" << endl
<< "NumProcY - Number of processors to use in Y direction" << endl
<< "NumPoints - Number of points to use in stencil (5, 9 or 25 only)" << endl
<< "-v|-s - (Optional) Run in verbose mode if -v present or summary mode if -s present" << endl
<< " NOTES: NumProcX*NumProcY must equal the number of processors used to run the problem." << endl << endl
<< " Serial example:" << endl
<< argv[0] << " 16 12 1 1 25 -v" << endl
<< " Run this program in verbose mode on 1 processor using a 16 X 12 grid with a 25 point stencil."<< endl <<endl
<< " MPI example:" << endl
<< "mpirun -np 32 " << argv[0] << " 10 12 4 8 9 -v" << endl
<< " Run this program in verbose mode on 32 processors putting a 10 X 12 subgrid on each processor using 4 processors "<< endl
<< " in the X direction and 8 in the Y direction. Total grid size is 40 points in X and 96 in Y with a 9 point stencil."<< endl
<< endl;
return(1);
}
//char tmp;
//if (comm.MyPID()==0) cout << "Press any key to continue..."<< endl;
//if (comm.MyPID()==0) cin >> tmp;
//comm.Barrier();
comm.SetTracebackMode(0); // This should shut down any error traceback reporting
if (verbose && comm.MyPID()==0)
cout << Epetra_Version() << endl << endl;
if (summary && comm.MyPID()==0) {
if (comm.NumProc()==1)
cout << Epetra_Version() << endl << endl;
else
cout << endl << endl; // Print two blank lines to keep output columns lined up
}
if (verbose) cout << comm <<endl;
// Redefine verbose to only print on PE 0
if (verbose && comm.MyPID()!=0) verbose = false;
if (summary && comm.MyPID()!=0) summary = false;
int numNodesX = atoi(argv[1]);
int numNodesY = atoi(argv[2]);
int numProcsX = atoi(argv[3]);
int numProcsY = atoi(argv[4]);
int numPoints = atoi(argv[5]);
if (verbose || (summary && comm.NumProc()==1)) {
cout << " Number of local nodes in X direction = " << numNodesX << endl
<< " Number of local nodes in Y direction = " << numNodesY << endl
<< " Number of global nodes in X direction = " << numNodesX*numProcsX << endl
<< " Number of global nodes in Y direction = " << numNodesY*numProcsY << endl
<< " Number of local nonzero entries = " << numNodesX*numNodesY*numPoints << endl
<< " Number of global nonzero entries = " << numNodesX*numNodesY*numPoints*numProcsX*numProcsY << endl
<< " Number of Processors in X direction = " << numProcsX << endl
<< " Number of Processors in Y direction = " << numProcsY << endl
<< " Number of Points in stencil = " << numPoints << endl << endl;
}
// Print blank line to keep output columns lined up
if (summary && comm.NumProc()>1)
cout << endl << endl << endl << endl << endl << endl << endl << endl<< endl << endl;
if (numProcsX*numProcsY!=comm.NumProc()) {
cerr << "Number of processors = " << comm.NumProc() << endl
<< " is not the product of " << numProcsX << " and " << numProcsY << endl << endl;
return(1);
}
if (numPoints!=5 && numPoints!=9 && numPoints!=25) {
cerr << "Number of points specified = " << numPoints << endl
//......... (part of the code omitted here) .........
Example 7: main
int main(int argc, char *argv[])
{
bool boolret;
int MyPID;
#ifdef HAVE_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
MyPID = Comm.MyPID();
bool testFailed = false;
bool verbose = false;
bool debug = false;
string filename("mhd1280b.cua");
string which("LR");
bool skinny = true;
bool success = true;
try {
CommandLineProcessor cmdp(false,true);
cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
cmdp.setOption("debug","nodebug",&debug,"Print debugging information.");
cmdp.setOption("skinny","hefty",&skinny,"Use a skinny (low-mem) or hefty (higher-mem) implementation of IRTR.");
cmdp.setOption("sort",&which,"Targetted eigenvalues (SR or LR).");
if (cmdp.parse(argc,argv) != CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
if (debug) verbose = true;
typedef double ScalarType;
typedef ScalarTraits<ScalarType> SCT;
typedef SCT::magnitudeType MagnitudeType;
typedef Epetra_MultiVector MV;
typedef Epetra_Operator OP;
typedef Anasazi::MultiVecTraits<ScalarType,MV> MVT;
typedef Anasazi::OperatorTraits<ScalarType,MV,OP> OPT;
if (verbose && MyPID == 0) {
cout << Anasazi::Anasazi_Version() << endl << endl;
}
// Problem information
int space_dim = 1;
std::vector<double> brick_dim( space_dim );
brick_dim[0] = 1.0;
std::vector<int> elements( space_dim );
elements[0] = 100;
// Create problem
RCP<ModalProblem> testCase = rcp( new ModeLaplace1DQ1(Comm, brick_dim[0], elements[0]) );
//
// Get the stiffness and mass matrices
RCP<const Epetra_CrsMatrix> K = rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
RCP<const Epetra_CrsMatrix> M = rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
const int FIRST_BS = 5;
const int SECOND_BS = 5;
const int THIRD_BS = 5;
const int NEV = FIRST_BS+SECOND_BS+THIRD_BS;
////////////////////////////////////////////////////////////////////////////////
//
// Shared parameters
RCP<Anasazi::BasicEigenproblem<ScalarType,MV,OP> > problem;
Anasazi::Eigensolution<ScalarType,MV> sol1, sol21, sol22, sol23;
RCP<const MV> cpoint;
RCP<MV> ev2 = rcp( new Epetra_MultiVector(K->OperatorDomainMap(), FIRST_BS+SECOND_BS+THIRD_BS) );
Anasazi::ReturnType returnCode;
//
// Verbosity level
int verbosity = Anasazi::Errors + Anasazi::Warnings;
if (debug) {
verbosity += Anasazi::Debug;
}
//
// Eigensolver parameters
int maxIters = 450;
MagnitudeType tol = 1.0e-8;
//
// Create parameter list to pass into the solver managers
ParameterList MyPL;
MyPL.set( "Skinny Solver", skinny);
MyPL.set( "Verbosity", verbosity );
MyPL.set( "Which", which );
MyPL.set( "Maximum Iterations", maxIters );
MyPL.set( "Convergence Tolerance", tol );
MyPL.set( "Use Locking", true );
MyPL.set( "Locking Tolerance", tol/10 );
try {
//......... (part of the code omitted here) .........
Example 8: solvetriadmatrixwithtrilinos
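// NOTE: the opening #if of the preprocessor guard below (which appears to select between
// the plain and trailing-underscore Fortran name-mangling conventions) is not shown in this excerpt.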
void solvetriadmatrixwithtrilinos(int& nnz, int& order, int* row,
int* col, double* val, double* rhs, double* solution) {
#else
void solvetriadmatrixwithtrilinos_(int& nnz, int& order, int* row,
int* col, double* val, double* rhs, double* solution) {
#endif
try{
#ifdef _MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int i, j, ierr;
int MyPID = Comm.MyPID();
bool verbose = (MyPID == 0);
Epetra_Map RowMap(order, 0, Comm);
int NumMyElements = RowMap.NumMyElements();
int *MyGlobalElements = new int[NumMyElements];
RowMap.MyGlobalElements(&MyGlobalElements[0]);
#ifdef _MPI
int nPEs;
MPI_Comm_size(MPI_COMM_WORLD, &nPEs);
#endif
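// anEst is the average number of nonzeros per row (rounded up),
// used as the per-row allocation estimate for the Epetra_CrsMatrix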
int anEst = nnz / order + 1;
Epetra_CrsMatrix A(Copy, RowMap, anEst);
for (j=0; j<nnz; ++j) {
if (RowMap.MyGID(row[j]) ) {
ierr = A.InsertGlobalValues(row[j], 1, &(val[j]), &(col[j]) );
assert(ierr >= 0);
}
}
ierr = A.FillComplete();
assert(ierr == 0);
//-------------------------------------------------------------------------
// RN_20091221: Taking care of the rhs
//-------------------------------------------------------------------------
Epetra_Vector b(RowMap);
// Inserting values into the rhs
double *MyGlobalValues = new double[NumMyElements];
for (j=0; j<NumMyElements; ++j) {
MyGlobalValues[j] = rhs[MyGlobalElements[j] ];
}
ierr = b.ReplaceGlobalValues(NumMyElements, &MyGlobalValues[0],
&MyGlobalElements[0]);
//-------------------------------------------------------------------------
// RN_20091221: Taking care of the solution
//-------------------------------------------------------------------------
Epetra_Vector x(RowMap);
Teuchos::ParameterList paramList;
Teuchos::RCP<Teuchos::ParameterList>
paramList1 = Teuchos::rcp(&paramList, false);
Teuchos::updateParametersFromXmlFile("./strat1.xml", paramList1.get() );
Teuchos::RCP<Teuchos::FancyOStream>
out = Teuchos::VerboseObjectBase::getDefaultOStream();
Stratimikos::DefaultLinearSolverBuilder linearSolverBuilder;
Teuchos::RCP<Epetra_CrsMatrix> epetraOper = Teuchos::rcp(&A, false);
Teuchos::RCP<Epetra_Vector> epetraRhs = Teuchos::rcp(&b, false);
Teuchos::RCP<Epetra_Vector> epetraSol = Teuchos::rcp(&x, false);
Teuchos::RCP<const Thyra::LinearOpBase<double> >
thyraOper = Thyra::epetraLinearOp(epetraOper);
Teuchos::RCP<Thyra::VectorBase<double> >
thyraRhs = Thyra::create_Vector(epetraRhs, thyraOper->range() );
Teuchos::RCP<Thyra::VectorBase<double> >
thyraSol = Thyra::create_Vector(epetraSol, thyraOper->domain() );
linearSolverBuilder.setParameterList(Teuchos::rcp(&paramList, false) );
Teuchos::RCP<Thyra::LinearOpWithSolveFactoryBase<double> >
lowsFactory = linearSolverBuilder.createLinearSolveStrategy("");
lowsFactory->setOStream(out);
lowsFactory->setVerbLevel(Teuchos::VERB_LOW);
Teuchos::RCP<Thyra::LinearOpWithSolveBase<double> >
lows = Thyra::linearOpWithSolve(*lowsFactory, thyraOper);
Thyra::SolveStatus<double>
status = Thyra::solve(*lows, Thyra::NOTRANS, *thyraRhs, &*thyraSol);
thyraSol = Teuchos::null;
// For debugging =)
// cout << "A: " << A << endl;
// cout << "b: " << b << endl;
//......... (part of the code omitted here) .........
Example 9: main
int main(int argc, char *argv[])
{
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
// Check verbosity level
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 101;
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << endl;
cout << "Test failed!" << endl;
throw "NOX Error";
}
// Create the interface between NOX and the application
// This object is derived from NOX::Epetra::Interface
Teuchos::RCP<Interface> interface =
Teuchos::rcp(new Interface(NumGlobalElements, Comm));
// Set the PDE factor (for nonlinear forcing term). This could be specified
// via user input.
interface->setPDEfactor(1000.0);
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> IfpackParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList printParams;
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::TestDetails +
NOX::Utils::Error);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
// Create a print class for controlling output below
NOX::Utils p(printParams);
// *******************************
// Setup Test Objects
// *******************************
// Create Linear Objects
// Get the vector from the Problem
if (verbose)
p.out() << "Creating Vectors and Matrices" << endl;
Teuchos::RCP<Epetra_Vector> solution_vec =
interface->getSolution();
Teuchos::RCP<Epetra_Vector> rhs_vec =
Teuchos::rcp(new Epetra_Vector(*solution_vec));
Teuchos::RCP<Epetra_Vector> lhs_vec =
Teuchos::rcp(new Epetra_Vector(*solution_vec));
Teuchos::RCP<Epetra_CrsMatrix> jacobian_matrix =
interface->getJacobian();
//......... (part of the code omitted here) .........
Example 10: main
//==============================================================================
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// only one process
if (Comm.NumProc() != 1) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
if (Comm.MyPID() == 0)
cout << "Please run this test with one process only" << endl;
// return success so as not to break the tests
exit(EXIT_SUCCESS);
}
// ======================================================== //
// now create the famous "upper arrow" matrix, which //
// should be reordered as a "lower arrow". Sparsity pattern //
// will be printed on screen. //
// ======================================================== //
int NumPoints = 16;
#if !defined(EPETRA_NO_32BIT_GLOBAL_INDICES) || !defined(EPETRA_NO_64BIT_GLOBAL_INDICES)
Epetra_Map Map(-1,NumPoints,0,Comm);
#else
Epetra_Map Map;
#endif
std::vector<int> Indices(NumPoints);
std::vector<double> Values(NumPoints);
Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy,Map,0) );
for (int i = 0 ; i < NumPoints ; ++i) {
int NumEntries;
if (i == 0) {
NumEntries = NumPoints;
for (int j = 0 ; j < NumPoints ; ++j) {
Indices[j] = j;
Values[j] = 1.0;
}
}
else {
NumEntries = 2;
Indices[0] = 0;
Indices[1] = i;
Values[0] = 1.0;
Values[1] = 1.0;
}
#if !defined(EPETRA_NO_32BIT_GLOBAL_INDICES) || !defined(EPETRA_NO_64BIT_GLOBAL_INDICES)
A->InsertGlobalValues(i, NumEntries, &Values[0], &Indices[0]);
#endif
}
A->FillComplete();
// print the sparsity to file, postscript format
////Ifpack_PrintSparsity(A,"OrigA.ps");
// create the reordering...
Teuchos::RefCountPtr<Ifpack_RCMReordering> Reorder = Teuchos::rcp( new Ifpack_RCMReordering() );
// and compute it on A
IFPACK_CHK_ERR(Reorder->Compute(*A));
// print reordering information to cout
cout << *Reorder;
// create a reordered matrix
Ifpack_ReorderFilter ReordA(A, Reorder);
// print the sparsity to file, postscript format
////Ifpack_PrintSparsity(ReordA,"ReordA.ps");
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return(EXIT_SUCCESS);
}
Example 11: main
//......... (part of the code omitted here) .........
// bifurcationList.set("Include UV In Preconditioner", true);
// //bifurcationList.set("Use P For Preconditioner", true);
// bifurcationList.set("Preconditioner Method", "SMW");
bifurcationList.set("Formulation", "Moore-Spence");
bifurcationList.set("Solver Method", "Phipps Bordering"); // better for nearly singular matrices
// bifurcationList.set("Solver Method", "Salinger Bordering");
bifurcationList.set("Initial Null Vector", nullVec);
bifurcationList.set("Length Normalization Vector", nullVec);
// Create the sublist for the predictor
Teuchos::ParameterList& predictorList = locaParamsList.sublist("Predictor");
predictorList.set("Method", "Secant"); // Default
// predictorList.set("Method", "Constant"); // Other options
// predictorList.set("Method", "Tangent"); // Other options
// Create step size sublist
Teuchos::ParameterList& stepSizeList = locaParamsList.sublist("Step Size");
stepSizeList.set("Method", "Adaptive"); // Default
stepSizeList.set("Initial Step Size", 0.1); // Should set
stepSizeList.set("Min Step Size", 1.0e-6); // Should set
stepSizeList.set("Max Step Size", 1.0); // Should set
stepSizeList.set("Aggressiveness", 0.1);
// Set up NOX info
Teuchos::ParameterList& nlParams = ParamList->sublist("NOX");
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist. This list determines how much
// of the NOX information is output
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", Comm.MyPID());
printParams.set("Output Precision", 5);
printParams.set("Output Processor", 0);
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::StepperIteration +
NOX::Utils::StepperDetails +
NOX::Utils::StepperParameters);
// NOX parameters - Sublist for line search
Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
searchParams.set("Method", "Backtrack");
// searchParams.set("Method", "Full Step");
// Sublist for direction
Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
dirParams.set("Method", "Newton");
Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
newtonParams.set("Forcing Term Method", "Constant");
// Sublist for linear solver for the Newton method
Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
lsParams.set("Aztec Solver", "GMRES");
lsParams.set("Max Iterations", 800);
lsParams.set("Tolerance", 1e-8);
lsParams.set("Output Frequency", 1);
Example 12: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
CommandLineProcessor CLP;
int n = 10;
int m = (int)pow((double)Comm.NumProc(), 0.3334);
double DampingFactor = 1.333;
std::string AggregationScheme = "Uncoupled";
int NPA = 16;
int MaxLevels = 5;
CLP.setOption("l", &MaxLevels, "number of levels");
CLP.setOption("n", &n, "number of nodes along each axis");
CLP.setOption("damp", &DampingFactor, "prolongator damping factor");
CLP.setOption("aggr", &AggregationScheme, "aggregation scheme");
CLP.setOption("npa", &NPA, "nodes per aggregate (if supported by the aggregation scheme)");
CLP.throwExceptions(false);
CLP.parse(argc,argv);
if (m * m * m != Comm.NumProc()) {
if (Comm.MyPID() == 0)
{
std::cout << "Number of processes must be a perfect cube." << std::endl;
std::cout << "Please re-run with --help option for details." << std::endl;
}
#ifdef HAVE_MPI
MPI_Finalize();
#endif
exit(EXIT_SUCCESS);
}
n *= m;
Laplace3D A(Comm, n, n, n, m, m, m, true);
Epetra_Vector LHS(A.OperatorDomainMap());
Epetra_Vector RHS(A.OperatorDomainMap());
LHS.Random();
RHS.PutScalar(0.0);
Epetra_LinearProblem Problem(&A, &LHS, &RHS);
// Construct a solver object for this problem
AztecOO solver(Problem);
// create a parameter list for ML options
ParameterList MLList;
// set defaults for classic smoothed aggregation
ML_Epetra::SetDefaults("SA",MLList);
// overwrite some parameters. Please refer to the user's guide
// for more information
MLList.set("max levels",MaxLevels);
MLList.set("increasing or decreasing","increasing");
MLList.set("aggregation: type", AggregationScheme);
MLList.set("aggregation: damping factor", DampingFactor);
MLList.set("aggregation: nodes per aggregate", NPA);
MLList.set("smoother: type","symmetric Gauss-Seidel");
MLList.set("smoother: pre or post", "both");
MLList.set("coarse: max size", 512);
MLList.set("coarse: type","Amesos-KLU");
MLList.set("analyze memory", true);
MLList.set("repartition: enable", true);
MLList.set("repartition: max min ratio", 1.1);
MLList.set("repartition: min per proc", 512);
MLList.set("low memory usage", true);
MLList.set("x-coordinates", (double*) A.XCoord());
MLList.set("y-coordinates", (double*) A.YCoord());
MLList.set("z-coordinates", (double*) A.ZCoord());
// create the preconditioner object and compute hierarchy
MultiLevelPreconditioner* MLPrec = new MultiLevelPreconditioner(A, MLList);
// tell AztecOO to use this preconditioner, then solve
solver.SetPrecOperator(MLPrec);
solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
solver.SetAztecOption(AZ_output, 1);
solver.Iterate(500, 1e-10);
delete MLPrec;
double norm;
LHS.Norm2(&norm);
if (Comm.MyPID() == 0)
std::cout << "Norm of the error = " << norm << std::endl;
#ifdef HAVE_MPI
MPI_Finalize() ;
#endif
//......... (part of the code omitted here) .........
Example 13: main
int main(int argc, char *argv[]) {
int i;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int MyPID = Comm.MyPID();
// Number of dimension of the domain
int space_dim = 2;
// Size of each of the dimensions of the domain
std::vector<double> brick_dim( space_dim );
brick_dim[0] = 1.0;
brick_dim[1] = 1.0;
// Number of elements in each of the dimensions of the domain
std::vector<int> elements( space_dim );
elements[0] = 10;
elements[1] = 10;
// Create problem
Teuchos::RCP<ModalProblem> testCase = Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );
// Get the stiffness and mass matrices
Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
//
// *******************************************************
// Set up Amesos direct solver for inner iteration
// *******************************************************
//
// Create the shifted system K - sigma * M.
// For the buckling transformation, this shift must be nonzero.
double sigma = 1.0;
Epetra_CrsMatrix Kcopy( *K );
int addErr = EpetraExt::MatrixMatrix::Add( *M, false, -sigma, Kcopy, 1.0 );
if (addErr != 0) {
if (MyPID == 0) {
std::cout << "EpetraExt::MatrixMatrix::Add returned with error: " << addErr << std::endl;
}
#ifdef HAVE_MPI
MPI_Finalize() ;
#endif
return -1;
}
// Create Epetra linear problem class to solve "x = b"
Epetra_LinearProblem AmesosProblem;
AmesosProblem.SetOperator(&Kcopy);
// Create Amesos factory and solver for solving "(K - sigma*M)x = b" using a direct factorization
Amesos amesosFactory;
Teuchos::RCP<Amesos_BaseSolver> AmesosSolver =
Teuchos::rcp( amesosFactory.Create( "Klu", AmesosProblem ) );
// The AmesosBucklingOp class assumes that the symbolic/numeric factorizations have already
// been performed on the linear problem.
AmesosSolver->SymbolicFactorization();
AmesosSolver->NumericFactorization();
//
// ************************************
// Start the block Arnoldi iteration
// ************************************
//
// Variables used for the Block Arnoldi Method
//
int nev = 10;
int blockSize = 3;
int numBlocks = 3*nev/blockSize;
int maxRestarts = 5;
//int step = 5;
double tol = 1.0e-8;
std::string which = "LM";
int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
//
// Create parameter list to pass into solver
//
Teuchos::ParameterList MyPL;
MyPL.set( "Verbosity", verbosity );
MyPL.set( "Which", which );
MyPL.set( "Block Size", blockSize );
MyPL.set( "Num Blocks", numBlocks );
MyPL.set( "Maximum Restarts", maxRestarts );
MyPL.set( "Convergence Tolerance", tol );
//MyPL.set( "Step Size", step );
typedef Epetra_MultiVector MV;
typedef Epetra_Operator OP;
typedef Anasazi::MultiVecTraits<double, MV> MVT;
//......... (part of the code omitted here) .........
Example 14: main
int main(int argc, char *argv[]) {
int ierr=0, returnierr=0;
#ifdef EPETRA_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
bool verbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
if (!verbose) {
Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
}
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
if (verbose && MyPID==0)
cout << Epetra_Version() << endl << endl;
if (verbose) cout << Comm << endl;
bool verbose1 = verbose;
if (verbose) verbose = (MyPID==0);
int NumMyElements = 10000;
int NumMyElements1 = NumMyElements; // Used for local map
int NumGlobalElements = NumMyElements*NumProc+EPETRA_MIN(NumProc,3);
if (MyPID < 3) NumMyElements++;
int IndexBase = 0;
bool DistributedGlobal = (NumGlobalElements>NumMyElements);
Epetra_Map* Map;
// Test exceptions
if (verbose)
cout << "*******************************************************************************************" << endl
<< " Testing Exceptions (Expect error messages if EPETRA_NO_ERROR_REPORTS is not defined" << endl
<< "*******************************************************************************************" << endl
<< endl << endl;
try {
if (verbose) cout << "Checking Epetra_Map(-2, IndexBase, Comm)" << endl;
Map = new Epetra_Map(-2, IndexBase, Comm);
}
catch (int Error) {
if (Error!=-1) {
if (Error!=0) {
EPETRA_TEST_ERR(Error,returnierr);
if (verbose) cout << "Error code should be -1" << endl;
}
else {
cout << "Error code = " << Error << "Should be -1" << endl;
returnierr+=1;
}
}
else if (verbose) cout << "Checked OK\n\n" << endl;
}
try {
if (verbose) cout << "Checking Epetra_Map(2, 3, IndexBase, Comm)" << endl;
Map = new Epetra_Map(2, 3, IndexBase, Comm);
}
catch (int Error) {
if (Error!=-4) {
if (Error!=0) {
EPETRA_TEST_ERR(Error,returnierr);
if (verbose) cout << "Error code should be -4" << endl;
}
else {
cout << "Error code = " << Error << "Should be -4" << endl;
returnierr+=1;
}
}
else if (verbose) cout << "Checked OK\n\n" << endl;
}
if (verbose) cerr << flush;
if (verbose) cout << flush;
Comm.Barrier();
if (verbose)
cout << endl << endl
<< "*******************************************************************************************" << endl
<< " Testing valid constructor now......................................................" << endl
<< "*******************************************************************************************" << endl
<< endl << endl;
// Test Epetra-defined uniform linear distribution constructor
Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
ierr = checkmap(*Map, NumGlobalElements, NumMyElements, 0,
IndexBase, Comm, DistributedGlobal);
//......... (part of the code omitted here) .........
Example 15: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
Teuchos::CommandLineProcessor clp(false);
clp.setDocString("This is the canonical ML scaling example");
//Problem
std::string optMatrixType = "Laplace2D"; clp.setOption("matrixType", &optMatrixType, "matrix type ('Laplace2D', 'Laplace3D')");
int optNx = 100; clp.setOption("nx", &optNx, "mesh size in x direction");
int optNy = -1; clp.setOption("ny", &optNy, "mesh size in y direction");
int optNz = -1; clp.setOption("nz", &optNz, "mesh size in z direction");
//Smoothers
//std::string optSmooType = "Chebyshev"; clp.setOption("smooType", &optSmooType, "smoother type ('l1-sgs', 'sgs', or 'cheby')");
int optSweeps = 3; clp.setOption("sweeps", &optSweeps, "Chebyshev degree (or SGS sweeps)");
double optAlpha = 7; clp.setOption("alpha", &optAlpha, "Chebyshev eigenvalue ratio (recommend 7 in 2D, 20 in 3D)");
//Coarsening
int optMaxCoarseSize = 500; clp.setOption("maxcoarse", &optMaxCoarseSize, "Size of coarsest grid when coarsening should stop");
int optMaxLevels = 10; clp.setOption("maxlevels", &optMaxLevels, "Maximum number of levels");
//Krylov solver
double optTol = 1e-12; clp.setOption("tol", &optTol, "stopping tolerance for Krylov method");
int optMaxIts = 500; clp.setOption("maxits", &optMaxIts, "maximum iterations for Krylov method");
//XML file with additional options
std::string xmlFile = ""; clp.setOption("xml", &xmlFile, "XML file containing ML options. [OPTIONAL]");
//Debugging
int optWriteMatrices = -2; clp.setOption("write", &optWriteMatrices, "write matrices to file (-1 means all; i>=0 means level i)");
switch (clp.parse(argc, argv)) {
case Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED: return EXIT_SUCCESS; break;
case Teuchos::CommandLineProcessor::PARSE_ERROR:
case Teuchos::CommandLineProcessor::PARSE_UNRECOGNIZED_OPTION: return EXIT_FAILURE; break;
case Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL: break;
}
#ifdef ML_SCALING
const int ntimers=4;
enum {total, probBuild, precBuild, solve};
ml_DblLoc timeVec[ntimers], maxTime[ntimers], minTime[ntimers];
for (int i=0; i<ntimers; i++) timeVec[i].rank = Comm.MyPID();
timeVec[total].value = MPI_Wtime();
#endif
// Creates the linear problem using the Galeri package.
// Several matrix examples are supported; please refer to the
// Galeri documentation for more details.
// Most of the examples using the ML_Epetra::MultiLevelPreconditioner
// class are based on Epetra_CrsMatrix. Example
// `ml_EpetraVbr.cpp' shows how to define a Epetra_VbrMatrix.
// `Laplace2D' is a symmetric matrix; an example of non-symmetric
// matrices is `Recirc2D' (advection-diffusion in a box, with
// recirculating flow). The grid has optNx x optNy nodes, divided into
// mx x my subdomains, each assigned to a different processor.
if (optNy == -1) optNy = optNx;
if (optNz == -1) optNz = optNx;
ParameterList GaleriList;
GaleriList.set("nx", optNx);
GaleriList.set("ny", optNy);
GaleriList.set("nz", optNz);
//GaleriList.set("mx", 1);
//GaleriList.set("my", Comm.NumProc());
#ifdef ML_SCALING
timeVec[probBuild].value = MPI_Wtime();
#endif
Epetra_Map* Map;
Epetra_CrsMatrix* A;
Epetra_MultiVector* Coord;
if (optMatrixType == "Laplace2D") {
Map = CreateMap("Cartesian2D", Comm, GaleriList);
A = CreateCrsMatrix("Laplace2D", Map, GaleriList);
Coord = CreateCartesianCoordinates("2D", &(A->Map()), GaleriList);
} else if (optMatrixType == "Laplace3D") {
Map = CreateMap("Cartesian3D", Comm, GaleriList);
A = CreateCrsMatrix("Laplace3D", Map, GaleriList);
Coord = CreateCartesianCoordinates("3D", &(A->Map()), GaleriList);
} else {
throw(std::runtime_error("Bad matrix type"));
}
//EpetraExt::RowMatrixToMatlabFile("A.m",*A);
double *x_coord = (*Coord)[0];
double *y_coord = (*Coord)[1];
double* z_coord=NULL;
//......... (part of the code omitted here) .........