This page collects typical usage examples of the C++ method Epetra_SerialComm::NumProc. If you are wondering what Epetra_SerialComm::NumProc does or how to call it, the curated examples below should help; you can also look at other usage examples of the containing class, Epetra_SerialComm.
The following shows 15 code examples of the Epetra_SerialComm::NumProc method, sorted by popularity.
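Before the examples, a minimal self-contained sketch of the method itself (written for this page, not drawn from the projects below): Epetra_SerialComm models a single-process communicator, so NumProc() always returns 1 and MyPID() always returns 0.

#include <iostream>
#include "Epetra_SerialComm.h"

int main() {
  Epetra_SerialComm Comm;
  // A serial communicator always reports exactly one process, rank 0.
  std::cout << "NumProc = " << Comm.NumProc()
            << ", MyPID = " << Comm.MyPID() << std::endl;
  return 0;
}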
Example 1: main
int main(int argc, char *argv[])
{
Teuchos::GlobalMPISession mpiSession(&argc, &argv);
bool success = false;
bool verbose = false;
try {
// Parse the command line
using Teuchos::CommandLineProcessor;
CommandLineProcessor clp;
clp.throwExceptions(false);
clp.addOutputSetupOptions(true);
clp.setOption( "v", "disable-verbosity", &verbose, "Enable verbosity" );
CommandLineProcessor::EParseCommandLineReturn
parse_return = clp.parse(argc,argv,&std::cerr);
if( parse_return != CommandLineProcessor::PARSE_SUCCESSFUL )
return parse_return;
if (verbose)
std::cout << "Verbosity Activated" << std::endl;
else
std::cout << "Verbosity Disabled" << std::endl;
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
const int num_elements = 400;
// Check that the number of elements is at least the number of
// processors, since each processor needs at least one element
if (Comm.NumProc() > num_elements) {
std::cerr << "Error! Number of elements must be at least the number of processors!"
<< std::endl;
return EXIT_FAILURE;
}
// Create the model evaluator object
double paramC = 0.99;
Teuchos::RCP<ModelEvaluatorHeq<double> > model =
modelEvaluatorHeq<double>(Teuchos::rcp(&Comm,false),num_elements,paramC);
::Stratimikos::DefaultLinearSolverBuilder builder;
Teuchos::RCP<Teuchos::ParameterList> p =
Teuchos::rcp(new Teuchos::ParameterList);
p->set("Linear Solver Type", "AztecOO");
p->sublist("Linear Solver Types").sublist("AztecOO").sublist("Forward Solve").sublist("AztecOO Settings").set("Output Frequency",20);
p->set("Preconditioner Type", "Ifpack");
builder.setParameterList(p);
Teuchos::RCP< ::Thyra::LinearOpWithSolveFactoryBase<double> >
lowsFactory = builder.createLinearSolveStrategy("");
model->set_W_factory(lowsFactory);
// Create the initial guess
Teuchos::RCP< ::Thyra::VectorBase<double> >
initial_guess = model->getNominalValues().get_x()->clone_v();
Thyra::V_S(initial_guess.ptr(),Teuchos::ScalarTraits<double>::one());
Teuchos::RCP<NOX::Thyra::Group> nox_group =
Teuchos::rcp(new NOX::Thyra::Group(*initial_guess, model));
//Teuchos::rcp(new NOX::Thyra::Group(*initial_guess, model, model->create_W_op(), lowsFactory, Teuchos::null, Teuchos::null));
nox_group->computeF();
// Create the NOX status tests and the solver
// Create the convergence tests
Teuchos::RCP<NOX::StatusTest::NormF> absresid =
Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
Teuchos::RCP<NOX::StatusTest::Combo> converged =
Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
converged->addStatusTest(absresid);
converged->addStatusTest(wrms);
Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
Teuchos::rcp(new NOX::StatusTest::FiniteValue);
Teuchos::RCP<NOX::StatusTest::Combo> combo =
Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
combo->addStatusTest(fv);
combo->addStatusTest(converged);
combo->addStatusTest(maxiters);
// Create nox parameter list
Teuchos::RCP<Teuchos::ParameterList> nl_params =
Teuchos::rcp(new Teuchos::ParameterList);
nl_params->set("Nonlinear Solver", "Anderson Accelerated Fixed-Point");
nl_params->sublist("Anderson Parameters").set("Storage Depth", 5);
nl_params->sublist("Anderson Parameters").set("Mixing Parameter", 1.0);
nl_params->sublist("Anderson Parameters").set("Acceleration Start Iteration", 5);
//......... part of the code omitted here .........
Example 2: main
int main(int argc, char *argv[])
{
int ierr = 0;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc, &argv);
int rank; // My process ID
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
int rank = 0;
Epetra_SerialComm Comm;
#endif
bool verbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
int verbose_int = verbose ? 1 : 0;
Comm.Broadcast(&verbose_int, 1, 0);
verbose = verbose_int==1 ? true : false;
Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
if(verbose && MyPID==0)
std::cout << Epetra_Version() << std::endl << std::endl;
if (verbose) std::cout << "Processor "<<MyPID<<" of "<< NumProc
<< " is alive."<< std::endl;
// unused: bool verbose1 = verbose;
// Redefine verbose to only print on PE 0
if(verbose && rank!=0)
verbose = false;
if (verbose) std::cout << "Test the memory management system of the class CrsMatrix (memory leak, invalid free)" << std::endl;
//
// Test 1: code initially proposed to illustrate bug #5499
//
if(Comm.NumProc() == 1) { // this is a sequential test
if (verbose) std::cout << "* Using Copy, ColMap, Variable number of indices per row and Static profile (cf. bug #5499)." << std::endl;
// Row Map
Epetra_Map RowMap(2, 0, Comm);
// ColMap
std::vector<int> colids(2);
colids[0]=0;
colids[1]=1;
Epetra_Map ColMap(-1, 2, &colids[0], 0, Comm);
// NumEntriesPerRow
std::vector<int> NumEntriesPerRow(2);
NumEntriesPerRow[0]=2;
NumEntriesPerRow[1]=2;
// Test
Epetra_CrsMatrix A(Copy, RowMap, ColMap, &NumEntriesPerRow[0], true);
// Bug #5499 shows up because InsertGlobalValues() is not called (CrsMatrix::Values_ not allocated but freed)
A.FillComplete();
}
//
// Test 1 Bis: same as Test 1, but without a ColMap and with a fixed number of indices per row. Does not seem to matter
//
if(Comm.NumProc() == 1) { // this is a sequential test
if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and Static profile" << std::endl;
Epetra_Map RowMap(2, 0, Comm);
// Test
Epetra_CrsMatrix A(Copy, RowMap, 1, true);
// Bug #5499 shows up because InsertGlobalValues() is not called (CrsMatrix::Values_ not allocated but freed)
A.FillComplete();
}
//
// Test 2: same as Test 1 Bis but with one call to InsertGlobalValues.
//
if(Comm.NumProc() == 1) {
//......... part of the code omitted here .........
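A side note on Example 2: the block that broadcasts verbose_int is a reusable idiom for keeping a command-line flag consistent across all ranks, since only the root process may have parsed it. A minimal sketch under the same Epetra_Comm API (the helper name is hypothetical):

#include "Epetra_Comm.h"

// Sketch: agree on a boolean flag across all processes.
// For an Epetra_SerialComm this broadcast is a no-op (single process).
bool syncFlag(const Epetra_Comm& comm, bool localFlag) {
  int flag = localFlag ? 1 : 0;
  comm.Broadcast(&flag, 1, 0); // root = process 0
  return flag == 1;
}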
Example 3: main
int main(int argc, char *argv[])
{
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
// Check verbosity level
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 101;
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << endl;
cout << "Test failed!" << endl;
throw "NOX Error";
}
// Create the interface between NOX and the application
// This object is derived from NOX::Epetra::Interface
Teuchos::RCP<Interface> interface =
Teuchos::rcp(new Interface(NumGlobalElements, Comm));
// Set the PDE factor (for nonlinear forcing term). This could be specified
// via user input.
interface->setPDEfactor(1000.0);
// Use a scaled vector space. The scaling must also be registered
// with the linear solver so the linear system is consistent!
Teuchos::RCP<Epetra_Vector> scaleVec =
Teuchos::rcp(new Epetra_Vector( *(interface->getSolution())));
scaleVec->PutScalar(2.0);
Teuchos::RCP<NOX::Epetra::Scaling> scaling =
Teuchos::rcp(new NOX::Epetra::Scaling);
scaling->addUserScaling(NOX::Epetra::Scaling::Left, scaleVec);
// Use a weighted vector space for scaling all norms
Teuchos::RCP<NOX::Epetra::VectorSpace> weightedVectorSpace =
Teuchos::rcp(new NOX::Epetra::VectorSpaceScaledL2(scaling));
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
Teuchos::RCP<NOX::Epetra::Vector> noxSoln =
Teuchos::rcp(new NOX::Epetra::Vector(soln,
NOX::Epetra::Vector::CreateCopy,
NOX::DeepCopy,
weightedVectorSpace));
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::TestDetails +
NOX::Utils::Error);
//......... part of the code omitted here .........
Example 4: main
int main(int argc, char *argv[])
{
int ierr = 0;
// scale factor to test arc-length scaling
double scale = 1.0;
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
// Get the number of elements from the command line
int NumGlobalElements = 100 + 1;
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << endl;
exit(1);
}
// Create the FiniteElementProblem class. This creates all required
// Epetra objects for the problem and allows calls to the
// function (RHS) and Jacobian evaluation routines.
FiniteElementProblem Problem(NumGlobalElements, Comm, scale);
// Get the vector from the Problem
Epetra_Vector& soln = Problem.getSolution();
// Initialize Solution
soln.PutScalar(1.0);
// Begin LOCA Solver ************************************
// Create parameter list
Teuchos::RCP<Teuchos::ParameterList> paramList =
Teuchos::rcp(new Teuchos::ParameterList);
// Create LOCA sublist
Teuchos::ParameterList& locaParamsList = paramList->sublist("LOCA");
// Create the stepper sublist and set the stepper parameters
Teuchos::ParameterList& locaStepperList = locaParamsList.sublist("Stepper");
locaStepperList.set("Continuation Method", "Arc Length");
locaStepperList.set("Bordered Solver Method", "Householder");
locaStepperList.set("Number of Continuation Parameters", 2);
locaStepperList.set("Epsilon", 0.1);
locaStepperList.set("Max Charts", 10000);
locaStepperList.set("Verbosity", 1);
locaStepperList.set("Page Charts", 1);
locaStepperList.set("Dump Polyhedra", true);
locaStepperList.set("Dump Centers", false);
locaStepperList.set("Filename", "MFresults");
locaStepperList.set("Enable Arc Length Scaling", false);
locaStepperList.set("Max Nonlinear Iterations", 15);
locaStepperList.set("Aggressiveness", 0.0);
locaStepperList.set("Max Solution Component", 6.0);
// Create sublist for each continuation parameter
Teuchos::ParameterList& paramList1 =
locaStepperList.sublist("Continuation Parameter 1");
paramList1.set("Parameter Name", "Right BC");
paramList1.set("Initial Value", 0.1);
paramList1.set("Max Value", 4.0);
paramList1.set("Min Value", 0.0);
paramList1.set("Initial Step Size", 0.1);
paramList1.set("Max Step Size", 0.2);
paramList1.set("Min Step Size", 1.0e-3);
Teuchos::ParameterList& paramList2 =
locaStepperList.sublist("Continuation Parameter 2");
paramList2.set("Parameter Name", "Nonlinear Factor");
paramList2.set("Initial Value", 1.0);
paramList2.set("Max Value", 4.0);
paramList2.set("Min Value", 0.0);
paramList2.set("Initial Step Size", 0.1);
paramList2.set("Max Step Size", 0.2);
paramList2.set("Min Step Size", 1.0e-3);
// Create predictor sublist
Teuchos::ParameterList& predictorList = locaParamsList.sublist("Predictor");
predictorList.set("Method", "Tangent");
// Create the "Solver" parameters sublist to be used with NOX Solvers
Teuchos::ParameterList& nlParams = paramList->sublist("NOX");
// Create the NOX printing parameter list
//......... part of the code omitted here .........
Example 5: comm
TEUCHOS_UNIT_TEST( EpetraOperatorWrapper, basic )
{
#ifdef HAVE_MPI
Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm comm;
#endif
out << "\nRunning on " << comm.NumProc() << " processors\n";
int nx = 39; // essentially random values
int ny = 53;
out << "Using Trilinos_Util to create test matrices\n";
// create some big blocks to play with
Trilinos_Util::CrsMatrixGallery FGallery("recirc_2d",comm);
FGallery.Set("nx",nx);
FGallery.Set("ny",ny);
RCP<Epetra_CrsMatrix> F = rcp(FGallery.GetMatrix(),false);
Trilinos_Util::CrsMatrixGallery CGallery("laplace_2d",comm);
CGallery.Set("nx",nx);
CGallery.Set("ny",ny);
RCP<Epetra_CrsMatrix> C = rcp(CGallery.GetMatrix(),false);
Trilinos_Util::CrsMatrixGallery BGallery("diag",comm);
BGallery.Set("nx",nx*ny);
BGallery.Set("a",5.0);
RCP<Epetra_CrsMatrix> B = rcp(BGallery.GetMatrix(),false);
Trilinos_Util::CrsMatrixGallery BtGallery("diag",comm);
BtGallery.Set("nx",nx*ny);
BtGallery.Set("a",3.0);
RCP<Epetra_CrsMatrix> Bt = rcp(BtGallery.GetMatrix(),false);
// load'em up in a thyra operator
out << "Building block2x2 Thyra matrix ... wrapping in EpetraOperatorWrapper\n";
const RCP<const LinearOpBase<double> > A =
Thyra::block2x2<double>(
Thyra::epetraLinearOp(F),
Thyra::epetraLinearOp(Bt),
Thyra::epetraLinearOp(B),
Thyra::epetraLinearOp(C),
"A"
);
const RCP<Thyra::EpetraOperatorWrapper> epetra_A =
rcp(new Thyra::EpetraOperatorWrapper(A));
// begin the tests!
const Epetra_Map & rangeMap = epetra_A->OperatorRangeMap();
const Epetra_Map & domainMap = epetra_A->OperatorDomainMap();
// check to see that the number of global elements is correct
TEST_EQUALITY(rangeMap.NumGlobalElements(), 2*nx*ny);
TEST_EQUALITY(domainMap.NumGlobalElements(), 2*nx*ny);
// largest global ID should be one less than the # of elements
TEST_EQUALITY(rangeMap.NumGlobalElements()-1, rangeMap.MaxAllGID());
TEST_EQUALITY(domainMap.NumGlobalElements()-1, domainMap.MaxAllGID());
// create a vector to test: copyThyraIntoEpetra
{
const RCP<VectorBase<double> > tv = Thyra::createMember(A->domain());
Thyra::randomize(-100.0, 100.0, tv.ptr());
const RCP<const VectorBase<double> > tv_0 =
Thyra::productVectorBase<double>(tv)->getVectorBlock(0);
const RCP<const VectorBase<double> > tv_1 =
Thyra::productVectorBase<double>(tv)->getVectorBlock(1);
const Thyra::ConstDetachedSpmdVectorView<double> vv_0(tv_0);
const Thyra::ConstDetachedSpmdVectorView<double> vv_1(tv_1);
int off_0 = vv_0.globalOffset();
int off_1 = vv_1.globalOffset();
// create its Epetra counter part
Epetra_Vector ev(epetra_A->OperatorDomainMap());
epetra_A->copyThyraIntoEpetra(*tv, ev);
// compare handle_tv to ev!
TEST_EQUALITY(tv->space()->dim(), as<Ordinal>(ev.GlobalLength()));
const int numMyElements = domainMap.NumMyElements();
double tval = 0.0;
for(int i=0; i < numMyElements; i++) {
int gid = domainMap.GID(i);
if(gid<nx*ny)
tval = vv_0[gid-off_0];
else
tval = vv_1[gid-off_1-nx*ny];
TEST_EQUALITY(ev[i], tval);
}
}
// create a vector to test: copyEpetraIntoThyra
{
// create an Epetra vector
Epetra_Vector ev(epetra_A->OperatorDomainMap());
ev.Random();
//......... part of the code omitted here .........
Example 6: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int nx;
if (argc > 1)
nx = (int) strtol(argv[1],NULL,10);
else
nx = 256;
int ny = nx * Comm.NumProc(); // each subdomain is a square
ParameterList GaleriList;
GaleriList.set("nx", nx);
GaleriList.set("ny", ny);
GaleriList.set("mx", 1);
GaleriList.set("my", Comm.NumProc());
int NumNodes = nx*ny;
int NumPDEEqns = 2;
Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
Epetra_CrsMatrix* CrsA = CreateCrsMatrix("Laplace2D", Map, GaleriList);
Epetra_VbrMatrix* A = CreateVbrMatrix(CrsA, NumPDEEqns);
Epetra_Vector LHS(A->DomainMap()); LHS.PutScalar(0);
Epetra_Vector RHS(A->DomainMap()); RHS.Random();
Epetra_LinearProblem Problem(A, &LHS, &RHS);
AztecOO solver(Problem);
double *x_coord = 0, *y_coord = 0, *z_coord = 0;
Epetra_MultiVector *coords = CreateCartesianCoordinates("2D", &(CrsA->Map()),
GaleriList);
double **ttt;
if (!coords->ExtractView(&ttt)) {
x_coord = ttt[0];
y_coord = ttt[1];
} else {
printf("Error extracting coordinate vectors\n");
# ifdef HAVE_MPI
MPI_Finalize() ;
# endif
exit(EXIT_FAILURE);
}
ParameterList MLList;
SetDefaults("SA",MLList);
MLList.set("ML output",10);
MLList.set("max levels",10);
MLList.set("increasing or decreasing","increasing");
MLList.set("smoother: type", "Chebyshev");
MLList.set("smoother: sweeps", 3);
// *) a small value keeps the aggregates spread across all available processes
// *) a large value moves the next level entirely onto processor 0
MLList.set("aggregation: next-level aggregates per process", 1);
MLList.set("aggregation: type (level 0)", "Zoltan");
MLList.set("aggregation: type (level 1)", "Uncoupled");
MLList.set("aggregation: type (level 2)", "Zoltan");
MLList.set("aggregation: smoothing sweeps", 2);
MLList.set("x-coordinates", x_coord);
MLList.set("y-coordinates", y_coord);
MLList.set("z-coordinates", z_coord);
// specify the reduction with respect to the previous level
// (very small values can break the code)
int ratio = 16;
MLList.set("aggregation: global aggregates (level 0)",
NumNodes / ratio);
MLList.set("aggregation: global aggregates (level 1)",
NumNodes / (ratio * ratio));
MLList.set("aggregation: global aggregates (level 2)",
NumNodes / (ratio * ratio * ratio));
MultiLevelPreconditioner* MLPrec =
new MultiLevelPreconditioner(*A, MLList, true);
solver.SetPrecOperator(MLPrec);
solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
solver.SetAztecOption(AZ_output, 1);
solver.Iterate(100, 1e-12);
// compute the real residual
Epetra_Vector Residual(A->DomainMap());
//1.0 * RHS + 0.0 * RHS - 1.0 * (A * LHS)
A->Apply(LHS,Residual);
Residual.Update(1.0, RHS, 0.0, RHS, -1.0);
double rn;
Residual.Norm2(&rn);
if (Comm.MyPID() == 0 )
std::cout << "||b-Ax||_2 = " << rn << endl;
//......... part of the code omitted here .........
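Example 6 sizes its coarse levels by a fixed geometric ratio; the same pattern extends to any number of levels. A small hedged sketch (the helper is hypothetical; the "aggregation: global aggregates (level N)" parameter names follow the example above):

#include <string>
#include "Teuchos_ParameterList.hpp"

// Sketch: request NumNodes / ratio^(level+1) global aggregates on each level.
// Very small targets can break the code, as the example above warns.
void setAggregateTargets(Teuchos::ParameterList& MLList, int NumNodes, int ratio, int nLevels) {
  int target = NumNodes;
  for (int lev = 0; lev < nLevels; ++lev) {
    target /= ratio; // geometric reduction relative to the previous level
    MLList.set("aggregation: global aggregates (level " + std::to_string(lev) + ")", target);
  }
}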
Example 7: main
// ======================================================================
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
verbose = (Comm.MyPID() == 0);
for (int i = 1 ; i < argc ; ++i) {
if (strcmp(argv[i],"-s") == 0) {
SymmetricGallery = true;
Solver = AZ_cg;
}
}
// size of the global matrix.
Teuchos::ParameterList GaleriList;
int nx = 30;
GaleriList.set("nx", nx);
GaleriList.set("ny", nx * Comm.NumProc());
GaleriList.set("mx", 1);
GaleriList.set("my", Comm.NumProc());
Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
Teuchos::RefCountPtr<Epetra_CrsMatrix> A;
if (SymmetricGallery)
A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
else
A = Teuchos::rcp( Galeri::CreateCrsMatrix("Recirc2D", &*Map, GaleriList) );
// test the preconditioner
int TestPassed = true;
// ======================================== //
// first verify that we can get convergence //
// with all point relaxation methods //
// ======================================== //
if(!BasicTest("Jacobi",A,false))
TestPassed = false;
if(!BasicTest("symmetric Gauss-Seidel",A,false))
TestPassed = false;
if(!BasicTest("symmetric Gauss-Seidel",A,false,true))
TestPassed = false;
if (!SymmetricGallery) {
if(!BasicTest("Gauss-Seidel",A,false))
TestPassed = false;
if(!BasicTest("Gauss-Seidel",A,true))
TestPassed = false;
if(!BasicTest("Gauss-Seidel",A,false,true))
TestPassed = false;
if(!BasicTest("Gauss-Seidel",A,true,true))
TestPassed = false;
}
// ============================= //
// check uses as preconditioners //
// ============================= //
if(!KrylovTest("symmetric Gauss-Seidel",A,false))
TestPassed = false;
if(!KrylovTest("symmetric Gauss-Seidel",A,false,true))
TestPassed = false;
if (!SymmetricGallery) {
if(!KrylovTest("Gauss-Seidel",A,false))
TestPassed = false;
if(!KrylovTest("Gauss-Seidel",A,true))
TestPassed = false;
if(!KrylovTest("Gauss-Seidel",A,false,true))
TestPassed = false;
if(!KrylovTest("Gauss-Seidel",A,true,true))
TestPassed = false;
}
// ================================== //
// compare point and block relaxation //
// ================================== //
//TestPassed = TestPassed &&
// ComparePointAndBlock("Jacobi",A,1);
TestPassed = TestPassed &&
ComparePointAndBlock("Jacobi",A,10);
//TestPassed = TestPassed &&
//ComparePointAndBlock("symmetric Gauss-Seidel",A,1);
//......... part of the code omitted here .........
Example 8: main
// main driver
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
if (Comm.NumProc() != 2) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return(0);
}
int NumMyElements = 0; // NODES assigned to this processor
int NumMyExternalElements = 0; // nodes used by this proc, but not hosted
int NumMyTotalElements = 0;
int FE_NumMyElements = 0; // TRIANGLES assigned to this processor
int * MyGlobalElements = 0; // nodes assigned to this processor
Epetra_IntSerialDenseMatrix T; // store the grid connectivity
int MyPID=Comm.MyPID();
cout << MyPID << endl;
switch( MyPID ) {
case 0:
NumMyElements = 3;
NumMyExternalElements = 2;
NumMyTotalElements = NumMyElements + NumMyExternalElements;
FE_NumMyElements = 3;
MyGlobalElements = new int[NumMyTotalElements];
MyGlobalElements[0] = 0;
MyGlobalElements[1] = 4;
MyGlobalElements[2] = 3;
MyGlobalElements[3] = 1;
MyGlobalElements[4] = 5;
break;
case 1:
NumMyElements = 3;
NumMyExternalElements = 2;
NumMyTotalElements = NumMyElements + NumMyExternalElements;
FE_NumMyElements = 3;
MyGlobalElements = new int[NumMyTotalElements];
MyGlobalElements[0] = 1;
MyGlobalElements[1] = 2;
MyGlobalElements[2] = 5;
MyGlobalElements[3] = 0;
MyGlobalElements[4] = 4;
break;
}
// build Map corresponding to update
Epetra_Map Map(-1,NumMyElements,MyGlobalElements,0,Comm);
// vector containing coordinates BEFORE exchanging external nodes
Epetra_Vector CoordX_noExt(Map);
Epetra_Vector CoordY_noExt(Map);
switch( MyPID ) {
case 0:
T.Shape(3,FE_NumMyElements);
// fill x-coordinates
CoordX_noExt[0] = 0.0;
CoordX_noExt[1] = 1.0;
CoordX_noExt[2] = 0.0;
// fill y-coordinates
CoordY_noExt[0] = 0.0;
CoordY_noExt[1] = 1.0;
CoordY_noExt[2] = 1.0;
// fill connectivity
T(0,0) = 0; T(0,1) = 4; T(0,2) = 3;
T(1,0) = 0; T(1,1) = 1; T(1,2) = 4;
T(2,0) = 4; T(2,1) = 1; T(2,2) = 5;
break;
case 1:
T.Shape(3,FE_NumMyElements);
// fill x-coordinates
CoordX_noExt[0] = 1.0;
CoordX_noExt[1] = 2.0;
CoordX_noExt[2] = 2.0;
// fill y-coordinates
CoordY_noExt[0] = 0.0;
CoordY_noExt[1] = 0.0;
CoordY_noExt[2] = 1.0;
// fill connectivity
//......... part of the code omitted here .........
Example 9: main
// ------------------------------------------------------------------------
// --------------------------- Main Program -----------------------------
// ------------------------------------------------------------------------
int main(int argc, char *argv[])
{
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
bool verbose = false;
// Check for verbose output
if (argc>1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 101;
bool success = false;
try {
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
std::cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << std::endl;
throw "NOX Error";
}
// Create the interface between NOX and the application
// This object is derived from NOX::Epetra::Interface
Teuchos::RCP<TransientInterface> interface =
Teuchos::rcp(new TransientInterface(NumGlobalElements, Comm, -20.0, 20.0));
double dt = 0.10;
interface->setdt(dt);
// Set the PDE nonlinear coefficient for this problem
interface->setPDEfactor(1.0);
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::Error);
else
printParams.set("Output Information", NOX::Utils::Error);
// Create a print class for controlling output below
NOX::Utils utils(printParams);
// Sublist for line search
Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
searchParams.set("Method", "Full Step");
// Sublist for direction
Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
dirParams.set("Method", "Newton");
Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
//......... part of the code omitted here .........
Example 10: main
int main(int argc, char *argv[])
{
int ierr = 0;
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
// Check verbosity level
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 101;
bool success = false;
try {
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
std::cout << "Error: numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << std::endl;
throw "NOX Error"; // a bare throw here, with no active exception, would call std::terminate
}
// Create the FiniteElementProblem class. This creates all required
// Epetra objects for the problem and allows calls to the
// function (RHS) and Jacobian evaluation routines.
FiniteElementProblem Problem(NumGlobalElements, Comm);
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = Problem.getSolution();
NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);
// Initialize Solution
soln->PutScalar(1.0);
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
//nlParams.set("Nonlinear Solver", "Trust Region Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::LinearSolverDetails +
NOX::Utils::InnerIteration +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
// Sublist for line search
Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
searchParams.set("Method", "Full Step");
// Sublist for direction
Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
dirParams.set("Method", "Newton");
Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
newtonParams.set("Forcing Term Method", "Constant");
// Sublist for linear solver for the Newton method
Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
//......... part of the code omitted here .........
Example 11: main
int main(int argc, char *argv[])
{
int ierr = 0;
double elapsed_time;
double total_flops;
double MFLOPs;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm comm;
#endif
bool verbose = false;
bool summary = false;
// Check if we should print verbose results to standard out
if (argc>6) if (argv[6][0]=='-' && argv[6][1]=='v') verbose = true;
// Check if we should print summary results to standard out
if (argc>6) if (argv[6][0]=='-' && argv[6][1]=='s') summary = true;
if(argc < 6) {
cerr << "Usage: " << argv[0]
<< " NumNodesX NumNodesY NumProcX NumProcY NumPoints [-v|-s]" << endl
<< "where:" << endl
<< "NumNodesX - Number of mesh nodes in X direction per processor" << endl
<< "NumNodesY - Number of mesh nodes in Y direction per processor" << endl
<< "NumProcX - Number of processors to use in X direction" << endl
<< "NumProcY - Number of processors to use in Y direction" << endl
<< "NumPoints - Number of points to use in stencil (5, 9 or 25 only)" << endl
<< "-v|-s - (Optional) Run in verbose mode if -v present or summary mode if -s present" << endl
<< " NOTES: NumProcX*NumProcY must equal the number of processors used to run the problem." << endl << endl
<< " Serial example:" << endl
<< argv[0] << " 16 12 1 1 25 -v" << endl
<< " Run this program in verbose mode on 1 processor using a 16 X 12 grid with a 25 point stencil."<< endl <<endl
<< " MPI example:" << endl
<< "mpirun -np 32 " << argv[0] << " 10 12 4 8 9 -v" << endl
<< " Run this program in verbose mode on 32 processors putting a 10 X 12 subgrid on each processor using 4 processors "<< endl
<< " in the X direction and 8 in the Y direction. Total grid size is 40 points in X and 96 in Y with a 9 point stencil."<< endl
<< endl;
return(1);
}
//char tmp;
//if (comm.MyPID()==0) cout << "Press any key to continue..."<< endl;
//if (comm.MyPID()==0) cin >> tmp;
//comm.Barrier();
comm.SetTracebackMode(0); // This should shut down any error traceback reporting
if (verbose && comm.MyPID()==0)
cout << Epetra_Version() << endl << endl;
if (summary && comm.MyPID()==0) {
if (comm.NumProc()==1)
cout << Epetra_Version() << endl << endl;
else
cout << endl << endl; // Print two blank line to keep output columns lined up
}
if (verbose) cout << comm <<endl;
// Redefine verbose to only print on PE 0
if (verbose && comm.MyPID()!=0) verbose = false;
if (summary && comm.MyPID()!=0) summary = false;
int numNodesX = atoi(argv[1]);
int numNodesY = atoi(argv[2]);
int numProcsX = atoi(argv[3]);
int numProcsY = atoi(argv[4]);
int numPoints = atoi(argv[5]);
if (verbose || (summary && comm.NumProc()==1)) {
cout << " Number of local nodes in X direction = " << numNodesX << endl
<< " Number of local nodes in Y direction = " << numNodesY << endl
<< " Number of global nodes in X direction = " << numNodesX*numProcsX << endl
<< " Number of global nodes in Y direction = " << numNodesY*numProcsY << endl
<< " Number of local nonzero entries = " << numNodesX*numNodesY*numPoints << endl
<< " Number of global nonzero entries = " << numNodesX*numNodesY*numPoints*numProcsX*numProcsY << endl
<< " Number of Processors in X direction = " << numProcsX << endl
<< " Number of Processors in Y direction = " << numProcsY << endl
<< " Number of Points in stencil = " << numPoints << endl << endl;
}
// Print blank line to keep output columns lined up
if (summary && comm.NumProc()>1)
cout << endl << endl << endl << endl << endl << endl << endl << endl<< endl << endl;
if (numProcsX*numProcsY!=comm.NumProc()) {
cerr << "Number of processors = " << comm.NumProc() << endl
<< " is not the product of " << numProcsX << " and " << numProcsY << endl << endl;
return(1);
}
if (numPoints!=5 && numPoints!=9 && numPoints!=25) {
cerr << "Number of points specified = " << numPoints << endl
//......... part of the code omitted here .........
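Example 11's consistency check between the requested processor grid and the communicator size is worth isolating, since it guards every later map construction. A minimal hedged sketch (hypothetical helper, same Epetra_Comm API):

#include <iostream>
#include "Epetra_Comm.h"

// Sketch: verify a requested 2D processor decomposition against NumProc().
bool validDecomposition(const Epetra_Comm& comm, int numProcsX, int numProcsY) {
  if (numProcsX * numProcsY != comm.NumProc()) {
    if (comm.MyPID() == 0)
      std::cerr << "Processor grid " << numProcsX << " x " << numProcsY
                << " does not match NumProc() = " << comm.NumProc() << std::endl;
    return false;
  }
  return true;
}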
Example 12: main
int main(int argc, char *argv[]) {
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
#ifdef HAVE_MPI
int NumProc = Comm.NumProc();
#endif
// Set up the printing utilities
Teuchos::RCP<Teuchos::ParameterList> noxParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& noxParams = *(noxParamsPtr.get());
// Only print output if the "-v" flag is set on the command line
Teuchos::ParameterList& printParams = noxParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 5);
printParams.set("Output Processor", 0);
if( verbose )
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::TestDetails);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
NOX::Utils printing(printParams);
// Identify the test problem
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Starting epetra/NOX_Group/NOX_Group.exe" << endl;
// Identify processor information
#ifdef HAVE_MPI
if (printing.isPrintType(NOX::Utils::TestDetails)) {
printing.out() << "Parallel Run" << endl;
printing.out() << "Number of processors = " << NumProc << endl;
printing.out() << "Print Process = " << MyPID << endl;
}
Comm.Barrier();
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Process " << MyPID << " is alive!" << endl;
Comm.Barrier();
#else
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Serial Run" << endl;
#endif
// Return value
int status = 0;
// *** Insert Testing Here!!! ***
if (status == 0)
printing.out() << "Test passed!" << endl;
else
printing.out() << "Test failed!" << endl;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
// return 0 for a successful test
return status;
}
Example 13: main
// ======================================================================
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
verbose = (Comm.MyPID() == 0);
int nx = 60;
for (int i = 1 ; i < argc ; ++i) {
if (strcmp(argv[i],"-s") == 0) {
SymmetricGallery = true;
Solver = AZ_cg;
}
if(strcmp(argv[i],"-n") == 0 && i+1 < argc) {
i++;
nx = atoi(argv[i]);
}
}
// size of the global matrix.
Teuchos::ParameterList GaleriList;
GaleriList.set("nx", nx);
GaleriList.set("ny", nx * Comm.NumProc());
GaleriList.set("mx", 1);
GaleriList.set("my", Comm.NumProc());
Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
Teuchos::RefCountPtr<Epetra_CrsMatrix> A;
if (SymmetricGallery)
A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
else
A = Teuchos::rcp( Galeri::CreateCrsMatrix("Recirc2D", &*Map, GaleriList) );
// coordinates
Teuchos::RCP<Epetra_MultiVector> coord = Teuchos::rcp( Galeri::CreateCartesianCoordinates("2D",&*Map,GaleriList));
// test the preconditioner
int TestPassed = true;
int who = RUSAGE_SELF;
struct rusage usage;
getrusage(who, &usage); // must run before the first read below, otherwise usage is uninitialized
struct timeval ru_utime = usage.ru_utime;
// ================================== //
// compare point and block relaxation //
// ================================== //
TestPassed = TestPassed &&
ComparePointAndBlock("Jacobi",A,10);
if(verbose) printf(" Jacobi Finished \n");
getrusage(who, &usage); // refresh timing data after the Jacobi comparison
int sec = usage.ru_utime.tv_sec -ru_utime.tv_sec;
int usec = usage.ru_utime.tv_usec -ru_utime.tv_usec;
double tt = (double)sec + 1e-6*(double)usec;
ru_utime = usage.ru_utime;
if(verbose) printf(" Jacobi time %f \n",tt);
TestPassed = TestPassed &&
ComparePointAndBlock("symmetric Gauss-Seidel",A,10);
if(verbose) printf(" sGS finished \n");
getrusage(who, &usage); // refresh timing data after the symmetric Gauss-Seidel comparison
sec = usage.ru_utime.tv_sec -ru_utime.tv_sec;
usec = usage.ru_utime.tv_usec -ru_utime.tv_usec;
tt = (double)sec + 1e-6*(double)usec;
ru_utime = usage.ru_utime;
if(verbose) printf(" sGS time %f \n",tt);
if (!SymmetricGallery) {
TestPassed = TestPassed &&
ComparePointAndBlock("Gauss-Seidel",A,10);
getrusage(who, &usage); // refresh timing data after the Gauss-Seidel comparison
sec = usage.ru_utime.tv_sec -ru_utime.tv_sec;
usec = usage.ru_utime.tv_usec -ru_utime.tv_usec;
tt = (double)sec + 1e-6*(double)usec;
ru_utime = usage.ru_utime;
if(verbose) printf(" GS time %f \n",tt);
if(verbose) printf(" GS Finished \n");
}
if (!TestPassed) {
cout << "Test `Performance.exe' failed!" << endl;
exit(EXIT_FAILURE);
}
#ifdef HAVE_MPI
MPI_Finalize();
#endif
//......... part of the code omitted here .........
Example 14: Comm
#include "Thyra_VectorBase.hpp"
using namespace std;
TEUCHOS_UNIT_TEST(dimension, default)
{
int status = 0;
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
TEST_ASSERT(Comm.NumProc() == 1);
::Stratimikos::DefaultLinearSolverBuilder builder;
Teuchos::RCP<Teuchos::ParameterList> p =
Teuchos::rcp(new Teuchos::ParameterList);
{
p->set("Linear Solver Type", "AztecOO");
//p->set("Preconditioner Type", "Ifpack");
p->set("Preconditioner Type", "None");
Teuchos::ParameterList& az = p->sublist("Linear Solver Types").sublist("AztecOO");
az.sublist("Forward Solve").sublist("AztecOO Settings").set("Output Frequency", 1);
az.sublist("VerboseObject").set("Verbosity Level", "high");
Teuchos::ParameterList& ip = p->sublist("Preconditioner Types").sublist("Ifpack");
ip.sublist("VerboseObject").set("Verbosity Level", "high");
}
Example 15: main
int main(int argc, char *argv[])
{
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
int status = 0; // Converged
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
// Check verbosity level
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the number of elements from the command line
int NumGlobalElements = 0;
if ((argc > 2) && (verbose))
NumGlobalElements = atoi(argv[2]) + 1;
else if ((argc > 1) && (!verbose))
NumGlobalElements = atoi(argv[1]) + 1;
else
NumGlobalElements = 200;
bool success = false;
try {
// The number of unknowns must be at least equal to the
// number of processors.
if (NumGlobalElements < NumProc) {
std::cout << "numGlobalBlocks = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << std::endl;
std::cout << "Test failed!" << std::endl;
throw "NOX Error";
}
if (verbose)
if (MyPID == 0)
std::cout << "\n" << NOX::version() << std::endl;
// Create the interface between NOX and the application
// This object is derived from NOX::Epetra::Interface
Teuchos::RCP<Interface> interface =
Teuchos::rcp(new Interface(NumGlobalElements, Comm));
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
Teuchos::RCP<NOX::Epetra::Vector> noxSoln =
Teuchos::rcp(new NOX::Epetra::Vector(soln, NOX::Epetra::Vector::CreateView));
// Set the PDE factor (for nonlinear forcing term). This could be specified
// via user input.
interface->setPDEfactor(1000.0);
// Set the initial guess
soln->PutScalar(1.0);
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Anderson Accelerated Fixed-Point");
nlParams.sublist("Anderson Parameters").set("Storage Depth", 2);
nlParams.sublist("Anderson Parameters").set("Mixing Parameter", -1.0);
nlParams.sublist("Anderson Parameters").sublist("Preconditioning").set("Precondition", true);
nlParams.sublist("Anderson Parameters").sublist("Preconditioning").set("Recompute Jacobian", true);
//nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 3);
printParams.set("Output Processor", 0);
if (verbose)
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::LinearSolverDetails +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::Debug +
NOX::Utils::TestDetails +
NOX::Utils::Error);
//......... part of the code omitted here .........
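Nearly every example above opens with the same communicator-selection idiom. Distilled into one self-contained sketch (assuming the HAVE_MPI configure macro that the examples themselves test):

#ifdef HAVE_MPI
#include <mpi.h>
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include <iostream>

int main(int argc, char *argv[]) {
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif
  // NumProc() reports the communicator size: >= 1 under MPI, exactly 1 in serial.
  if (Comm.MyPID() == 0)
    std::cout << "Running on " << Comm.NumProc() << " process(es)" << std::endl;
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}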