This article collects typical usage examples of the C++ method teuchos::RCP::Apply. If you are unsure what RCP::Apply does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the containing class, teuchos::RCP.
Eleven code examples of RCP::Apply are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
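Note that Apply is not actually a member of Teuchos::RCP itself: RCP is Trilinos' reference-counted smart pointer, and an expression such as op->Apply(X, Y) dereferences the RCP and forwards the call to the Apply method of the wrapped object (typically an Epetra_Operator), as the examples below all do. A minimal sketch of this pattern (the helper name applyThroughRCP is only illustrative):

#include <stdexcept>
#include "Teuchos_RCP.hpp"
#include "Epetra_Operator.h"
#include "Epetra_MultiVector.h"

// Apply an operator held through a Teuchos::RCP. The "->" forwards the
// call to Epetra_Operator::Apply on the object the RCP owns.
void applyThroughRCP (const Teuchos::RCP<Epetra_Operator>& op,
                      const Epetra_MultiVector& X,
                      Epetra_MultiVector& Y)
{
  if (op.is_null ()) { // RCP's own null test, also used in Example 1 below
    throw std::logic_error ("applyThroughRCP: op is null");
  }
  op->Apply (X, Y); // computes Y = Op * X
}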
Example 1: logic_error
int
AmesosGenOp::Apply (const Epetra_MultiVector& X, Epetra_MultiVector& Y) const
{
  if (problem_ == NULL) {
    throw std::logic_error ("AmesosGenOp::Apply: problem_ is NULL");
  }
  if (massMtx_.is_null ()) {
    throw std::logic_error ("AmesosGenOp::Apply: massMtx_ is null");
  }
  if (solver_.is_null ()) {
    throw std::logic_error ("AmesosGenOp::Apply: solver_ is null");
  }

  if (! useTranspose_) {
    // Storage for M*X
    Epetra_MultiVector MX (X.Map (), X.NumVectors ());

    // Apply M*X
    massMtx_->Apply (X, MX);
    Y.PutScalar (0.0);

    // Set the LHS and RHS
    problem_->SetRHS (&MX);
    problem_->SetLHS (&Y);

    // Solve the linear system A*Y = MX
    solver_->Solve ();
  }
  else { // apply the transposed operator
    // Storage for A^{-T}*X
    Epetra_MultiVector ATX (X.Map (), X.NumVectors ());

    // Non-const deep copy of X, since SetRHS() needs a non-const pointer.
    Epetra_MultiVector tmpX = const_cast<Epetra_MultiVector&> (X);

    // Set the LHS and RHS
    problem_->SetRHS (&tmpX);
    problem_->SetLHS (&ATX);

    // Solve the linear system A^T*Y = X
    solver_->Solve ();

    // Apply M*ATX
    massMtx_->Apply (ATX, Y);
  }

  return 0; // the method completed correctly
}
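In context, AmesosGenOp wraps a direct solver for A together with a mass matrix M, so that Apply computes Y = A^{-1}*M*X (or M*A^{-T}*X in transpose mode), the operator form used in shift-and-invert style generalized eigenproblems. The setup sketch below is hedged: the AmesosGenOp constructor shown, taking the solver, the mass matrix, and a transpose flag, is an assumption and is not part of the excerpt above.

#include "Amesos.h"
#include "Amesos_BaseSolver.h"
#include "Epetra_LinearProblem.h"

// A minimal sketch: factor A once with a direct solver, then wrap the
// factorization and the mass matrix M as a single Epetra_Operator.
Teuchos::RCP<AmesosGenOp>
makeGenOp (const Teuchos::RCP<Epetra_CrsMatrix>& A,
           const Teuchos::RCP<Epetra_CrsMatrix>& M,
           Epetra_LinearProblem& problem) // must outlive the returned operator
{
  problem.SetOperator (A.get ()); // linear solves will be with A
  Amesos factory;
  Teuchos::RCP<Amesos_BaseSolver> solver =
    Teuchos::rcp (factory.Create ("Klu", problem));
  solver->SymbolicFactorization ();
  solver->NumericFactorization ();
  // Every Apply on the result now reuses this single factorization.
  // Hypothetical constructor: (solver, mass matrix, useTranspose).
  return Teuchos::rcp (new AmesosGenOp (solver, M, false));
}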
Example 2: Apply
int AmesosGenOp::Apply(const Epetra_MultiVector& X, Epetra_MultiVector& Y ) const
{
  if (!useTranspose_) {
    // Storage for M*X
    Epetra_MultiVector MX(X.Map(),X.NumVectors());

    // Apply M*X
    massMtx_->Apply(X, MX);
    Y.PutScalar(0.0);

    // Set the LHS and RHS
    problem_->SetRHS(&MX);
    problem_->SetLHS(&Y);

    // Solve the linear system A*Y = MX
    solver_->Solve();
  }
  else {
    // Storage for A^{-T}*X
    Epetra_MultiVector ATX(X.Map(),X.NumVectors());
    Epetra_MultiVector tmpX = const_cast<Epetra_MultiVector&>(X);

    // Set the LHS and RHS
    problem_->SetRHS(&tmpX);
    problem_->SetLHS(&ATX);

    // Solve the linear system A^T*Y = X
    solver_->Solve();

    // Apply M*ATX
    massMtx_->Apply(ATX, Y);
  }
  return 0;
}
Example 3: apply_block
void apply_block(const int i, const int j, Teuchos::RCP<Epetra_Vector>& output)
{
  const Teuchos::RCP<const Epetra_Operator> block = blocked_op->GetBlock(i, j);
  Epetra_Vector testvec(block->OperatorDomainMap());
  int num_entries = block->OperatorDomainMap().NumMyElements();
  std::vector<int> indices(num_entries);
  for (int k = 0; k != num_entries; ++k) // 'k' avoids shadowing the block index 'i'
    indices[k] = k;
  if (j == 0)
    testvec.ReplaceMyValues(num_entries, &u_test_vec[0], &indices[0]);
  else
    testvec.ReplaceMyValues(num_entries, &p_test_vec[0], &indices[0]);
  output = Teuchos::rcp(new Epetra_Vector(block->OperatorRangeMap()));
  block->Apply(testvec, *output);
}
Example 4: Apply
int AmesosBucklingOp::Apply(const Epetra_MultiVector& X, Epetra_MultiVector& Y ) const
{
  // Storage for A*X
  Epetra_MultiVector AX(X.Map(),X.NumVectors());

  // Apply A*X
  stiffMtx_->Apply(X, AX);
  Y.PutScalar(0.0);

  // Set the LHS and RHS
  problem_->SetRHS(&AX);
  problem_->SetLHS(&Y);

  // Solve the linear system (A-sigma*M)*Y = AX
  solver_->Solve();

  return 0;
}
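For this buckling spectral transformation, problem_ is assumed to hold the shifted operator A - sigma*M, so Apply computes Y = (A - sigma*M)^{-1}*A*X. Below is a hedged sketch of one way to form such a shifted matrix with EpetraExt, assuming M's sparsity pattern is contained in A's, since the sum is accumulated into a fill-completed copy of A:

#include "EpetraExt_MatrixMatrix.h"

// Form S = A - sigma*M by copying A and accumulating -sigma*M into it.
// The copy inherits A's (already filled) graph, hence the pattern assumption.
Teuchos::RCP<Epetra_CrsMatrix>
shiftedOperator (const Epetra_CrsMatrix& A,
                 const Epetra_CrsMatrix& M,
                 const double sigma)
{
  Teuchos::RCP<Epetra_CrsMatrix> S = Teuchos::rcp (new Epetra_CrsMatrix (A));
  // Add computes B = scalarB*B + scalarA*op(A): here S = 1.0*S + (-sigma)*M.
  EpetraExt::MatrixMatrix::Add (M, false, -sigma, *S, 1.0);
  return S;
}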
Example 5: Kvec
void
QCAD::GenEigensolver::evalModel(const InArgs& inArgs,
                                const OutArgs& outArgs ) const
{
  // type definitions
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, Epetra_MultiVector> MVT;

  // Get the stiffness and mass matrices
  InArgs model_inArgs = model->createInArgs();
  OutArgs model_outArgs = model->createOutArgs();

  // input args
  model_inArgs.set_t(0.0);
  Teuchos::RCP<const Epetra_Vector> x = model->get_x_init();
  Teuchos::RCP<const Epetra_Vector> x_dot = model->get_x_dot_init();
  model_inArgs.set_x(x);
  model_inArgs.set_x_dot(x_dot);
  model_inArgs.set_alpha(0.0);
  model_inArgs.set_beta(1.0);
  for(int i=0; i<model_num_p; i++)
    model_inArgs.set_p(i, inArgs.get_p(i));

  // output args
  Teuchos::RCP<Epetra_CrsMatrix> K =
    Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(model->create_W(), true);
  model_outArgs.set_W(K);
  model->evalModel(model_inArgs, model_outArgs); // compute K matrix

  // reset alpha and beta to compute the mass matrix
  model_inArgs.set_alpha(1.0);
  model_inArgs.set_beta(0.0);
  Teuchos::RCP<Epetra_CrsMatrix> M =
    Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(model->create_W(), true);
  model_outArgs.set_W(M);
  model->evalModel(model_inArgs, model_outArgs); // compute M matrix

  Teuchos::RCP<Epetra_MultiVector> ivec =
    Teuchos::rcp( new Epetra_MultiVector(K->OperatorDomainMap(), blockSize) );
  ivec->Random();

  // Create the eigenproblem.
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > eigenProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(K, M, ivec) );

  // Inform the eigenproblem whether the operator is Hermitian
  eigenProblem->setHermitian(bHermitian);

  // Set the number of eigenvalues requested
  eigenProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool bSuccess = eigenProblem->setProblem();
  TEUCHOS_TEST_FOR_EXCEPTION(!bSuccess, Teuchos::Exceptions::InvalidParameter,
    "Anasazi::BasicEigenproblem::setProblem() returned an error.\n" << std::endl);

  // Create parameter list to pass into the solver manager
  Teuchos::ParameterList eigenPL;
  eigenPL.set( "Which", which );
  eigenPL.set( "Block Size", blockSize );
  eigenPL.set( "Maximum Iterations", maxIters );
  eigenPL.set( "Convergence Tolerance", conv_tol );
  eigenPL.set( "Full Ortho", true );
  eigenPL.set( "Use Locking", true );
  eigenPL.set( "Verbosity", Anasazi::IterationDetails );

  // Create the solver manager
  Anasazi::LOBPCGSolMgr<double, MV, OP> eigenSolverMan(eigenProblem, eigenPL);

  // Solve the problem
  Anasazi::ReturnType returnCode = eigenSolverMan.solve();

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = eigenProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  std::vector<double> evals_real(sol.numVecs);
  for(int i=0; i<sol.numVecs; i++) evals_real[i] = evals[i].realpart;

  // Compute residuals.
  std::vector<double> normR(sol.numVecs);
  if (sol.numVecs > 0) {
    Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
    Epetra_MultiVector Kvec( K->OperatorDomainMap(), evecs->NumVectors() );
    Epetra_MultiVector Mvec( M->OperatorDomainMap(), evecs->NumVectors() );
    T.putScalar(0.0);
    for (int i=0; i<sol.numVecs; i++) {
      T(i,i) = evals_real[i];
    }
    K->Apply( *evecs, Kvec );
    M->Apply( *evecs, Mvec );
    MVT::MvTimesMatAddMv( -1.0, Mvec, T, 1.0, Kvec );
    MVT::MvNorm( Kvec, normR );
//......... some code omitted here .........
Example 6: main
//......... some code omitted here .........
    Teuchos::rcp( new Belos::LinearProblem<double,MV,OP>(A, X, B) );

  // The 2-D Laplacian is symmetric. Specify this in the linear problem.
  myProblem->setHermitian();

  // Signal that we are done setting up the linear problem
  ierr = myProblem->setProblem();

  // Check the return from setProblem(). It returns true on success; if it
  // is false, we probably did not give the linear problem enough information.
  assert(ierr == true);

  // Specify the verbosity level. Options include:
  // Belos::Errors
  //   This option is always set
  // Belos::Warnings
  //   Warnings (less severe than errors)
  // Belos::IterationDetails
  //   Details at each iteration, such as the current residuals
  // Belos::OrthoDetails
  //   Details about orthogonality
  // Belos::TimingDetails
  //   A summary of the timing info for the solve() routine
  // Belos::FinalSummary
  //   A final summary
  // Belos::Debug
  //   Debugging information
  int verbosity = Belos::Warnings + Belos::Errors + Belos::FinalSummary + Belos::TimingDetails;

  // Create the parameter list for the linear solver
  Teuchos::RCP<Teuchos::ParameterList> myPL = Teuchos::rcp( new Teuchos::ParameterList() );
  myPL->set( "Verbosity", verbosity );
  myPL->set( "Block Size", blocksize );
  myPL->set( "Maximum Iterations", 100 );
  myPL->set( "Convergence Tolerance", 1.0e-8 );

  // Create the Block CG solver
  // This takes as inputs the linear problem and the solver parameters
  Belos::BlockCGSolMgr<double,MV,OP> mySolver(myProblem, myPL);

  // Solve the linear problem, and save the return code
  Belos::ReturnType solverRet = mySolver.solve();

  // Check the return code of the solver: Converged or Unconverged
  switch (solverRet) {

  // UNCONVERGED
  case Belos::Unconverged:
    if (verbose)
      cout << "Belos::BlockCGSolMgr::solve() did not converge!" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
    break;

  // CONVERGED
  case Belos::Converged:
    if (verbose)
      cout << "Belos::BlockCGSolMgr::solve() converged!" << endl;
    break;
  }

  // Test residuals
  Epetra_MultiVector R( B->Map(), blocksize );

  // R = A*X
  A->Apply( *X, R );

  // R -= B
  MVT::MvAddMv( -1.0, *B, 1.0, R, R );

  // Compute the 2-norm of each vector in the MultiVector
  // and store them to a std::vector<double>
  std::vector<double> normR(blocksize), normB(blocksize);
  MVT::MvNorm( R, normR );
  MVT::MvNorm( *B, normB );

  // Output results to screen
  if(verbose) {
    cout << scientific << setprecision(6) << showpoint;
    cout << "******************************************************\n"
         << "            Results (outside of linear solver)        \n"
         << "------------------------------------------------------\n"
         << "  Linear System\t\tRelative Residual\n"
         << "------------------------------------------------------\n";
    for( int i=0 ; i<blocksize ; ++i ) {
      cout << "  " << i+1 << "\t\t\t" << normR[i]/normB[i] << endl;
    }
    cout << "******************************************************\n" << endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Example 7: main
//......... some code omitted here .........
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 5);
printParams.set("Output Processor", 0);
if( verbose )
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::TestDetails);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
NOX::Utils printing(printParams);
// Identify the test problem
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Starting epetra/NOX_Operators/NOX_Operators.exe" << std::endl;
// Identify processor information
#ifdef HAVE_MPI
if (printing.isPrintType(NOX::Utils::TestDetails)) {
printing.out() << "Parallel Run" << std::endl;
printing.out() << "Number of processors = " << NumProc << std::endl;
printing.out() << "Print Process = " << MyPID << std::endl;
}
Comm.Barrier();
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Process " << MyPID << " is alive!" << std::endl;
Comm.Barrier();
#else
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Serial Run" << std::endl;
#endif
int status = 0;
Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
// Need a NOX::Epetra::Vector for constructor
NOX::Epetra::Vector noxInitGuess(InitialGuess, NOX::DeepCopy);
// Analytic matrix
Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Problem.GetMatrix(), false );
Epetra_Vector A_resultVec(Problem.GetMatrix()->Map());
interface->computeJacobian( InitialGuess, *A );
A->Apply( directionVec, A_resultVec );
// FD operator
Teuchos::RCP<Epetra_CrsGraph> graph = Teuchos::rcp( const_cast<Epetra_CrsGraph*>(&A->Graph()), false );
Teuchos::RCP<NOX::Epetra::FiniteDifference> FD = Teuchos::rcp(
new NOX::Epetra::FiniteDifference(printParams, iReq, noxInitGuess, graph) );
Epetra_Vector FD_resultVec(Problem.GetMatrix()->Map());
FD->computeJacobian(InitialGuess, *FD);
FD->Apply( directionVec, FD_resultVec );
// Matrix-Free operator
Teuchos::RCP<NOX::Epetra::MatrixFree> MF = Teuchos::rcp(
new NOX::Epetra::MatrixFree(printParams, iReq, noxInitGuess) );
Epetra_Vector MF_resultVec(Problem.GetMatrix()->Map());
MF->computeJacobian(InitialGuess, *MF);
MF->Apply( directionVec, MF_resultVec );
// Need NOX::Epetra::Vectors for tests
NOX::Epetra::Vector noxAvec ( A_resultVec , NOX::DeepCopy );
NOX::Epetra::Vector noxFDvec( FD_resultVec, NOX::DeepCopy );
NOX::Epetra::Vector noxMFvec( MF_resultVec, NOX::DeepCopy );
// Create a TestCompare class
NOX::Epetra::TestCompare tester( printing.out(), printing);
double abstol = 1.e-4;
double reltol = 1.e-4 ;
//NOX::TestCompare::CompareType aComp = NOX::TestCompare::Absolute;
status += tester.testVector( noxFDvec, noxAvec, reltol, abstol,
"Finite-Difference Operator Apply Test" );
status += tester.testVector( noxMFvec, noxAvec, reltol, abstol,
"Matrix-Free Operator Apply Test" );
// Summarize test results
if( status == 0 )
printing.out() << "Test passed!" << std::endl;
else
printing.out() << "Test failed!" << std::endl;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
// Final return value (0 = successfull, non-zero = failure)
return status;
}
Example 8: main
//......... some code omitted here .........
MyPL.set( "Num Blocks", numBlocks );
MyPL.set( "Maximum Restarts", maxRestarts );
MyPL.set( "Convergence Tolerance", tol );
typedef Anasazi::MultiVec<double> MV;
typedef Anasazi::Operator<double> OP;
// Create an Anasazi::EpetraMultiVec for an initial vector to start the solver.
// Note: This needs to have the same number of columns as the blocksize.
Teuchos::RCP<Anasazi::EpetraMultiVec> ivec = Teuchos::rcp( new Anasazi::EpetraMultiVec(ColMap, blockSize) );
ivec->MvRandom();
// Call the constructor for the (A^T*A) operator
Teuchos::RCP<Anasazi::EpetraSymOp> Amat = Teuchos::rcp( new Anasazi::EpetraSymOp(A) );
Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem =
Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(Amat, ivec) );
// Inform the eigenproblem that the matrix A is symmetric
MyProblem->setHermitian(true);
// Set the number of eigenvalues requested and the blocksize the solver should use
MyProblem->setNEV( nev );
// Inform the eigenproblem that you are finished passing it information
bool boolret = MyProblem->setProblem();
if (boolret != true) {
if (MyPID == 0) {
cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
}
#ifdef HAVE_MPI
MPI_Finalize() ;
#endif
return -1;
}
// Initialize the Block Arnoldi solver
Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);
// Solve the problem to the specified tolerances or length
Anasazi::ReturnType returnCode = MySolverMgr.solve();
if (returnCode != Anasazi::Converged && MyPID==0) {
cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
}
// Get the eigenvalues and eigenvectors from the eigenproblem
Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
std::vector<Anasazi::Value<double> > evals = sol.Evals;
int numev = sol.numVecs;
if (numev > 0) {
// Compute singular values/vectors and direct residuals.
//
// Compute singular values which are the square root of the eigenvalues
if (MyPID==0) {
cout<<"------------------------------------------------------"<<endl;
cout<<"Computed Singular Values: "<<endl;
cout<<"------------------------------------------------------"<<endl;
}
for (i=0; i<numev; i++) { evals[i].realpart = Teuchos::ScalarTraits<double>::squareroot( evals[i].realpart ); }
//
// Compute left singular vectors : u = Av/sigma
//
std::vector<double> tempnrm(numev), directnrm(numev);
std::vector<int> index(numev);
for (i=0; i<numev; i++) { index[i] = i; }
Anasazi::EpetraMultiVec Av(RowMap,numev), u(RowMap,numev);
Anasazi::EpetraMultiVec* evecs = dynamic_cast<Anasazi::EpetraMultiVec* >(sol.Evecs->CloneViewNonConst( index ));
Teuchos::SerialDenseMatrix<int,double> S(numev,numev);
A->Apply( *evecs, Av );
Av.MvNorm( tempnrm );
for (i=0; i<numev; i++) { S(i,i) = one/tempnrm[i]; };
u.MvTimesMatAddMv( one, Av, S, zero );
//
// Compute direct residuals : || Av - sigma*u ||
//
for (i=0; i<numev; i++) { S(i,i) = evals[i].realpart; }
Av.MvTimesMatAddMv( -one, u, S, one );
Av.MvNorm( directnrm );
if (MyPID==0) {
cout.setf(std::ios_base::right, std::ios_base::adjustfield);
cout<<std::setw(16)<<"Singular Value"
<<std::setw(20)<<"Direct Residual"
<<endl;
cout<<"------------------------------------------------------"<<endl;
for (i=0; i<numev; i++) {
cout<<std::setw(16)<<evals[i].realpart
<<std::setw(20)<<directnrm[i]
<<endl;
}
cout<<"------------------------------------------------------"<<endl;
}
}
#ifdef EPETRA_MPI
MPI_Finalize() ;
#endif
return 0;
}
Example 9: main
int main(int argc, char *argv[])
{
  std::cout << Epetra_Version() << std::endl << std::endl;

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::RCP<Teuchos::FancyOStream> fos = getFancyOStream(Teuchos::rcpFromRef(std::cout));

  // Construct Maps with the given numbers of elements and an index base of 0
  Teuchos::RCP<Epetra_Map> rgMap = Teuchos::rcp(new Epetra_Map(63, 0, Comm));
  Teuchos::RCP<Epetra_Map> doMap = Teuchos::rcp(new Epetra_Map(20, 0, Comm));

  Epetra_CrsMatrix* Pin = NULL;
  EpetraExt::MatrixMarketFileToCrsMatrix("Test.mat",
                                         *rgMap, *rgMap, *doMap,
                                         Pin, false,
                                         true);
  Teuchos::RCP<Epetra_CrsMatrix> P = Teuchos::rcp(Pin);

  Epetra_CrsMatrix* A = NULL;
  A = TridiagMatrix(rgMap.get(), 1.0, -2.0, 1.0);
  Teuchos::RCP<Epetra_CrsMatrix> rcpA = Teuchos::rcp(A);

  ////////////////////////////////////////////
  // plain Epetra
  ////////////////////////////////////////////

  // Create x and b vectors
  Teuchos::RCP<Epetra_Vector> x = Teuchos::rcp(new Epetra_Vector(*doMap));
  Teuchos::RCP<Epetra_Vector> x2 = Teuchos::rcp(new Epetra_Vector(*rgMap));
  Teuchos::RCP<Epetra_Vector> b = Teuchos::rcp(new Epetra_Vector(*rgMap));
  Teuchos::RCP<Epetra_Vector> b2 = Teuchos::rcp(new Epetra_Vector(*rgMap));

  x->PutScalar(1.0);
  x2->PutScalar(1.0);

  double normx = 0.0;
  x->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "||x|| = " << normx << std::endl;
  x2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "||x2|| = " << normx << std::endl;

  /*P->Apply(*x,*b);
  normx = 0.0;
  b->Norm1(&normx);*/
  //if (Comm.MyPID() == 0) std::cout << "||Px||_1 = " << normx << std::endl;

  /*Epetra_RowMatrixTransposer et(&*P);
  Epetra_CrsMatrix* PT;
  int rv = et.CreateTranspose(true,PT);
  if (rv != 0) {
    std::ostringstream buf;
    buf << rv;
    std::string msg = "Utils::Transpose: Epetra::RowMatrixTransposer returned value of " + buf.str();
    std::cout << msg << std::endl;
  }
  Teuchos::RCP<Epetra_CrsMatrix> rcpPT(PT);
  rcpPT->Apply(*x2,*b2);
  normx = 0.0;
  b2->Norm1(&normx);*/
  //if (Comm.MyPID() == 0) std::cout << "||P^T x||_1 = " << normx << std::endl;

  // matrix-matrix multiply
  Teuchos::RCP<Epetra_CrsMatrix> AP = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->RangeMap(),1));
  EpetraExt::MatrixMatrix::Multiply(*rcpA,false,*P,false,*AP, *fos);
  // AP->FillComplete(P->DomainMap(),rcpA->RangeMap());
  //std::cout << *AP << std::endl;

  AP->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||AP x||_1 = " << normx << std::endl;

  // build A^T explicitly
  Epetra_RowMatrixTransposer etA(&*rcpA);
  Epetra_CrsMatrix* AT;
  int rv3 = etA.CreateTranspose(true,AT);
  if (rv3 != 0) {
    std::ostringstream buf;
    buf << rv3;
    std::string msg = "Utils::Transpose: Epetra::RowMatrixTransposer returned value of " + buf.str();
    std::cout << msg << std::endl;
  }
  Teuchos::RCP<Epetra_CrsMatrix> rcpAT(AT);

  // calculate A^T P x
  Teuchos::RCP<Epetra_CrsMatrix> APexpl = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->DomainMap(),20));
  EpetraExt::MatrixMatrix::Multiply(*rcpAT,false,*P,false,*APexpl, *fos);
  // APexpl->FillComplete(P->DomainMap(),rcpA->DomainMap()); // check me

  APexpl->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||A^T_expl P x||_1 = " << normx << std::endl;
//......... some code omitted here .........
Example 10: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid; the global size is nx * nx.
  int nx = 30;
  GaleriList.set("n", nx * nx);
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx);
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Linear", Comm, GaleriList) );
  Teuchos::RCP<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // Variables used for the Block Davidson Method
  const int nev = 4;
  const int blockSize = 5;
  const int numBlocks = 8;
  const int maxRestarts = 100;
  const double tol = 1.0e-8;

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, Epetra_MultiVector> MVT;

  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note: This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(*Map, blockSize) );
  ivec->Random();

  // Create the eigenproblem.
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > problem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(A, ivec) );

  // Inform the eigenproblem that the operator A is symmetric
  problem->setHermitian(true);

  // Set the number of eigenvalues requested
  problem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = problem->setProblem();
  if (boolret != true) {
    std::cout<<"Anasazi::BasicEigenproblem::setProblem() returned an error." << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }

  // Create parameter list to pass into the solver manager
  Teuchos::ParameterList anasaziPL;
  anasaziPL.set( "Which", "LM" );
  anasaziPL.set( "Block Size", blockSize );
  anasaziPL.set( "Maximum Iterations", 500 );
  anasaziPL.set( "Convergence Tolerance", tol );
  anasaziPL.set( "Verbosity", Anasazi::Errors+Anasazi::Warnings+Anasazi::TimingDetails+Anasazi::FinalSummary );

  // Create the solver manager
  Anasazi::LOBPCGSolMgr<double, MV, OP> anasaziSolver(problem, anasaziPL);

  // Solve the problem
  Anasazi::ReturnType returnCode = anasaziSolver.solve();

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = problem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;

  // Compute residuals.
  std::vector<double> normR(sol.numVecs);
  if (sol.numVecs > 0) {
    Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
    Epetra_MultiVector tempAevec( *Map, sol.numVecs );
    T.putScalar(0.0);
    for (int i=0; i<sol.numVecs; i++) {
      T(i,i) = evals[i].realpart;
    }
    A->Apply( *evecs, tempAevec );
    MVT::MvTimesMatAddMv( -1.0, *evecs, T, 1.0, tempAevec );
    MVT::MvNorm( tempAevec, normR );
  }

  // Print the results
  std::cout<<"Solver manager returned " << (returnCode == Anasazi::Converged ? "converged." : "unconverged.") << std::endl;
  std::cout<<std::endl;
  std::cout<<"------------------------------------------------------"<<std::endl;
  std::cout<<std::setw(16)<<"Eigenvalue"
           <<std::setw(18)<<"Direct Residual"
           <<std::endl;
  std::cout<<"------------------------------------------------------"<<std::endl;
  for (int i=0; i<sol.numVecs; i++) {
    std::cout<<std::setw(16)<<evals[i].realpart
//......... some code omitted here .........
Example 11: main
//......... some code omitted here .........
    // Get the stiffness and mass matrices
    Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
    Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );

    // Eigensolver parameters
    int nev = 10;
    int blockSize = 5;
    int maxIters = 500;
    double tol = 1.0e-8;

    Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(K->OperatorDomainMap(), blockSize) );
    ivec->Random();

    // Create the eigenproblem.
    Teuchos::RCP<BasicEigenproblem<double, MV, OP> > MyProblem =
      Teuchos::rcp( new BasicEigenproblem<double, MV, OP>(K, M, ivec) );

    // Inform the eigenproblem that the operator A is symmetric
    MyProblem->setHermitian(true);

    // Set the number of eigenvalues requested
    MyProblem->setNEV( nev );

    // Inform the eigenproblem that you are finished passing it information
    bool boolret = MyProblem->setProblem();
    if (boolret != true) {
      printer.print(Errors,"Anasazi::BasicEigenproblem::setProblem() returned an error.\n");
      throw -1;
    }

    // Create parameter list to pass into the solver manager
    Teuchos::ParameterList MyPL;
    MyPL.set( "Which", which );
    MyPL.set( "Block Size", blockSize );
    MyPL.set( "Maximum Iterations", maxIters );
    MyPL.set( "Convergence Tolerance", tol );
    MyPL.set( "Full Ortho", true );
    MyPL.set( "Use Locking", true );

    // Create the solver manager
    LOBPCGSolMgr<double, MV, OP> MySolverMan(MyProblem, MyPL);

    // Solve the problem
    ReturnType returnCode = MySolverMan.solve();

    // Get the eigenvalues and eigenvectors from the eigenproblem
    Eigensolution<double,MV> sol = MyProblem->getSolution();
    std::vector<Value<double> > evals = sol.Evals;
    Teuchos::RCP<MV> evecs = sol.Evecs;

    // Compute residuals.
    std::vector<double> normR(sol.numVecs);
    if (sol.numVecs > 0) {
      Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
      Epetra_MultiVector Kvec( K->OperatorDomainMap(), evecs->NumVectors() );
      Epetra_MultiVector Mvec( M->OperatorDomainMap(), evecs->NumVectors() );
      T.putScalar(0.0);
      for (int i=0; i<sol.numVecs; i++) {
        T(i,i) = evals[i].realpart;
      }
      K->Apply( *evecs, Kvec );
      M->Apply( *evecs, Mvec );
      MVT::MvTimesMatAddMv( -1.0, Mvec, T, 1.0, Kvec );
      MVT::MvNorm( Kvec, normR );
    }

    // Print the results
    std::ostringstream os;
    os.setf(std::ios_base::right, std::ios_base::adjustfield);
    os<<"Solver manager returned " << (returnCode == Converged ? "converged." : "unconverged.") << std::endl;
    os<<std::endl;
    os<<"------------------------------------------------------"<<std::endl;
    os<<std::setw(16)<<"Eigenvalue"
      <<std::setw(18)<<"Direct Residual"
      <<std::endl;
    os<<"------------------------------------------------------"<<std::endl;
    for (int i=0; i<sol.numVecs; i++) {
      os<<std::setw(16)<<evals[i].realpart
        <<std::setw(18)<<normR[i]/evals[i].realpart
        <<std::endl;
    }
    os<<"------------------------------------------------------"<<std::endl;
    printer.print(Errors,os.str());

    success = true;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}