This page collects typical usage examples of the C++ method Epetra_SerialComm::Barrier. If you are wondering what Epetra_SerialComm::Barrier does or how to call it in C++, the selected code samples below may help. You can also explore further usage examples of the containing class, Epetra_SerialComm.
Shown below are 15 code examples of the Epetra_SerialComm::Barrier method, sorted by popularity by default.
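Before the collected examples, here is a minimal self-contained sketch (not taken from the examples below, and assuming a standard Trilinos/Epetra installation) showing the call in isolation. On an Epetra_SerialComm, Barrier() is a trivial synchronization point, but coding against the shared Epetra_Comm interface keeps the same source valid when an Epetra_MpiComm is substituted, as the examples below do.
#include "Epetra_SerialComm.h"
#include <iostream>
int main()
{
  Epetra_SerialComm Comm;  // serial communicator: a single "process" with rank 0
  Comm.Barrier();          // synchronization point; returns immediately in the serial case
  std::cout << "Rank " << Comm.MyPID() << " of " << Comm.NumProc()
            << " passed the barrier." << std::endl;
  return 0;
}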
Example 1: main
int main(int argc, char *argv[])
{
Teuchos::GlobalMPISession mpiSession(&argc, &argv);
int rank = Teuchos::GlobalMPISession::getRank();
#ifdef HAVE_MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
//cout << "rank: " << rank << " of " << numProcs << endl;
#else
Epetra_SerialComm Comm;
#endif
Comm.Barrier(); // set a breakpoint here to allow attaching a debugger to MPI processes other than the one attached to automatically.
int numRanks = Teuchos::GlobalMPISession::getNProc();
FieldContainer<IndexType> partitionDofCounts(256);
int myDofs = 0;
if (rank==0)
{
myDofs = 300;
}
partitionDofCounts[rank] = myDofs;
MPIWrapper::entryWiseSum(partitionDofCounts);
int partitionDofOffset = 0; // add this to a local partition dof index to get the global dof index
for (int i=0; i<rank; i++)
{
partitionDofOffset += partitionDofCounts[i];
}
int globalDofCount = partitionDofOffset;
for (int i=rank; i<numRanks; i++)
{
globalDofCount += partitionDofCounts[i];
}
int activeCellCount = 1;
Intrepid::FieldContainer<int> globalCellIDDofOffsets(activeCellCount);
// global copy:
MPIWrapper::entryWiseSum(globalCellIDDofOffsets);
return 0;
}
Example 2: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
Teuchos::GlobalMPISession mpiSession(&argc, &argv,0);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int commRank = Teuchos::GlobalMPISession::getRank();
Comm.Barrier(); // set a breakpoint here to allow attaching a debugger to MPI processes other than the one attached to automatically.
Teuchos::CommandLineProcessor cmdp(false,true); // false: don't throw exceptions; true: do return errors for unrecognized options
// problem parameters:
double mu = 0.1;
double permCoef = 1e4;
int numRefs = 0;
int k = 2, delta_k = 2;
string norm = "Graph";
cmdp.setOption("polyOrder",&k,"polynomial order for field variable u");
cmdp.setOption("delta_k", &delta_k, "test space polynomial order enrichment");
cmdp.setOption("numRefs",&numRefs,"number of refinements");
cmdp.setOption("norm", &norm, "norm");
cmdp.setOption("mu", &mu, "mu");
cmdp.setOption("permCoef", &permCoef, "Permeability coefficient");
if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL)
{
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
FunctionPtr zero = TFunction<double>::zero();
FunctionPtr one = TFunction<double>::constant(1);
FunctionPtr sin2pix = Teuchos::rcp( new Sin_ax(2*pi) );
FunctionPtr cos2pix = Teuchos::rcp( new Cos_ax(2*pi) );
FunctionPtr sin2piy = Teuchos::rcp( new Sin_ay(2*pi) );
FunctionPtr cos2piy = Teuchos::rcp( new Cos_ay(2*pi) );
FunctionPtr u1_exact = sin2pix*cos2piy;
FunctionPtr u2_exact = -cos2pix*sin2piy;
FunctionPtr x2 = TFunction<double>::xn(2);
FunctionPtr y2 = TFunction<double>::yn(2);
FunctionPtr p_exact = x2*y2 - 1./9;
FunctionPtr permInv = permCoef*(sin2pix + 1.1);
VarFactoryPtr vf = VarFactory::varFactory();
//fields:
VarPtr sigma1 = vf->fieldVar("sigma1", VECTOR_L2);
VarPtr sigma2 = vf->fieldVar("sigma2", VECTOR_L2);
VarPtr u1 = vf->fieldVar("u1", L2);
VarPtr u2 = vf->fieldVar("u2", L2);
VarPtr p = vf->fieldVar("p", L2);
// traces:
VarPtr u1hat = vf->traceVar("u1hat");
VarPtr u2hat = vf->traceVar("u2hat");
VarPtr t1c = vf->fluxVar("t1c");
VarPtr t2c = vf->fluxVar("t2c");
// test:
VarPtr v1 = vf->testVar("v1", HGRAD);
VarPtr v2 = vf->testVar("v2", HGRAD);
VarPtr tau1 = vf->testVar("tau1", HDIV);
VarPtr tau2 = vf->testVar("tau2", HDIV);
VarPtr q = vf->testVar("q", HGRAD);
BFPtr bf = Teuchos::rcp( new BF(vf) );
bf->addTerm(1./mu*sigma1, tau1);
bf->addTerm(1./mu*sigma2, tau2);
bf->addTerm(u1, tau1->div());
bf->addTerm(u2, tau2->div());
bf->addTerm(-u1hat, tau1->dot_normal());
bf->addTerm(-u2hat, tau2->dot_normal());
bf->addTerm(sigma1, v1->grad());
bf->addTerm(sigma2, v2->grad());
bf->addTerm(-p, v1->dx());
bf->addTerm(-p, v2->dy());
bf->addTerm(t1c, v1);
bf->addTerm(t2c, v2);
bf->addTerm(mu*permInv*u1, v1);
bf->addTerm(mu*permInv*u2, v2);
bf->addTerm(-u1, q->dx());
bf->addTerm(-u2, q->dy());
bf->addTerm(u1hat, q->times_normal_x());
bf->addTerm(u2hat, q->times_normal_y());
RHSPtr rhs = RHS::rhs();
BCPtr bc = BC::bc();
SpatialFilterPtr y_equals_one = SpatialFilter::matchingY(1.0);
//......... part of the code omitted here .........
Example 3: main
int main(int argc, char *argv[]) {
int returnierr=0;
using std::cout;
using std::endl;
using std::flush;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
int size; // Number of MPI processes
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size > 1) {
cout << "This example cannot be run on more than one processor!" << endl;
MPI_Finalize();
returnierr = -1;
return returnierr;
}
#endif
bool verbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
#ifdef EPETRA_MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
if (!verbose) Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
if (verbose) {
cout << EpetraExt::EpetraExt_Version() << endl << endl;
cout << Comm << endl << flush;
}
Comm.Barrier();
int NumMyElements = 3;
Epetra_Map Map( NumMyElements, 0, Comm );
Epetra_CrsGraph Graph( Copy, Map, 1 );
int index[2];
index[0] = 2;
Graph.InsertGlobalIndices( 0, 1, &index[0] );
index[0] = 0;
index[1] = 2;
Graph.InsertGlobalIndices( 1, 2, &index[0] );
index[0] = 1;
Graph.InsertGlobalIndices( 2, 1, &index[0] );
Graph.FillComplete();
// Create an Epetra::CrsMatrix
Epetra_CrsMatrix Matrix( Copy, Graph );
double value[2];
index[0] = 2;
value[0] = 3.0;
Matrix.ReplaceMyValues( 0, 1, &value[0], &index[0] );
index[0] = 0;
index[1] = 2;
value[0] = 2.0;
value[1] = 2.5;
Matrix.ReplaceMyValues( 1, 2, &value[0], &index[0] );
index[0] = 1;
value[0] = 1.0;
Matrix.ReplaceMyValues( 2, 1, &value[0], &index[0] );
Matrix.FillComplete();
EpetraExt::AmesosBTF_CrsMatrix BTFTrans( 0.0, true, verbose );
Epetra_CrsMatrix & NewMatrix = BTFTrans( Matrix );
if (verbose) {
cout << "*************** PERFORMING BTF TRANSFORM ON CRS_MATRIX **************" <<endl<<endl;
cout << "CrsMatrix *before* BTF transform: " << endl << endl;
cout << Matrix << endl;
}
BTFTrans.fwd();
if (verbose) {
cout << "CrsMatrix *after* BTF transform: " << endl << endl;
cout << NewMatrix << endl;
}
#ifdef EPETRA_MPI
MPI_Finalize();
#endif
return returnierr;
//......... part of the code omitted here .........
Example 4: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
bool verbose = (Comm.MyPID() == 0);
double TotalResidual = 0.0;
// Create the Map, defined as a grid, of size nx x ny x nz,
// subdivided into mx x my x mz cubes, each assigned to a
// different processor.
#ifndef FILENAME_SPECIFIED_ON_COMMAND_LINE
ParameterList GaleriList;
GaleriList.set("nx", 4);
GaleriList.set("ny", 4);
GaleriList.set("nz", 4 * Comm.NumProc());
GaleriList.set("mx", 1);
GaleriList.set("my", 1);
GaleriList.set("mz", Comm.NumProc());
Epetra_Map* Map = CreateMap("Cartesian3D", Comm, GaleriList);
// Create a matrix, in this case corresponding to a 3D Laplacian
// discretized using a classical 7-point stencil. Please refer to
// the Galeri documentation for an overview of available matrices.
//
// NOTE: matrix must be symmetric if DSCPACK is used.
Epetra_CrsMatrix* Matrix = CreateCrsMatrix("Laplace3D", Map, GaleriList);
#else
bool transpose = false ;
bool distribute = false ;
bool symmetric ;
Epetra_CrsMatrix *Matrix = 0 ;
Epetra_Map *Map = 0 ;
MyCreateCrsMatrix( argv[1], Comm, Map, transpose, distribute, symmetric, Matrix ) ;
#endif
// build vectors, in this case with 1 vector
Epetra_MultiVector LHS(*Map, 1);
Epetra_MultiVector RHS(*Map, 1);
// create a linear problem object
Epetra_LinearProblem Problem(Matrix, &LHS, &RHS);
// Use this list to set up parameters; here all the available processes
// are used (if supported by the underlying solver). The next two
// entries let Amesos print out some timing and status information.
ParameterList List;
List.set("PrintTiming",true);
List.set("PrintStatus",true);
List.set("MaxProcs",Comm.NumProc());
std::vector<std::string> SolverType;
SolverType.push_back("Amesos_Paraklete");
SolverType.push_back("Amesos_Klu");
Comm.Barrier() ;
#if 1
SolverType.push_back("Amesos_Lapack");
SolverType.push_back("Amesos_Umfpack");
SolverType.push_back("Amesos_Pardiso");
SolverType.push_back("Amesos_Taucs");
SolverType.push_back("Amesos_Superlu");
SolverType.push_back("Amesos_Superludist");
SolverType.push_back("Amesos_Mumps");
SolverType.push_back("Amesos_Dscpack");
SolverType.push_back("Amesos_Scalapack");
#endif
Epetra_Time Time(Comm);
// this is the Amesos factory object that will create
// a specific Amesos solver.
Amesos Factory;
// Cycle over all solvers.
// Only installed solvers will be tested.
for (unsigned int i = 0 ; i < SolverType.size() ; ++i)
{
// Check whether the solver is available or not
if (Factory.Query(SolverType[i]))
{
// 1.- set exact solution (constant vector)
LHS.PutScalar(1.0);
// 2.- create corresponding rhs
Matrix->Multiply(false, LHS, RHS);
// 3.- randomize solution vector
LHS.Random();
// 4.- create the amesos solver object
Amesos_BaseSolver* Solver = Factory.Create(SolverType[i], Problem);
assert (Solver != 0);
//......... part of the code omitted here .........
Example 5: main
int main(int argc, char *argv[]) {
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init( &argc, &argv );
//int size, rank; // Number of MPI processes, My process ID
//MPI_Comm_size(MPI_COMM_WORLD, &size);
//MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#else
//int size = 1; // Serial case (not using MPI)
//int rank = 0;
#endif
bool verbose = false;
int nx = 5;
int ny = 5;
if( argc > 1 )
{
if( argc > 4 )
{
cout << "Usage: " << argv[0] << " [-v [nx [ny]]]" << endl;
exit(1);
}
int loc = 1;
// Check if we should print results to standard out
if(argv[loc][0]=='-' && argv[loc][1]=='v')
{ verbose = true; ++loc; }
if (loc < argc) nx = atoi( argv[loc++] );
if( loc < argc) ny = atoi( argv[loc] );
}
#ifdef EPETRA_MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
bool verbose1 = false;
if(verbose) verbose1 = (MyPID==0);
if(verbose1)
cout << EpetraExt::EpetraExt_Version() << endl << endl;
Comm.Barrier();
if(verbose) cout << Comm << endl << flush;
Comm.Barrier();
int NumGlobalElements = nx * ny;
if( NumGlobalElements < NumProc )
{
cout << "NumGlobalElements = " << NumGlobalElements <<
" cannot be < number of processors = " << NumProc;
exit(1);
}
int IndexBase = 0;
Epetra_Map Map( NumGlobalElements, IndexBase, Comm );
// Extract the global indices of the elements local to this processor
int NumMyElements = Map.NumMyElements();
std::vector<int> MyGlobalElements( NumMyElements );
Map.MyGlobalElements( &MyGlobalElements[0] );
if( verbose ) cout << Map;
// Create the number of non-zeros for a tridiagonal (1D problem) or banded
// (2D problem) matrix
std::vector<int> NumNz( NumMyElements, 5 );
int global_i;
int global_j;
for (int i = 0; i < NumMyElements; ++i)
{
global_j = MyGlobalElements[i] / nx;
global_i = MyGlobalElements[i] - global_j * nx;
if (global_i == 0) NumNz[i] -= 1; // By having separate statements,
if (global_i == nx-1) NumNz[i] -= 1; // this works for 2D as well as 1D
if (global_j == 0) NumNz[i] -= 1; // systems (i.e. nx x 1 or 1 x ny)
if (global_j == ny-1) NumNz[i] -= 1; // or even a 1 x 1 system
}
if(verbose)
{
cout << endl << "NumNz: ";
for (int i = 0; i < NumMyElements; i++) cout << NumNz[i] << " ";
cout << endl;
} // end if
//......... part of the code omitted here .........
Example 6: main
int main(int argc, char *argv[]) {
int ierr=0, returnierr=0;
#ifdef EPETRA_MPI
MPI_Init(&argc,&argv);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
bool verbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
if (!verbose) {
Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
}
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
if (verbose && MyPID==0)
cout << Epetra_Version() << endl << endl;
if (verbose) cout << Comm << endl;
bool verbose1 = verbose;
if (verbose) verbose = (MyPID==0);
int NumMyElements = 10000;
int NumMyElements1 = NumMyElements; // Used for local map
long long NumGlobalElements = NumMyElements*NumProc+EPETRA_MIN(NumProc,3);
if (MyPID < 3) NumMyElements++;
int IndexBase = 0;
bool DistributedGlobal = (NumGlobalElements>NumMyElements);
Epetra_Map* Map;
// Test exceptions
if (verbose)
cout << "*******************************************************************************************" << endl
<< " Testing Exceptions (Expect error messages if EPETRA_NO_ERROR_REPORTS is not defined" << endl
<< "*******************************************************************************************" << endl
<< endl << endl;
try {
if (verbose) cout << "Checking Epetra_Map(-2, IndexBase, Comm)" << endl;
Map = new Epetra_Map((long long)-2, IndexBase, Comm);
}
catch (int Error) {
if (Error!=-1) {
if (Error!=0) {
EPETRA_TEST_ERR(Error,returnierr);
if (verbose) cout << "Error code should be -1" << endl;
}
else {
cout << "Error code = " << Error << "Should be -1" << endl;
returnierr+=1;
}
}
else if (verbose) cout << "Checked OK\n\n" << endl;
}
try {
if (verbose) cout << "Checking Epetra_Map(2, 3, IndexBase, Comm)" << endl;
Map = new Epetra_Map((long long)2, 3, IndexBase, Comm);
}
catch (int Error) {
if (Error!=-4) {
if (Error!=0) {
EPETRA_TEST_ERR(Error,returnierr);
if (verbose) cout << "Error code should be -4" << endl;
}
else {
cout << "Error code = " << Error << "Should be -4" << endl;
returnierr+=1;
}
}
else if (verbose) cout << "Checked OK\n\n" << endl;
}
if (verbose) cerr << flush;
if (verbose) cout << flush;
Comm.Barrier();
if (verbose)
cout << endl << endl
<< "*******************************************************************************************" << endl
<< " Testing valid constructor now......................................................" << endl
<< "*******************************************************************************************" << endl
<< endl << endl;
// Test Epetra-defined uniform linear distribution constructor
Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
ierr = checkmap(*Map, NumGlobalElements, NumMyElements, 0,
IndexBase, Comm, DistributedGlobal);
//......... part of the code omitted here .........
Example 7: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
Teuchos::GlobalMPISession mpiSession(&argc, &argv,0);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
int commRank = Teuchos::GlobalMPISession::getRank();
Comm.Barrier(); // set a breakpoint here to allow attaching a debugger to MPI processes other than the one attached to automatically.
Teuchos::CommandLineProcessor cmdp(false,true); // false: don't throw exceptions; true: do return errors for unrecognized options
// problem parameters:
int spaceDim = 2;
double Re = 40;
bool steady = false;
string problemChoice = "TaylorGreen";
int numRefs = 1;
int p = 2, delta_p = 2;
int numXElems = 1;
int numTElems = 1;
int numSlabs = 1;
bool useConformingTraces = false;
string solverChoice = "KLU";
string multigridStrategyString = "W-cycle";
bool useCondensedSolve = false;
bool useConjugateGradient = true;
bool logFineOperator = false;
// double solverTolerance = 1e-8;
double nonlinearTolerance = 1e-5;
// int maxLinearIterations = 10000;
int maxNonlinearIterations = 20;
int cgMaxIterations = 10000;
double cgTol = 1e-8;
bool computeL2Error = false;
bool exportSolution = false;
bool saveSolution = false;
bool loadSolution = false;
int loadRef = 0;
int loadDirRef = 0;
string norm = "Graph";
string rootDir = ".";
string tag="";
cmdp.setOption("spaceDim", &spaceDim, "spatial dimension");
cmdp.setOption("Re", &Re, "Re");
cmdp.setOption("steady", "transient", &steady, "use steady incompressible Navier-Stokes");
cmdp.setOption("problem", &problemChoice, "Kovasznay, TaylorGreen");
cmdp.setOption("polyOrder",&p,"polynomial order for field variable u");
cmdp.setOption("delta_p", &delta_p, "test space polynomial order enrichment");
cmdp.setOption("numRefs",&numRefs,"number of refinements");
cmdp.setOption("numXElems",&numXElems,"number of elements in x direction");
cmdp.setOption("numTElems",&numTElems,"number of elements in t direction");
cmdp.setOption("numSlabs",&numSlabs,"number of time slabs to use");
cmdp.setOption("norm", &norm, "norm");
cmdp.setOption("conformingTraces", "nonconformingTraces", &useConformingTraces, "use conforming traces");
cmdp.setOption("solver", &solverChoice, "KLU, SuperLU, MUMPS, GMG-Direct, GMG-ILU, GMG-IC");
cmdp.setOption("multigridStrategy", &multigridStrategyString, "Multigrid strategy: V-cycle, W-cycle, Full, or Two-level");
cmdp.setOption("useCondensedSolve", "useStandardSolve", &useCondensedSolve);
cmdp.setOption("CG", "GMRES", &useConjugateGradient);
cmdp.setOption("logFineOperator", "dontLogFineOperator", &logFineOperator);
// cmdp.setOption("solverTolerance", &solverTolerance, "iterative solver tolerance");
cmdp.setOption("nonlinearTolerance", &nonlinearTolerance, "nonlinear solver tolerance");
// cmdp.setOption("maxLinearIterations", &maxLinearIterations, "maximum number of iterations for linear solver");
cmdp.setOption("maxNonlinearIterations", &maxNonlinearIterations, "maximum number of iterations for Newton solver");
cmdp.setOption("exportDir", &rootDir, "export directory");
cmdp.setOption("computeL2Error", "skipL2Error", &computeL2Error, "compute L2 error");
cmdp.setOption("exportSolution", "skipExport", &exportSolution, "export solution to HDF5");
cmdp.setOption("saveSolution", "skipSave", &saveSolution, "save mesh and solution to HDF5");
cmdp.setOption("loadSolution", "skipLoad", &loadSolution, "load mesh and solution from HDF5");
cmdp.setOption("loadRef", &loadRef, "load refinement number");
cmdp.setOption("loadDirRef", &loadDirRef, "which refinement directory to load from");
cmdp.setOption("tag", &tag, "output tag");
if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL)
{
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
map<string, Teuchos::RCP<IncompressibleProblem>> problems;
problems["ManufacturedSolution"] = Teuchos::rcp(new IncompressibleManufacturedSolution(steady, Re, numXElems));
problems["Kovasznay"] = Teuchos::rcp(new KovasznayProblem(steady, Re));
problems["TaylorGreen"] = Teuchos::rcp(new TaylorGreenProblem(steady, Re, numXElems, numSlabs));
problems["Cylinder"] = Teuchos::rcp(new CylinderProblem(steady, Re, numSlabs));
problems["SquareCylinder"] = Teuchos::rcp(new SquareCylinderProblem(steady, Re, numSlabs));
Teuchos::RCP<IncompressibleProblem> problem = problems.at(problemChoice);
// if (commRank == 0)
// {
// Solver::printAvailableSolversReport();
// cout << endl;
// }
Teuchos::RCP<Time> totalTimer = Teuchos::TimeMonitor::getNewCounter("Total Time");
//......... part of the code omitted here .........
Example 8: Eigenvectors
// ***********************************************************
int DG_Prob::Eigenvectors(const double Dt,
const Epetra_Map & Map)
{
printf("Entrou em Eigenvectors\n");
#ifdef HAVE_MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
//MPI::COMM_WORLD.Barrier();
Comm.Barrier();
Teuchos::RCP<Epetra_FECrsMatrix> M = Teuchos::rcp(new Epetra_FECrsMatrix(Copy, Map,0));//&NNz[0]);
Teuchos::RCP<Epetra_FEVector> RHS = Teuchos::rcp(new Epetra_FEVector(Map,1));
DG_MatrizVetor_Epetra(Dt,M,RHS);
Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp(new Epetra_CrsMatrix(Copy, Map, 0 /* &NNz[0]*/));
Epetra_Export Exporter(Map,Map);
A->PutScalar(0.0);
A->Export(*(M.ptr()),Exporter,Add);
A->FillComplete();
using std::cout;
// int nx = 5;
bool boolret;
int MyPID = Comm.MyPID();
bool verbose = true;
bool debug = false;
std::string which("LR");
Teuchos::CommandLineProcessor cmdp(false,true);
cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
cmdp.setOption("debug","nodebug",&debug,"Print debugging information.");
cmdp.setOption("sort",&which,"Targetted eigenvalues (SM,LM,SR,LR,SI,or LI).");
typedef double ScalarType;
typedef Teuchos::ScalarTraits<ScalarType> SCT;
typedef SCT::magnitudeType MagnitudeType;
typedef Epetra_MultiVector MV;
typedef Epetra_Operator OP;
typedef Anasazi::MultiVecTraits<ScalarType,MV> MVT;
typedef Anasazi::OperatorTraits<ScalarType,MV,OP> OPT;
// double rho = 2*nx+1;
// Compute coefficients for discrete convection-diffusion operator
// const double one = 1.0;
// int NumEntries, info;
//************************************
// Start the block Arnoldi iteration
//***********************************
//
// Variables used for the Block Krylov Schur Method
//
int nev = 10;
int blockSize = 1;
int numBlocks = 20;
int maxRestarts = 500;
//int stepSize = 5;
double tol = 1e-8;
// Create a sort manager to pass into the block Krylov-Schur solver manager
// --> Make sure the reference-counted pointer is of type Anasazi::SortManager<>
// --> The block Krylov-Schur solver manager uses Anasazi::BasicSort<> by default,
// so you can also pass in the parameter "Which", instead of a sort manager.
Teuchos::RCP<Anasazi::SortManager<MagnitudeType> > MySort =
Teuchos::rcp( new Anasazi::BasicSort<MagnitudeType>( which ) );
// Set verbosity level
int verbosity = Anasazi::Errors + Anasazi::Warnings;
if (verbose) {
verbosity += Anasazi::FinalSummary + Anasazi::TimingDetails;
}
if (debug) {
verbosity += Anasazi::Debug;
}
//
// Create parameter list to pass into solver manager
//
Teuchos::ParameterList MyPL;
MyPL.set( "Verbosity", verbosity );
MyPL.set( "Sort Manager", MySort );
//MyPL.set( "Which", which );
MyPL.set( "Block Size", blockSize );
MyPL.set( "Num Blocks", numBlocks );
MyPL.set( "Maximum Restarts", maxRestarts );
//MyPL.set( "Step Size", stepSize );
MyPL.set( "Convergence Tolerance", tol );
// Create an Epetra_MultiVector for an initial vector to start the solver.
// Note: This needs to have the same number of columns as the blocksize.
Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(Map, blockSize) );
//......... part of the code omitted here .........
Example 9: main
int main(int argc, char *argv[]) {
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
bool verbose = false;
bool success = false;
try {
int globalLength = 100; // This should suffice
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
// Set up the printing utilities
Teuchos::RCP<Teuchos::ParameterList> noxParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& noxParams = *(noxParamsPtr.get());
// Only print output if the "-v" flag is set on the command line
Teuchos::ParameterList& printParams = noxParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 5);
printParams.set("Output Processor", 0);
if( verbose )
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::TestDetails);
else
printParams.set("Output Information", NOX::Utils::Error);
NOX::Utils printing(printParams);
// Identify the test problem
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Starting epetra/NOX_Vector/NOX_Vector.exe" << std::endl;
// Create a TestCompare class
NOX::TestCompare tester( printing.out(), printing);
double tolerance = 1.e-12;
NOX::TestCompare::CompareType aComp = NOX::TestCompare::Absolute;
// Identify processor information
#ifdef HAVE_MPI
printing.out() << "Parallel Run" << std::endl;
printing.out() << "Number of processors = " << Comm.NumProc() << std::endl;
printing.out() << "Print Process = " << MyPID << std::endl;
Comm.Barrier();
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Process " << MyPID << " is alive!" << std::endl;
Comm.Barrier();
#else
printing.out() << "Serial Run" << std::endl;
#endif
// Create a map describing data distribution
Epetra_Map * standardMap = new Epetra_Map(globalLength, 0, Comm);
// Return value
int status = 0;
// *** Start Testing Here!!! ***
// First create the Epetra_Vector needed to construct our NOX vector
Epetra_Vector * epetraVec = new Epetra_Vector(*standardMap, true);
NOX::Epetra::Vector * noxVec1 = new NOX::Epetra::Vector(*epetraVec, NOX::DeepCopy);
delete epetraVec; epetraVec = 0;
NOX::Epetra::Vector * noxVec2 = new NOX::Epetra::Vector(*noxVec1);
noxVec2->init(1.0);
// Test our norms
NOX::Abstract::Vector::NormType
oneNorm = NOX::Abstract::Vector::OneNorm,
twoNorm = NOX::Abstract::Vector::TwoNorm,
infNorm = NOX::Abstract::Vector::MaxNorm;
double expectedOneNorm = (double) globalLength,
expectedTwoNorm = sqrt( (double) globalLength),
expectedInfNorm = 1.0;
status += tester.testValue( noxVec2->norm(oneNorm), expectedOneNorm,
tolerance, "One-Norm Test", aComp);
//......... part of the code omitted here .........
Example 10: main
int main(int argc, char *argv[])
{
int i;
// Initialize MPI
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
// Create a communicator for Epetra objects
#ifdef HAVE_MPI
Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
Epetra_SerialComm Comm;
#endif
bool verbose = false;
if (argc > 1)
if (argv[1][0]=='-' && argv[1][1]=='v')
verbose = true;
// Get the process ID and the total number of processors
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
int NumGlobalElements = 2; // Hardcoded for D&S Example problem
// A maximum of 2 processors is allowed since there are only 2 equations
if (NumProc >= 3) {
std::cout << "ERROR: Maximum number of processors is 2!" << std::endl;
exit(1);
}
// Create the Problem class. This creates all required
// Epetra objects for the problem and allows calls to the
// function (RHS) and Jacobian evaluation routines.
DennisSchnabel Problem(NumGlobalElements, Comm);
// Get the vector from the Problem
Teuchos::RCP<Epetra_Vector> soln = Problem.getSolution();
NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);
// Initialize Solution
if (MyPID==0) {
(*soln)[0]=2.0;
if (NumProc==1)
(*soln)[1]=0.5;
}
else
(*soln)[0]=0.5;
// Begin Nonlinear Solver ************************************
// Create the top level parameter list
Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());
// Set the nonlinear solver method
nlParams.set("Nonlinear Solver", "Line Search Based");
// Set the printing parameters in the "Printing" sublist
Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 5);
printParams.set("Output Processor", 0);
if ( verbose )
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::TestDetails);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
NOX::Utils printing(printParams);
// Identify the test problem
if (printing.isPrintType(NOX::Utils::TestDetails))
std::cout << "Starting epetra/DS6.5.1/DS_6_5_1.exe" << std::endl;
// Identify processor information
#ifdef HAVE_MPI
if (printing.isPrintType(NOX::Utils::TestDetails)) {
std::cout << "Parallel Run" << std::endl;
std::cout << "Number of processors = " << NumProc << std::endl;
std::cout << "Print Process = " << MyPID << std::endl;
}
Comm.Barrier();
if (printing.isPrintType(NOX::Utils::TestDetails))
std::cout << "Process " << MyPID << " is alive!" << std::endl;
Comm.Barrier();
#else
if (printing.isPrintType(NOX::Utils::TestDetails))
std::cout << "Serial Run" << std::endl;
#endif
//......... part of the code omitted here .........
Example 11: main
//......... part of the code omitted here .........
EPETRA_TEST_ERR(!(A.IndicesAreGlobal()),ierr);
EPETRA_TEST_ERR(!(A.FillComplete()==0),ierr);
EPETRA_TEST_ERR(!(A.IndicesAreLocal()),ierr);
EPETRA_TEST_ERR(A.StorageOptimized(),ierr);
A.OptimizeStorage();
EPETRA_TEST_ERR(!(A.StorageOptimized()),ierr);
EPETRA_TEST_ERR(A.UpperTriangular(),ierr);
EPETRA_TEST_ERR(A.LowerTriangular(),ierr);
if(verbose) cout << "\n*****Testing variable entry constructor\n" << endl;
int NumMyNonzeros = 3 * NumMyEquations;
if(A.LRID(0) >= 0)
NumMyNonzeros--; // If I own first global row, then there is one less nonzero
if(A.LRID(NumGlobalEquations-1) >= 0)
NumMyNonzeros--; // If I own last global row, then there is one less nonzero
EPETRA_TEST_ERR(check(A, NumMyEquations, NumGlobalEquations, NumMyNonzeros, 3*NumGlobalEquations-2,
MyGlobalElements, verbose),ierr);
forierr = 0;
for(i = 0; i < NumMyEquations; i++)
forierr += !(A.NumGlobalIndices(MyGlobalElements[i])==NumNz[i]+1);
EPETRA_TEST_ERR(forierr,ierr);
for(i = 0; i < NumMyEquations; i++)
forierr += !(A.NumMyIndices(i)==NumNz[i]+1);
EPETRA_TEST_ERR(forierr,ierr);
if(verbose) cout << "NumIndices function check OK" << endl;
delete &A;
if(debug) Comm.Barrier();
if(verbose) cout << "\n*****Testing constant entry constructor\n" << endl;
Epetra_CrsGraph& AA = *new Epetra_CrsGraph(Copy, Map, 5);
if(debug) Comm.Barrier();
for(i = 0; i < NumMyEquations; i++)
AA.InsertGlobalIndices(MyGlobalElements[i], 1, MyGlobalElements+i);
// Note: All processors will call the following Insert routines, but only the processor
// that owns it will actually do anything
long long One = 1;
if(AA.MyGlobalRow(0)) {
EPETRA_TEST_ERR(!(AA.InsertGlobalIndices(0, 0, &One)==0),ierr);
}
else
EPETRA_TEST_ERR(!(AA.InsertGlobalIndices(0, 1, &One)==-2),ierr);
EPETRA_TEST_ERR(!(AA.FillComplete()==0),ierr);
EPETRA_TEST_ERR(AA.StorageOptimized(),ierr);
EPETRA_TEST_ERR(!(AA.UpperTriangular()),ierr);
EPETRA_TEST_ERR(!(AA.LowerTriangular()),ierr);
if(debug) Comm.Barrier();
EPETRA_TEST_ERR(check(AA, NumMyEquations, NumGlobalEquations, NumMyEquations, NumGlobalEquations,
MyGlobalElements, verbose),ierr);
if(debug) Comm.Barrier();
forierr = 0;
for(i = 0; i < NumMyEquations; i++)
Example 12: main
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
// define an Epetra communicator
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
// get the proc ID of this process
int MyPID = Comm.MyPID();
// get the total number of processes
int NumProc = Comm.NumProc();
// output some information to std output
cout << Comm << endl;
// ======================== //
// now some basic MPI calls //
// ------------------------ //
int ivalue;
double dvalue, dvalue2;
double* dvalues; dvalues = new double[NumProc];
double* dvalues2; dvalues2 = new double[NumProc];
int root = 0;
// equivalent to MPI_Barrier
Comm.Barrier();
if (MyPID == root) dvalue = 12.0;
// On input, the root processor contains the list of values
// (in this case, a single value). On exit, all processes will
// have the same list of values. Note that all values must be allocated
// before the broadcast
// equivalent to MPI_Bcast
Comm.Broadcast(&dvalue, 1, root);
// as before, but with integer values. Since C++ binds to the appropriate
// interface based on the argument types, the data type need not be specified.
Comm.Broadcast(&ivalue, 1, root);
// equivalent to MPI_Allgather
Comm.GatherAll(dvalues, dvalues2, 1);
// equivalent to MPI_Allreduce with MPI_SUM
dvalue = 1.0*MyPID;
Comm.SumAll( &dvalue, dvalues, 1);
// equivalent to MPI_Allreduce with MPI_MAX
Comm.MaxAll( &dvalue, dvalues, 1);
// equivalent to MPI_Scan with MPI_SUM
dvalue = 1.0 * MyPID;
Comm.ScanSum(&dvalue, &dvalue2, 1);
cout << "On proc " << MyPID << " dvalue2 = " << dvalue2 << endl;
delete[] dvalues;
delete[] dvalues2;
// ======================= //
// Finalize MPI and return //
// ----------------------- //
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return( EXIT_SUCCESS );
} /* main */
Example 13: main
int main(int argc, char * argv[]) {
// Set up the Epetra communicator
#ifdef EPETRA_MPI
MPI_Init(&argc, &argv);
int size; // Number of MPI processes
int rank; // My process ID
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
int size = 1; // Serial case (not using MPI)
int rank = 0; // Processor ID
Epetra_SerialComm Comm;
#endif
// cout << Comm << endl;
int MyPID = Comm.MyPID();
int NumProc = Comm.NumProc();
bool verbose = (MyPID == 0);
cout << "MyPID = " << MyPID << endl
<< "NumProc = " << NumProc << endl;
// Get the problem size from the command line argument
if (argc < 2 || argc > 3) {
if (verbose)
cout << "Usage: " << argv[0] << " nx [ny]" << endl;
exit(1);
} // end if
int nx = atoi(argv[1]); // Get the dimensions for a 1D or 2D
int ny = 1; // central difference problem
if (argc == 3) ny = atoi(argv[2]);
int NumGlobalElements = nx * ny;
if (NumGlobalElements < NumProc) {
if (verbose)
cout << "numGlobalElements = " << NumGlobalElements
<< " cannot be < number of processors = " << NumProc << endl;
exit(1);
} // end if
// Epetra distribution map
int IndexBase = 0; // Zero-based indices
Epetra_Map Map(NumGlobalElements, IndexBase, Comm);
// if (verbose) cout << Map << endl;
// Extract the global indices of the elements local to this processor
int NumMyElements = Map.NumMyElements();
int * MyGlobalElements = new int[NumMyElements];
Map.MyGlobalElements(MyGlobalElements);
for (int p = 0; p < NumProc; p++) {
if (p == MyPID) {
cout << endl << "Processor " << MyPID << ": Global elements = ";
for (int i = 0; i < NumMyElements; i++)
cout << MyGlobalElements[i] << " ";
cout << endl;
} // end if
Comm.Barrier(); // every rank hits the barrier on each pass, so the per-processor output is serialized
} // end for
// Create the number of non-zeros for a tridiagonal (1D problem) or banded
// (2D problem) matrix
int * NumNz = new int[NumMyElements];
int global_i;
int global_j;
for (int i = 0; i < NumMyElements; i++) {
NumNz[i] = 5;
global_j = MyGlobalElements[i] / nx;
global_i = MyGlobalElements[i] - global_j * nx;
if (global_i == 0) NumNz[i] -= 1; // By having separate statements,
if (global_i == nx-1) NumNz[i] -= 1; // this works for 2D as well as 1D
if (global_j == 0) NumNz[i] -= 1; // systems (i.e. nx x 1 or 1 x ny)
if (global_j == ny-1) NumNz[i] -= 1; // or even a 1 x 1 system
}
if (verbose) {
cout << endl << "NumNz: ";
for (int i = 0; i < NumMyElements; i++) cout << NumNz[i] << " ";
cout << endl;
} // end if
// Create the Epetra Compressed Row Sparse matrix
// Note: the actual values for the matrix entries are meaningless for
// this exercise, but I assign them anyway.
Epetra_CrsMatrix A(Copy, Map, NumNz);
double * Values = new double[4];
for (int i = 0; i < 4; i++) Values[i] = -1.0;
int * Indices = new int[4];
double diag = 2.0;
if (ny > 1) diag = 4.0;
int NumEntries;
for (int i = 0; i < NumMyElements; i++) {
global_j = MyGlobalElements[i] / nx;
global_i = MyGlobalElements[i] - global_j * nx;
NumEntries = 0;
if (global_j > 0 && ny > 1)
Indices[NumEntries++] = global_i + (global_j-1)*nx;
if (global_i > 0)
Indices[NumEntries++] = global_i-1 + global_j *nx;
if (global_i < nx-1)
Indices[NumEntries++] = global_i+1 + global_j *nx;
//.........这里部分代码省略.........
Example 14: main
//......... part of the code omitted here .........
// scaling->addColSumScaling(NOX::Epetra::Scaling::Right, scalingVector);
// Create transpose scaling object
Teuchos::RCP<NOX::Epetra::Scaling> trans_scaling = Teuchos::null;
// trans_scaling = Teuchos::rcp(new NOX::Epetra::Scaling);
// Teuchos::RCP<Epetra_Vector> transScalingVector =
// Teuchos::rcp(new Epetra_Vector(soln.Map()));
// trans_scaling->addRowSumScaling(NOX::Epetra::Scaling::Right,
// transScalingVector);
// trans_scaling->addColSumScaling(NOX::Epetra::Scaling::Left,
// transScalingVector);
//bifurcationList.set("Transpose Scaling", trans_scaling);
// Build the linear system solver
Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys =
Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
iReq,
iJac, A,
noxInitGuess, scaling)); // use if scaling
// noxInitGuess)); // use if no scaling
// Create the Loca (continuation) vector
NOX::Epetra::Vector locaSoln(noxInitGuess);
// Create Epetra Factory
Teuchos::RCP<LOCA::Abstract::Factory> epetraFactory = Teuchos::rcp(new LOCA::Epetra::Factory);
// Create global data object
Teuchos::RCP<LOCA::GlobalData> globalData = LOCA::createGlobalData(ParamList, epetraFactory);
// Create the Group - must be LOCA group
Teuchos::RCP<LOCA::Epetra::Group> grpPtr =
Teuchos::rcp(new LOCA::Epetra::Group(globalData, printParams,
iReq, locaSoln,
linSys, p));
// Calculate the first F(x0) as a starting point. This is only needed for
// certain status tests, to ensure that an initial residual (|r0|) is calculated
grpPtr->computeF();
// Set up the status tests to check for convergence
// Determines the error tolerance for the Newton solves
Teuchos::RCP<NOX::StatusTest::NormF> testNormF =
Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-4));
// Sets the max number of nonlinear (Newton) iterations that will be taken. If this is not
// already set, it will default to the '20' given
Teuchos::RCP<NOX::StatusTest::MaxIters> testMaxIters =
Teuchos::rcp(new NOX::StatusTest::MaxIters(stepperList.get("Max Nonlinear Iterations", 20)));
// This combination of tests will be used by NOX to determine whether the step converged
Teuchos::RCP<NOX::StatusTest::Combo> combo =
Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR,
testNormF, testMaxIters));
// This is sample code to write and read parameters to/from a file. Currently not activated!
// To use, change the 'XXXHAVE_TEUCHOS_EXTENDED' TO 'HAVE_TEUCHOS_EXTENDED'
#ifdef XXXHAVE_TEUCHOS_EXTENDED
// Write the parameter list to a file
cout << "Writing parameter list to \"input.xml\"" << endl;
Teuchos::writeParameterListToXmlFile(*ParamList, "input.xml");
// Read in the parameter list from a file
cout << "Reading parameter list from \"input.xml\"" << endl;
Teuchos::RCP<Teuchos::ParameterList> paramList2 =
Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::updateParametersFromXmlFile("input.xml", paramList2.get());
ParamList = paramList2;
#endif
// Create the stepper
LOCA::Stepper stepper(globalData, grpPtr, combo, ParamList);
LOCA::Abstract::Iterator::IteratorStatus status = stepper.run();
// Check if the stepper completed
if (status == LOCA::Abstract::Iterator::Finished)
globalData->locaUtils->out() << "\nAll tests passed!" << endl;
else
if (globalData->locaUtils->isPrintType(NOX::Utils::Error))
globalData->locaUtils->out() << "\nStepper failed to converge!" << endl;
// Output the stepper parameter list info
if (globalData->locaUtils->isPrintType(NOX::Utils::StepperParameters)) {
globalData->locaUtils->out() << endl << "Final Parameters" << endl
<< "*******************" << endl;
stepper.getList()->print(globalData->locaUtils->out());
globalData->locaUtils->out() << endl;
}
// Make sure all processors are done and close the output file
Comm.Barrier();
outFile.close();
// Deallocate memory
LOCA::destroyGlobalData(globalData);
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return(EXIT_SUCCESS);
} // DONE!!
Example 15: main
int main(int argc, char *argv[]) {
int returnierr=0;
using std::cout;
using std::endl;
using std::flush;
#ifdef EPETRA_MPI
// Initialize MPI
MPI_Init(&argc,&argv);
int size; // Number of MPI processes
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size > 1) {
cout << "This example cannot be run on more than one processor!" << endl;
MPI_Finalize();
returnierr = -1;
return returnierr;
}
#endif
bool verbose = false;
// Check if we should print results to standard out
if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;
#ifdef EPETRA_MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
if (!verbose) Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
if (verbose) {
cout << EpetraExt::EpetraExt_Version() << endl << endl;
cout << Comm << endl << flush;
}
Comm.Barrier();
int NumMyElements = 3;
Epetra_Map Map( NumMyElements, 0, Comm );
Epetra_CrsGraph Graph( Copy, Map, 1 );
int index[2];
index[0] = 2;
Graph.InsertGlobalIndices( 0, 1, &index[0] );
index[0] = 0;
index[1] = 2;
Graph.InsertGlobalIndices( 1, 2, &index[0] );
index[0] = 1;
Graph.InsertGlobalIndices( 2, 1, &index[0] );
Graph.FillComplete();
if (verbose) {
cout << "***************** PERFORMING BTF TRANSFORM ON CRS_GRAPH *****************" <<endl<<endl;
cout << "CrsGraph *before* BTF transform: " << endl << endl;
cout << Graph << endl;
}
EpetraExt::AmesosBTF_CrsGraph BTFTrans( true, verbose );
Epetra_CrsGraph & NewBTFGraph = BTFTrans( Graph );
if (verbose) {
cout << "CrsGraph *after* BTF transform: " << endl << endl;
cout << NewBTFGraph << endl;
}
// Use BTF graph transformation to solve linear system.
// Create an Epetra::CrsMatrix
Epetra_CrsMatrix Matrix( Copy, Graph );
double value[2];
index[0] = 2; value[0] = 3.0;
Matrix.ReplaceMyValues( 0, 1, &value[0], &index[0] );
index[0] = 0; index[1] = 2;
value[0] = 2.0; value[1] = 2.5;
Matrix.ReplaceMyValues( 1, 2, &value[0], &index[0] );
index[0] = 1; value[0] = 1.0;
Matrix.ReplaceMyValues( 2, 1, &value[0], &index[0] );
Matrix.FillComplete();
// Create the solution and right-hand side vectors.
Epetra_MultiVector RHS( Map, 1 ), LHS( Map, 1 );
LHS.PutScalar( 0.0 );
RHS.ReplaceMyValue( 0, 0, 3.0 );
RHS.ReplaceMyValue( 1, 0, 4.5 );
RHS.ReplaceMyValue( 2, 0, 1.0 );
Epetra_LinearProblem problem( &Matrix, &LHS, &RHS );
if (verbose) {
cout << "*************** PERFORMING BTF TRANSFORM ON LINEAR_PROBLEM **************" <<endl<<endl;
cout << "CrsMatrix *before* BTF transform: " << endl << endl;
//......... part of the code omitted here .........