This page collects typical usage examples of the C++ method teuchos::RCP::Graph. If you have been wondering what C++ RCP::Graph does, how to call it, or where to find working examples, the hand-picked code samples below may help. You can also explore further usage examples of the containing class, teuchos::RCP.

Two code examples of the RCP::Graph method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
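Strictly speaking, Graph() is not a member of teuchos::RCP itself: it is Epetra_CrsMatrix::Graph(), reached through the smart pointer's operator->, and it returns a const reference to the Epetra_CrsGraph holding the matrix's sparsity pattern. Before the full examples, here is a minimal, self-contained sketch of that call; the trivial 1x1 serial matrix is purely illustrative and assumes a Trilinos build with Epetra and Teuchos:

#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_CrsGraph.h"
#include "Teuchos_RCP.hpp"
#include <iostream>

int main()
{
  Epetra_SerialComm Comm;
  Epetra_Map map(1, 0, Comm);   // one global row, index base 0

  // Build a trivial 1x1 matrix just to have something to query.
  Teuchos::RCP<Epetra_CrsMatrix> matrix =
      Teuchos::rcp(new Epetra_CrsMatrix(Copy, map, 1));
  int col = 0;
  double val = 1.0;
  matrix->InsertGlobalValues(0, 1, &val, &col);
  matrix->FillComplete();

  // The call this page indexes: Graph() through the RCP's operator->.
  // It returns a const reference to the graph owned by the matrix.
  const Epetra_CrsGraph &graph = matrix->Graph();
  std::cout << "Global nonzeros: " << graph.NumGlobalNonzeros() << std::endl;
  return 0;
}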
Example 1: run_test
static int run_test(Teuchos::RCP<Epetra_CrsMatrix> matrix,
                    bool verbose,          // display the graph before & after
                    bool contract,         // set global number of partitions to 1/2 num procs
                    int partitioningType,  // hypergraph or graph partitioning, or simple
                    int vertexWeightType,  // use vertex weights?
                    int edgeWeightType,    // use edge/hyperedge weights?
                    int objectType)        // use isorropia's CrsMatrix or CrsGraph
{
  int rc = 0, fail = 0;
#ifdef HAVE_EPETRAEXT
  int localProc = 0;
  double balance1, balance2, cutn1, cutn2, cutl1, cutl2;
  double balance3, cutn3, cutl3;
  double cutWgt1, cutWgt2, cutWgt3;
  int numCuts1, numCuts2, numCuts3, valid;
  int numPartitions = 0;
  int keepDenseEdges = 0;
  int numProcs = 1;

#ifdef HAVE_MPI
  const Epetra_MpiComm &Comm = dynamic_cast<const Epetra_MpiComm &>(matrix->Comm());
  localProc = Comm.MyPID();
  numProcs = Comm.NumProc();
#else
  const Epetra_SerialComm &Comm = dynamic_cast<const Epetra_SerialComm &>(matrix->Comm());
#endif

  int numRows = matrix->NumGlobalRows();

  if (numRows < (numProcs * 100)){
    // By default Zoltan throws out dense edges, defined as those
    // whose number of non-zeros exceeds 25% of the number of vertices.
    //
    // If dense edges are thrown out of a small matrix, there may be nothing left.
    keepDenseEdges = 1;
  }

  double myShareBefore = 1.0 / numProcs;
  double myShare = myShareBefore;

  if (contract){
    numPartitions = numProcs / 2;

    if (numPartitions > numRows)
      numPartitions = numRows;

    if (numPartitions > 0){
      if (localProc < numPartitions){
        myShare = 1.0 / numPartitions;
      }
      else{
        myShare = 0.0;
      }
    }
    else{
      contract = 0;
    }
  }

  // If we want Zoltan's or Isorropia's default weights, then we don't
  // need to supply a CostDescriber object to createBalancedCopy,
  // so we get to test the API functions that don't take a CostDescriber.
  bool noCosts = ((vertexWeightType == NO_APPLICATION_SUPPLIED_WEIGHTS) &&
                  (edgeWeightType == NO_APPLICATION_SUPPLIED_WEIGHTS));

  // Test the interface that has no parameters, if possible
  bool noParams =
    ((partitioningType == HYPERGRAPH_PARTITIONING) && // default, so requires no params
     (numPartitions == 0) &&                          // >0 would require a parameter
     (keepDenseEdges == 0));                          // >0 would require a parameter

  // Maps for original object
  const Epetra_Map &sourceRowMap = matrix->RowMap();
  const Epetra_Map &sourceRangeMap = matrix->RangeMap();
  // const Epetra_Map &sourceColMap = matrix->ColMap();
  const Epetra_Map &sourceDomainMap = matrix->DomainMap();

  int numCols = matrix->NumGlobalCols();
  int nMyRows = sourceRowMap.NumMyElements();
  int base = sourceRowMap.IndexBase();

  // Compute vertex and edge weights
  Isorropia::Epetra::CostDescriber costs;

  Teuchos::RCP<Epetra_Vector> vptr;
  Teuchos::RCP<Epetra_CrsMatrix> eptr;
  Teuchos::RCP<Epetra_Vector> hyperEdgeWeights;

  if (edgeWeightType != NO_APPLICATION_SUPPLIED_WEIGHTS){
    if (partitioningType == GRAPH_PARTITIONING){
      // Create graph edge weights.
      eptr = Teuchos::rcp(new Epetra_CrsMatrix(*matrix));
//......... part of the code is omitted here .........
Example 2: main
//......... part of the code is omitted here .........
printParams.set("MyPID", MyPID);
printParams.set("Output Precision", 5);
printParams.set("Output Processor", 0);
if( verbose )
printParams.set("Output Information",
NOX::Utils::OuterIteration +
NOX::Utils::OuterIterationStatusTest +
NOX::Utils::InnerIteration +
NOX::Utils::Parameters +
NOX::Utils::Details +
NOX::Utils::Warning +
NOX::Utils::TestDetails);
else
printParams.set("Output Information", NOX::Utils::Error +
NOX::Utils::TestDetails);
NOX::Utils printing(printParams);
// Identify the test problem
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Starting epetra/NOX_Operators/NOX_Operators.exe" << std::endl;
// Identify processor information
#ifdef HAVE_MPI
if (printing.isPrintType(NOX::Utils::TestDetails)) {
printing.out() << "Parallel Run" << std::endl;
printing.out() << "Number of processors = " << NumProc << std::endl;
printing.out() << "Print Process = " << MyPID << std::endl;
}
Comm.Barrier();
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Process " << MyPID << " is alive!" << std::endl;
Comm.Barrier();
#else
if (printing.isPrintType(NOX::Utils::TestDetails))
printing.out() << "Serial Run" << std::endl;
#endif
int status = 0;
Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
// Need a NOX::Epetra::Vector for constructor
NOX::Epetra::Vector noxInitGuess(InitialGuess, NOX::DeepCopy);
// Analytic matrix
Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Problem.GetMatrix(), false );
Epetra_Vector A_resultVec(Problem.GetMatrix()->Map());
interface->computeJacobian( InitialGuess, *A );
A->Apply( directionVec, A_resultVec );
// FD operator
Teuchos::RCP<Epetra_CrsGraph> graph = Teuchos::rcp( const_cast<Epetra_CrsGraph*>(&A->Graph()), false );
Teuchos::RCP<NOX::Epetra::FiniteDifference> FD = Teuchos::rcp(
new NOX::Epetra::FiniteDifference(printParams, iReq, noxInitGuess, graph) );
Epetra_Vector FD_resultVec(Problem.GetMatrix()->Map());
FD->computeJacobian(InitialGuess, *FD);
FD->Apply( directionVec, FD_resultVec );
// Matrix-Free operator
Teuchos::RCP<NOX::Epetra::MatrixFree> MF = Teuchos::rcp(
new NOX::Epetra::MatrixFree(printParams, iReq, noxInitGuess) );
Epetra_Vector MF_resultVec(Problem.GetMatrix()->Map());
MF->computeJacobian(InitialGuess, *MF);
MF->Apply( directionVec, MF_resultVec );
// Need NOX::Epetra::Vectors for tests
NOX::Epetra::Vector noxAvec ( A_resultVec , NOX::DeepCopy );
NOX::Epetra::Vector noxFDvec( FD_resultVec, NOX::DeepCopy );
NOX::Epetra::Vector noxMFvec( MF_resultVec, NOX::DeepCopy );
// Create a TestCompare class
NOX::Epetra::TestCompare tester( printing.out(), printing);
double abstol = 1.e-4;
double reltol = 1.e-4 ;
//NOX::TestCompare::CompareType aComp = NOX::TestCompare::Absolute;
status += tester.testVector( noxFDvec, noxAvec, reltol, abstol,
"Finite-Difference Operator Apply Test" );
status += tester.testVector( noxMFvec, noxAvec, reltol, abstol,
"Matrix-Free Operator Apply Test" );
// Summarize test results
if( status == 0 )
printing.out() << "Test passed!" << std::endl;
else
printing.out() << "Test failed!" << std::endl;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
  // Final return value (0 = successful, non-zero = failure)
  return status;
}
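A closing note on the Teuchos::rcp( ..., false ) calls in Example 2: because Epetra_CrsMatrix::Graph() returns a reference to a graph owned by the matrix itself, the RCP is constructed with the ownership flag set to false so that it will not delete the graph when it goes out of scope. The const_cast is there only because the NOX::Epetra::FiniteDifference constructor expects a non-const RCP<Epetra_CrsGraph>. Where no const cast is needed, Teuchos::rcpFromRef(matrix->Graph()) builds an equivalent non-owning handle with less ceremony.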