This page collects typical usage examples of the C++ teuchos::RCP::ColMap method. If you are wondering what RCP::ColMap does in C++, how to use it, or want to see it in context, the curated examples below may help. You can also explore further usage examples of the enclosing class, teuchos::RCP.
The following presents 7 code examples of the RCP::ColMap method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
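Strictly speaking, ColMap() is a method of Epetra classes such as Epetra_CrsMatrix and Epetra_CrsGraph; the "RCP::ColMap" spelling arises because the call goes through Teuchos::RCP's operator->. Before the examples, a minimal sketch of the common pattern (an illustration only; it assumes the matrix has already been FillComplete()d, since the column map is not defined before then):

#include <iostream>
#include <Epetra_CrsMatrix.h>
#include <Epetra_Vector.h>
#include <Teuchos_RCP.hpp>

// Sketch: ColMap() describes the column distribution of A, i.e. the layout
// an input vector needs for the local part of a matrix-vector product.
void colMapSketch(const Teuchos::RCP<const Epetra_CrsMatrix>& A) {
  const Epetra_Map& colMap = A->ColMap();   // call through the RCP's operator->
  Epetra_Vector xCol(colMap);               // vector laid out on the column map
  std::cout << "Local columns: " << colMap.NumMyElements() << std::endl;
}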
Example 1: FiniteDifference
FiniteDifferenceColoring::FiniteDifferenceColoring(
  Teuchos::ParameterList& printingParams,
  const Teuchos::RCP<Interface::Required>& i,
  const NOX::Epetra::Vector& x,
  const Teuchos::RCP<Epetra_CrsGraph>& rawGraph_,
  const Teuchos::RCP<Epetra_MapColoring>& colorMap_,
  const Teuchos::RCP< std::vector<Epetra_IntVector> >& columns_,
  bool parallelColoring,
  bool distance1_,
  double beta_, double alpha_) :
  FiniteDifference(printingParams, i, x, rawGraph_, beta_, alpha_),
  coloringType(NOX_SERIAL),
  distance1(distance1_),
  colorMap(colorMap_),
  columns(columns_),
  numColors(colorMap->NumColors()),
  maxNumColors(colorMap->MaxNumColors()),
  colorList(colorMap->ListOfColors()),
  cMap(0),
  Importer(0),
  colorVect(0),
  betaColorVect(0),
  // work vectors are laid out on the graph's column map
  mappedColorVect(new Epetra_Vector(rawGraph_->ColMap())),
  xCol_perturb(new Epetra_Vector(rawGraph_->ColMap())),
  columnMap(&(rawGraph_->ColMap())),
  rowColImporter(new Epetra_Import(*columnMap, fo.Map()))
{
  label = "NOX::FiniteDifferenceColoring Jacobian";

  if( parallelColoring )
    coloringType = NOX_PARALLEL;

  createColorContainers();
}
Example 2: Exception
Colorer::Colorer(Teuchos::RCP<const Epetra_CrsGraph> input_graph,
                 const Teuchos::ParameterList& paramlist, bool compute_now):
  Operator(input_graph, paramlist, 1)
{
#ifdef HAVE_EPETRAEXT
  // non-owning RCP: the column map belongs to the input graph
  colmap_ = Teuchos::rcp(&(input_graph->ColMap()), false);
#endif /* HAVE_EPETRAEXT */

#ifdef HAVE_ISORROPIA_ZOLTAN
  lib_ = Teuchos::rcp(new ZoltanLibClass(input_graph, Library::graph_input_));
#else /* HAVE_ISORROPIA_ZOLTAN */
  throw Isorropia::Exception("Coloring only available in Zoltan");
#endif /* HAVE_ISORROPIA_ZOLTAN */

  if (compute_now)
    color(true);
}
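For context, a minimal, hypothetical driver for the constructor above (a sketch only; the buildGraph() helper is an assumption, not part of Isorropia, and the class is assumed to be Isorropia::Epetra::Colorer):

// Hypothetical driver sketch; buildGraph() is an assumed helper.
Teuchos::RCP<const Epetra_CrsGraph> graph = buildGraph();
Teuchos::ParameterList params;  // default settings
// Defer the computation at construction, then trigger it explicitly,
// exactly as the compute_now branch in the constructor would.
Isorropia::Epetra::Colorer colorer(graph, params, /*compute_now=*/false);
colorer.color(true);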
Example 3: blockOffsets
//......... portions of this code omitted .........
// may be more expensive!

   // scatter operation for each cell in workset
   for(std::size_t worksetCellIndex=0;worksetCellIndex<localCellIds.size();++worksetCellIndex) {
      std::size_t cellLocalId = localCellIds[worksetCellIndex];

      globalIndexer_->getElementGIDs(cellLocalId,GIDs,blockId);

      // calculate the local IDs for this element
      LIDs.resize(GIDs.size());
      for(std::size_t i=0;i<GIDs.size();i++) {
         // used for doing local ID lookups
         RCP<const Epetra_Map> r_map = blockedContainer->getMapForBlock(GIDs[i].first);

         LIDs[i] = r_map->LID(GIDs[i].second);
      }

      // loop over each field to be scattered
      Teuchos::ArrayRCP<double> local_r;
      for(std::size_t fieldIndex = 0; fieldIndex < scatterFields_.size(); fieldIndex++) {
         int fieldNum = fieldIds_[fieldIndex];
         int blockRowIndex = globalIndexer_->getFieldBlock(fieldNum);

         // grab local data for input
         if(r!=Teuchos::null) {
            RCP<SpmdVectorBase<double> > block_r = rcp_dynamic_cast<SpmdVectorBase<double> >(r->getNonconstVectorBlock(blockRowIndex));
            block_r->getNonconstLocalData(ptrFromRef(local_r));
         }

         const std::vector<int> & elmtOffset = globalIndexer_->getGIDFieldOffsets(blockId,fieldNum);

         // loop over the basis functions (currently they are nodes)
         for(std::size_t rowBasisNum = 0; rowBasisNum < elmtOffset.size(); rowBasisNum++) {
            const ScalarT & scatterField = (scatterFields_[fieldIndex])(worksetCellIndex,rowBasisNum);
            int rowOffset = elmtOffset[rowBasisNum];
            int r_lid = LIDs[rowOffset];

            // Sum residual
            if(local_r!=Teuchos::null)
               local_r[r_lid] += (scatterField.val());

            blockOffsets[numFieldBlocks] = scatterField.size(); // add the sentinel

            // loop over the sensitivity indices: all DOFs on a cell
            jacRow.resize(scatterField.size());
            for(int sensIndex=0;sensIndex<scatterField.size();++sensIndex) {
               jacRow[sensIndex] = scatterField.fastAccessDx(sensIndex);
            }

            for(int blockColIndex=0;blockColIndex<numFieldBlocks;blockColIndex++) {
               int start = blockOffsets[blockColIndex];
               int end = blockOffsets[blockColIndex+1];

               if(end-start<=0)
                  continue;

               // check hash table for jacobian sub block
               std::pair<int,int> blockIndex = std::make_pair(blockRowIndex,blockColIndex);
               Teuchos::RCP<Epetra_CrsMatrix> subJac = jacEpetraBlocks[blockIndex];

               // if you didn't find one before, add it to the hash table
               if(subJac==Teuchos::null) {
                  Teuchos::RCP<Thyra::LinearOpBase<double> > tOp = Jac->getNonconstBlock(blockIndex.first,blockIndex.second);

                  // block operator is null, don't do anything (it is excluded)
                  if(Teuchos::is_null(tOp))
                     continue;

                  Teuchos::RCP<Epetra_Operator> eOp = Thyra::get_Epetra_Operator(*tOp);
                  subJac = rcp_dynamic_cast<Epetra_CrsMatrix>(eOp,true);
                  jacEpetraBlocks[blockIndex] = subJac;
               }

               // Sum Jacobian
               int err = subJac->SumIntoMyValues(r_lid, end-start, &jacRow[start], &LIDs[start]);
               if(err!=0) {
                  RCP<const Epetra_Map> rr = blockedContainer->getMapForBlock(GIDs[start].first);
                  bool sameColMap = subJac->ColMap().SameAs(*rr);

                  std::stringstream ss;
                  ss << "Failed inserting row: " << GIDs[rowOffset].second << " (" << r_lid << "): ";
                  for(int i=start;i<end;i++)
                     ss << GIDs[i].second << " (" << LIDs[i] << ") ";
                  ss << std::endl;
                  ss << "Into block " << blockRowIndex << ", " << blockColIndex << std::endl;

                  ss << "scatter field = ";
                  scatterFields_[fieldIndex].print(ss);
                  ss << std::endl;

                  ss << "Same map = " << (sameColMap ? "true" : "false") << std::endl;

                  TEUCHOS_TEST_FOR_EXCEPTION(err!=0,std::runtime_error,ss.str());
               }
            }
         } // end rowBasisNum
      } // end fieldIndex
   }
}
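The error branch above doubles as a diagnostic recipe: when SumIntoMyValues reports a failure, comparing the block's ColMap against the map the column LIDs were computed from (via SameAs) tells you whether the failure is a column-layout mismatch rather than a genuinely missing entry. A condensed, hypothetical sketch of that idiom, where mat, expectedColMap, and the index/value arrays are assumed to exist:

// Hypothetical condensed sketch of the diagnostic idiom above.
int err = mat->SumIntoMyValues(localRow, numEntries, &values[0], &localCols[0]);
if (err != 0) {
  bool sameColMap = mat->ColMap().SameAs(expectedColMap);  // layout check
  std::stringstream ss;
  ss << "SumIntoMyValues failed for local row " << localRow
     << "; column maps match: " << (sameColMap ? "true" : "false");
  TEUCHOS_TEST_FOR_EXCEPTION(err != 0, std::runtime_error, ss.str());
}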
Example 4: Timer
//......... portions of this code omitted .........
#endif
  }

  int test_var = 0;
  if(test_var != 0){
    std::cout << "The current solution length is: " << x->MyLength() << std::endl;
    x->Print(std::cout);
  }

  // Get preconditioner operator, if requested
  Teuchos::RCP<Epetra_Operator> WPrec_out;
  if (outArgs.supports(OUT_ARG_WPrec)) WPrec_out = outArgs.get_WPrec();

  //
  // Compute the functions
  //
  bool f_already_computed = false;

  // W matrix
  if (W_out != Teuchos::null) {
    app->computeGlobalJacobian(alpha, beta, omega, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, f_out.get(), *W_out_crs);
#ifdef WRITE_MASS_MATRIX_TO_MM_FILE
    //IK, 7/15/14: write mass matrix to Matrix Market file
    //Warning: to read this into MATLAB correctly, the code must be run in serial.
    //Otherwise Mass will have a distributed map, which would also need to be read
    //into MATLAB for Mass to be read in properly.
    app->computeGlobalJacobian(1.0, 0.0, 0.0, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, ftmp.get(), *Mass);
    EpetraExt::RowMatrixToMatrixMarketFile("mass.mm", *Mass);
    EpetraExt::BlockMapToMatrixMarketFile("rowmap.mm", Mass->RowMap());
    EpetraExt::BlockMapToMatrixMarketFile("colmap.mm", Mass->ColMap());
    Teuchos::RCP<Teuchos::FancyOStream> out = Teuchos::VerboseObjectBase::getDefaultOStream();
#endif
    f_already_computed = true;
    if(test_var != 0){
      //std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
      //f_out->Print(std::cout);
      std::cout << "The current Jacobian length is: " << W_out_crs->NumGlobalRows() << std::endl;
      W_out_crs->Print(std::cout);
    }
  }
  if (WPrec_out != Teuchos::null) {
    app->computeGlobalJacobian(alpha, beta, omega, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, f_out.get(), *Extra_W_crs);
    f_already_computed = true;
    if(test_var != 0){
      //std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
      //f_out->Print(std::cout);
      std::cout << "The current preconditioner length is: " << Extra_W_crs->NumGlobalRows() << std::endl;
      Extra_W_crs->Print(std::cout);
    }
    app->computeGlobalPreconditioner(Extra_W_crs, WPrec_out);
  }

  // scalar df/dp
  for (int i=0; i<num_param_vecs; i++) {
    Teuchos::RCP<Epetra_MultiVector> dfdp_out =
      outArgs.get_DfDp(i).getMultiVector();
    if (dfdp_out != Teuchos::null) {
      Teuchos::Array<int> p_indexes =
        outArgs.get_DfDp(i).getDerivativeMultiVector().getParamIndexes();
Example 5: run_test
//......... portions of this code omitted .........
    //sublist.set("DEBUG_LEVEL", "5");  // proc 0 will trace Zoltan calls
    //sublist.set("DEBUG_MEMORY", "2"); // Zoltan will trace alloc & free
  }
#else
  ERROREXIT((localProc==0),
            "Zoltan partitioning required but Zoltan not available.")
#endif

  // Function scope values
  Teuchos::RCP<Epetra_Vector> newvwgts;
  Teuchos::RCP<Epetra_CrsMatrix> newewgts;

  // Function scope values required for LinearProblem
  Epetra_LinearProblem *problem = NULL;
  Epetra_Map *LHSmap = NULL;
  Epetra_MultiVector *RHS = NULL;
  Epetra_MultiVector *LHS = NULL;

  // Pointers to the balanced object
  Epetra_CrsMatrix *matrixPtr=NULL;
  Epetra_CrsGraph *graphPtr=NULL;
  Epetra_RowMatrix *rowMatrixPtr=NULL;
  Epetra_LinearProblem *problemPtr=NULL;

  // Row map for balanced object
  const Epetra_BlockMap *targetBlockRowMap=NULL;  // for input CrsGraph
  const Epetra_Map *targetRowMap=NULL;            // for all other inputs

  // Column map for balanced object
  const Epetra_BlockMap *targetBlockColMap=NULL;  // for input CrsGraph
  const Epetra_Map *targetColMap=NULL;            // for all other inputs

  if (objectType == EPETRA_CRSMATRIX){
    if (noParams && noCosts){
      matrixPtr = Isorropia::Epetra::createBalancedCopy(*matrix);
    }
    else if (noCosts){
      matrixPtr = Isorropia::Epetra::createBalancedCopy(*matrix, params);
    }
    targetRowMap = &(matrixPtr->RowMap());
    targetColMap = &(matrixPtr->ColMap());
  }
  else if (objectType == EPETRA_CRSGRAPH){
    const Epetra_CrsGraph graph = matrix->Graph();
    if (noParams && noCosts){
      graphPtr = Isorropia::Epetra::createBalancedCopy(graph);
    }
    else if (noCosts){
      graphPtr = Isorropia::Epetra::createBalancedCopy(graph, params);
    }
    targetBlockRowMap = &(graphPtr->RowMap());
    targetBlockColMap = &(graphPtr->ColMap());
  }
  else if (objectType == EPETRA_ROWMATRIX){
    if (noParams && noCosts){
      rowMatrixPtr = Isorropia::Epetra::createBalancedCopy(*matrix);
    }
    else if (noCosts){
      rowMatrixPtr = Isorropia::Epetra::createBalancedCopy(*matrix, params);
    }
    targetRowMap = &(rowMatrixPtr->RowMatrixRowMap());
    targetColMap = &(rowMatrixPtr->RowMatrixColMap());
Example 6: SplitMatrix2x2
// helper routines
bool SplitMatrix2x2(Teuchos::RCP<const Epetra_CrsMatrix> A,
                    const Epetra_Map& A11rowmap,
                    const Epetra_Map& A22rowmap,
                    Teuchos::RCP<Epetra_CrsMatrix>& A11,
                    Teuchos::RCP<Epetra_CrsMatrix>& A12,
                    Teuchos::RCP<Epetra_CrsMatrix>& A21,
                    Teuchos::RCP<Epetra_CrsMatrix>& A22)
{
  if (A==Teuchos::null)
  {
    std::cout << "ERROR: SplitMatrix2x2: A==null on entry" << std::endl;
    return false;
  }

  const Epetra_Comm& Comm   = A->Comm();
  const Epetra_Map&  A22map = A22rowmap;
  const Epetra_Map&  A11map = A11rowmap;

  //----------------------------- create a parallel redundant map of A22map
  std::map<int,int> a22gmap;
  {
    std::vector<int> a22global(A22map.NumGlobalElements());
    int count=0;
    // each process broadcasts its locally owned A22 GIDs in turn
    for (int proc=0; proc<Comm.NumProc(); ++proc)
    {
      int length = 0;
      if (proc==Comm.MyPID())
      {
        for (int i=0; i<A22map.NumMyElements(); ++i)
        {
          a22global[count+length] = A22map.GID(i);
          ++length;
        }
      }
      Comm.Broadcast(&length,1,proc);
      Comm.Broadcast(&a22global[count],length,proc);
      count += length;
    }
    if (count != A22map.NumGlobalElements())
    {
      std::cout << "ERROR SplitMatrix2x2: mismatch in dimensions" << std::endl;
      return false;
    }

    // create the map
    for (int i=0; i<count; ++i)
      a22gmap[a22global[i]] = 1;
    a22global.clear();
  }

  //--------------------------------------------------- create matrix A22
  A22 = Teuchos::rcp(new Epetra_CrsMatrix(Copy,A22map,100));
  {
    std::vector<int>    a22gcindices(100);
    std::vector<double> a22values(100);
    for (int i=0; i<A->NumMyRows(); ++i)
    {
      const int grid = A->GRID(i);
      if (A22map.MyGID(grid)==false)
        continue;
      int     numentries;
      double* values;
      int*    cindices;
      int err = A->ExtractMyRowView(i,numentries,values,cindices);
      if (err)
      {
        std::cout << "ERROR: SplitMatrix2x2: A->ExtractMyRowView returned " << err << std::endl;
        return false;
      }

      if (numentries>(int)a22gcindices.size())
      {
        a22gcindices.resize(numentries);
        a22values.resize(numentries);
      }
      int count=0;
      for (int j=0; j<numentries; ++j)
      {
        // translate the local column index to a global ID via the column map
        const int gcid = A->ColMap().GID(cindices[j]);
        // see whether we have gcid in a22gmap
        std::map<int,int>::iterator curr = a22gmap.find(gcid);
        if (curr==a22gmap.end()) continue;
        //std::cout << gcid << " ";
        a22gcindices[count] = gcid;
        a22values[count]    = values[j];
        ++count;
      }
      //std::cout << std::endl; fflush(stdout);
      // add this filtered row to A22
      err = A22->InsertGlobalValues(grid,count,&a22values[0],&a22gcindices[0]);
      if (err<0)
      {
        std::cout << "ERROR: SplitMatrix2x2: A22->InsertGlobalValues returned " << err << std::endl;
        return false;
      }
    } //for (int i=0; i<A->NumMyRows(); ++i)
    a22gcindices.clear();
    a22values.clear();
  }
//......... portions of this code omitted .........
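The listing is truncated before any call site, so here is a minimal, hypothetical caller (a sketch only; the buildMatrix() helper and the even per-process row split are assumptions, not part of the original source):

// Hypothetical driver sketch for SplitMatrix2x2.
Teuchos::RCP<const Epetra_CrsMatrix> A = buildMatrix();  // assumed helper
const Epetra_Map& rowMap = A->RowMap();

// Assume the first half of each process's rows belong to the A11 block.
std::vector<int> myA11, myA22;
for (int i = 0; i < rowMap.NumMyElements(); ++i) {
  if (i < rowMap.NumMyElements()/2) myA11.push_back(rowMap.GID(i));
  else                              myA22.push_back(rowMap.GID(i));
}
Epetra_Map A11rowmap(-1, (int)myA11.size(), &myA11[0], 0, A->Comm());
Epetra_Map A22rowmap(-1, (int)myA22.size(), &myA22[0], 0, A->Comm());

Teuchos::RCP<Epetra_CrsMatrix> A11, A12, A21, A22;
if (!SplitMatrix2x2(A, A11rowmap, A22rowmap, A11, A12, A21, A22))
  throw std::runtime_error("SplitMatrix2x2 failed");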
Example 7: serialImporter
AmesosBTFGlobal_LinearProblem::NewTypeRef
AmesosBTFGlobal_LinearProblem::
operator()( OriginalTypeRef orig )
{
  origObj_ = &orig;

  // Extract the matrix and vectors from the linear problem
  OldRHS_    = Teuchos::rcp( orig.GetRHS(), false );
  OldLHS_    = Teuchos::rcp( orig.GetLHS(), false );
  OldMatrix_ = Teuchos::rcp( dynamic_cast<Epetra_CrsMatrix *>( orig.GetMatrix() ), false );

  int nGlobal = OldMatrix_->NumGlobalRows();
  int n       = OldMatrix_->NumMyRows();

  // Check if the matrix is on one processor.
  int myMatProc = -1, matProc = -1;
  int myPID = OldMatrix_->Comm().MyPID();
  int numProcs = OldMatrix_->Comm().NumProc();

  const Epetra_BlockMap& oldRowMap = OldMatrix_->RowMap();

  // Get some information about the parallel distribution.
  int maxMyRows = 0;
  std::vector<int> numGlobalElem( numProcs );
  OldMatrix_->Comm().GatherAll(&n, &numGlobalElem[0], 1);
  OldMatrix_->Comm().MaxAll(&n, &maxMyRows, 1);

  // A process that owns every nonzero owns the whole matrix.
  for (int proc=0; proc<numProcs; proc++)
  {
    if (OldMatrix_->NumGlobalNonzeros() == OldMatrix_->NumMyNonzeros())
      myMatProc = myPID;
  }
  OldMatrix_->Comm().MaxAll( &myMatProc, &matProc, 1 );

  Teuchos::RCP<Epetra_CrsMatrix> serialMatrix;
  Teuchos::RCP<Epetra_Map> serialMap;
  if( oldRowMap.DistributedGlobal() && matProc == -1)
  {
    // The matrix is distributed and needs to be moved to processor zero.
    // Set the zero processor as the master.
    matProc = 0;
    serialMap = Teuchos::rcp( new Epetra_Map( Epetra_Util::Create_Root_Map( OldMatrix_->RowMap(), matProc ) ) );

    Epetra_Import serialImporter( *serialMap, OldMatrix_->RowMap() );
    serialMatrix = Teuchos::rcp( new Epetra_CrsMatrix( Copy, *serialMap, 0 ) );
    serialMatrix->Import( *OldMatrix_, serialImporter, Insert );
    serialMatrix->FillComplete();
  }
  else {
    // The old matrix has already been moved to one processor (matProc).
    serialMatrix = OldMatrix_;
  }

  if( debug_ )
  {
    std::cout << "Original (serial) Matrix:\n";
    std::cout << *serialMatrix << std::endl;
  }

  // Obtain the current row and column orderings
  std::vector<int> origGlobalRows(nGlobal), origGlobalCols(nGlobal);
  serialMatrix->RowMap().MyGlobalElements( &origGlobalRows[0] );
  serialMatrix->ColMap().MyGlobalElements( &origGlobalCols[0] );

  // Perform reindexing on the full serial matrix (needed for BTF).
  Epetra_Map reIdxMap( serialMatrix->RowMap().NumGlobalElements(), serialMatrix->RowMap().NumMyElements(), 0, serialMatrix->Comm() );
  Teuchos::RCP<EpetraExt::ViewTransform<Epetra_CrsMatrix> > reIdxTrans =
    Teuchos::rcp( new EpetraExt::CrsMatrix_Reindex( reIdxMap ) );
  Epetra_CrsMatrix newSerialMatrix = (*reIdxTrans)( *serialMatrix );
  reIdxTrans->fwd();

  // Compute and apply BTF to the serial CrsMatrix, filtering entries below the threshold.
  EpetraExt::AmesosBTF_CrsMatrix BTFTrans( threshold_, upperTri_, verbose_, debug_ );
  Epetra_CrsMatrix newSerialMatrixBTF = BTFTrans( newSerialMatrix );

  rowPerm_   = BTFTrans.RowPerm();
  colPerm_   = BTFTrans.ColPerm();
  blockPtr_  = BTFTrans.BlockPtr();
  numBlocks_ = BTFTrans.NumBlocks();

  if (myPID == matProc && verbose_) {
    bool isSym = true;
    for (int i=0; i<nGlobal; ++i) {
      if (rowPerm_[i] != colPerm_[i]) {
        isSym = false;
        break;
      }
    }
    std::cout << "The BTF permutation symmetry (0=false,1=true) is : " << isSym << std::endl;
  }

  // Compute the permutation w.r.t. the original row and column GIDs.
  std::vector<int> origGlobalRowsPerm(nGlobal), origGlobalColsPerm(nGlobal);
  if (myPID == matProc) {
    for (int i=0; i<nGlobal; ++i) {
      origGlobalRowsPerm[i] = origGlobalRows[ rowPerm_[i] ];
      origGlobalColsPerm[i] = origGlobalCols[ colPerm_[i] ];
    }
  }
//......... portions of this code omitted .........