本文整理汇总了C++中teuchos::RCP::Import方法的典型用法代码示例。如果您正在寻找以下问题的答案:C++ RCP::Import方法的具体用法?C++ RCP::Import怎么用?C++ RCP::Import使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类teuchos::RCP的用法示例。
在下文中一共展示了RCP::Import方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: importer
/// Derive a coloring for the column map from the row-map coloring:
/// each column receives the color of its owning row, transferred via
/// an Epetra_Import from the input (row) map onto the column map.
/// @return coloring defined on *colmap_
Teuchos::RCP<Epetra_MapColoring>
Colorer::generateColMapColoring()
{
  // The rows are colored first; columns inherit those colors.
  Teuchos::RCP<Epetra_MapColoring> rowColors = generateRowMapColoring();

  // Importer that moves data from the input map to the column map.
  Epetra_Import colImporter(*colmap_, *input_map_);

  Teuchos::RCP<Epetra_MapColoring> colColors =
      Teuchos::rcp(new Epetra_MapColoring(*colmap_));
  colColors->Import(*rowColors, colImporter, Insert);

  return colColors;
}
示例2: run_test
//......... (part of the code is omitted in this excerpt) .........
// NOTE(review): this is a truncated interior fragment of run_test(); the
// declarations of RHS, LHS, vals, sourceRowMap, matrix, params, etc. live in
// the omitted portion above, and the function continues past this excerpt.
// Build the RHS/LHS multivectors over the source row map / LHS map.
RHS = new Epetra_MultiVector(Copy, sourceRowMap, vals, 1, 1);
LHS = new Epetra_MultiVector(Copy, *LHSmap, vals, 1, 1);
if (valSize){
delete [] vals;
}
// Wrap the matrix and vectors in a linear problem and validate it.
problem = new Epetra_LinearProblem(matrix.get(), LHS, RHS);
Epetra_LinearProblem lp = *problem;
if (lp.CheckInput()){
ERROREXIT((localProc==0), "Error creating a LinearProblem");
}
// Create a rebalanced copy; the overload depends on whether parameters
// and/or cost objects were supplied by the caller.
if (noParams && noCosts){
problemPtr = Isorropia::Epetra::createBalancedCopy(lp);
}
else if (noCosts){
problemPtr = Isorropia::Epetra::createBalancedCopy(lp, params);
}
// The rebalanced matrix's row/column maps become the import targets below.
targetRowMap = &(problemPtr->GetMatrix()->RowMatrixRowMap());
targetColMap = &(problemPtr->GetMatrix()->RowMatrixColMap());
}
// Redistribute the edge weights
// Comment this out since we don't redistribute columns
if (edgeWeightType != NO_APPLICATION_SUPPLIED_WEIGHTS){
if (partitioningType == GRAPH_PARTITIONING){
Epetra_Import *importer = NULL;
if (objectType == EPETRA_CRSGRAPH){
// Graph case: clone the redistributed graph's structure for the weights.
newewgts = Teuchos::rcp(new Epetra_CrsMatrix(Copy, *graphPtr));
targetRowMap = &(newewgts->RowMap());
targetColMap = &(newewgts->ColMap());
}
else{
newewgts = Teuchos::rcp(new Epetra_CrsMatrix(Copy, *targetRowMap, *targetColMap, 0));
}
// Move the edge-weight matrix onto the new row distribution.
importer = new Epetra_Import(*targetRowMap, sourceRowMap);
newewgts->Import(*eptr, *importer, Insert);
newewgts->FillComplete(*targetColMap, *targetRowMap);
costs.setGraphEdgeWeights(newewgts);
// NOTE(review): `importer` allocated with new is never deleted in the
// visible code -- possible leak; confirm against the omitted portion.
}
}
// Redistribute the vertex weights
if ((vertexWeightType != NO_APPLICATION_SUPPLIED_WEIGHTS)){
Epetra_Import *importer = NULL;
if (objectType == EPETRA_CRSGRAPH){
// Block-row map for the graph case, plain row map otherwise.
newvwgts = Teuchos::rcp(new Epetra_Vector(*targetBlockRowMap));
importer = new Epetra_Import(*targetBlockRowMap, sourceRowMap);
}
else{
newvwgts = Teuchos::rcp(new Epetra_Vector(*targetRowMap));
importer = new Epetra_Import(*targetRowMap, sourceRowMap);
}
示例3: if
// Write one time step of interface aperture data to the Exodus output file,
// then recompute apertures from the current coordinate vectors.
// @param timeStep   Exodus time-step index to write
// @param timeValue  simulation time associated with this step
// @param x          mothership coordinate vector (imported to the overlap map)
// @param y          second mothership coordinate vector -- presumably the
//                   reference/initial coordinates; TODO confirm with caller
// NOTE(review): this excerpt ends before the function's closing brace.
void
PeridigmNS::InterfaceData::WriteExodusOutput(int timeStep, const float & timeValue, Teuchos::RCP<Epetra_Vector> x, Teuchos::RCP<Epetra_Vector> y){
int error_int = 0;
int CPU_word_size = 0;
int IO_word_size = 0;
float version = 0;
// ex_open() wants a mutable char*, so copy the file name into a buffer.
std::string outputFileNameStr = filename.str();
std::vector<char> writable(outputFileNameStr.size() + 1);
std::copy(outputFileNameStr.begin(), outputFileNameStr.end(), writable.begin());
exoid = ex_open(&writable[0], EX_WRITE, &CPU_word_size, &IO_word_size, &version);
error_int = ex_put_time(exoid, timeStep, &timeValue);
TEUCHOS_TEST_FOR_EXCEPTION(error_int,std::logic_error, "ex_put_time(): Failure");
// Apertures are written per element block, so split the per-interface
// values into quad (4-node) and tri (3-node) arrays.
float * quadValues = new float[numQuads];
float * triValues = new float[numTris];
// populate the quad values
int quadIndex = 0;
int triIndex = 0;
for(int i=0;i<numOwnedPoints;++i){
if(interfaceNodesMap->ElementSize(i)==4){
quadValues[quadIndex] = (*interfaceAperture)[i];
quadIndex++;
}
else if(interfaceNodesMap->ElementSize(i)==3){
triValues[triIndex] = (*interfaceAperture)[i];
triIndex++;
}
else{
// Only 3- and 4-node interface elements are supported.
TEUCHOS_TEST_FOR_EXCEPTION(true,std::invalid_argument,"size of this element is not recognized: " << interfaceNodesMap->ElementSize(i));
}
}
// Write element variable 1 for block 1 (quads) then block 2 (tris);
// blockIndex is pre-incremented before each write.
int blockIndex = 0;
const int varIndex = 1;
blockIndex++;
if(numQuads > 0){
error_int = ex_put_elem_var(exoid, timeStep, varIndex, blockIndex, numQuads, &quadValues[0]);
TEUCHOS_TEST_FOR_EXCEPTION(error_int,std::logic_error,"Failure ex_put_elem_var(): ");
}
blockIndex++;
if(numTris > 0){
error_int = ex_put_elem_var(exoid, timeStep, varIndex, blockIndex, numTris, &triValues[0]);
TEUCHOS_TEST_FOR_EXCEPTION(error_int,std::logic_error,"Failure ex_put_elem_var(): ");
}
delete [] quadValues;
delete [] triValues;
// update the apertures...
// import the mothership vectors x and y to the overlap epetra vectors
Teuchos::RCP<const Epetra_Import> importer = Teuchos::rcp(new Epetra_Import(*elemOverlapMap, x->Map()));
Teuchos::RCP<Epetra_Vector> xOverlap = Teuchos::rcp(new Epetra_Vector(*elemOverlapMap,true));
xOverlap->Import(*x,*importer,Insert);
Teuchos::RCP<Epetra_Vector> yOverlap = Teuchos::rcp(new Epetra_Vector(*elemOverlapMap,true));
yOverlap->Import(*y,*importer,Insert);
// Raw views into the overlapped vectors (no copies).
double *xValues;
xOverlap->ExtractView( &xValues );
double *yValues;
yOverlap->ExtractView( &yValues );
// Lower-case (x,y,z) components come from x; upper-case (X,Y,Z) from y.
double xLeft=0,yLeft=0,zLeft=0,xRight=0,yRight=0,zRight=0;
double XLeft=0,YLeft=0,ZLeft=0,XRight=0,YRight=0,ZRight=0;
double X=0,Y=0;
double dx=0,dy=0,dz=0,dX=0,dY=0,dZ=0;
int elemIndexLeft=-1,elemIndexRight=-1,GIDLeft=-1,GIDRight=-1;
// For each owned interface, find the two flanking elements and form the
// separation vector in both coordinate fields (3 dofs per element).
for(int i=0;i<numOwnedPoints;++i){
GIDLeft = elementLeft[i];
GIDRight = elementRight[i];
elemIndexLeft = xOverlap->Map().FirstPointInElement(elemOverlapMap->LID(GIDLeft));
elemIndexRight = xOverlap->Map().FirstPointInElement(elemOverlapMap->LID(GIDRight));
xLeft = xValues[elemIndexLeft+0];
yLeft = xValues[elemIndexLeft+1];
zLeft = xValues[elemIndexLeft+2];
xRight = xValues[elemIndexRight+0];
yRight = xValues[elemIndexRight+1];
zRight = xValues[elemIndexRight+2];
XLeft = yValues[elemIndexLeft+0];
YLeft = yValues[elemIndexLeft+1];
ZLeft = yValues[elemIndexLeft+2];
XRight = yValues[elemIndexRight+0];
YRight = yValues[elemIndexRight+1];
ZRight = yValues[elemIndexRight+2];
dx = xRight - xLeft;
dy = yRight - yLeft;
dz = zRight - zLeft;
dX = XRight - XLeft;
dY = YRight - YLeft;
//......... (remainder of the function omitted in this excerpt) .........
示例4: Comm
std::pair<Teuchos::RCP<std::vector<std::size_t> >,
Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > >
getSideIdsAndCoords(const STK_Interface & mesh,
const std::string & sideName, const std::string type_)
{
Epetra_MpiComm Comm(mesh.getBulkData()->parallel());
unsigned physicalDim = mesh.getDimension();
// grab local IDs and coordinates on this side
// and build local epetra vector
//////////////////////////////////////////////////////////////////
std::pair<Teuchos::RCP<std::vector<std::size_t> >,
Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > > sidePair =
getLocalSideIdsAndCoords(mesh,sideName,type_);
std::vector<std::size_t> & local_side_ids = *sidePair.first;
std::vector<Teuchos::Tuple<double,3> > & local_side_coords = *sidePair.second;
int nodeCount = local_side_ids.size();
// build local Epetra objects
Epetra_Map idMap(-1,nodeCount,0,Comm);
Teuchos::RCP<Epetra_IntVector> localIdVec = Teuchos::rcp(new Epetra_IntVector(idMap));
Teuchos::RCP<Epetra_MultiVector> localCoordVec = Teuchos::rcp(new Epetra_MultiVector(idMap,physicalDim));
// copy local Ids into Epetra vector
for(std::size_t n=0;n<local_side_ids.size();n++) {
std::size_t nodeId = local_side_ids[n];
Teuchos::Tuple<double,3> & coords = local_side_coords[n];
(*localIdVec)[n] = nodeId;
for(unsigned d=0;d<physicalDim;d++)
(*(*localCoordVec)(d))[n] = coords[d];
}
// fully distribute epetra vector across all processors
// (these are "distributed" or "dist" objects)
//////////////////////////////////////////////////////////////
int dist_nodeCount = idMap.NumGlobalElements();
// build global epetra objects
Epetra_LocalMap distMap(dist_nodeCount,0,Comm);
Teuchos::RCP<Epetra_IntVector> distIdVec = Teuchos::rcp(new Epetra_IntVector(distMap));
Teuchos::RCP<Epetra_MultiVector> distCoordVec = Teuchos::rcp(new Epetra_MultiVector(distMap,physicalDim));
// export to the localVec object from the "vector" object
Epetra_Import importer(distMap,idMap);
TEUCHOS_ASSERT(distIdVec->Import(*localIdVec,importer,Insert)==0);
TEUCHOS_ASSERT(distCoordVec->Import(*localCoordVec,importer,Insert)==0);
// convert back to generic stl vector objects
///////////////////////////////////////////////////////////
Teuchos::RCP<std::vector<std::size_t> > dist_side_ids
= Teuchos::rcp(new std::vector<std::size_t>(dist_nodeCount));
Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > dist_side_coords
= Teuchos::rcp(new std::vector<Teuchos::Tuple<double,3> >(dist_nodeCount));
// copy local Ids into Epetra vector
for(std::size_t n=0;n<dist_side_ids->size();n++) {
(*dist_side_ids)[n] = (*distIdVec)[n];
Teuchos::Tuple<double,3> & coords = (*dist_side_coords)[n];
for(unsigned d=0;d<physicalDim;d++)
coords[d] = (*(*distCoordVec)(d))[n];
}
return std::make_pair(dist_side_ids,dist_side_coords);
}
示例5: serialImporter
// Prepare a distributed linear problem for a block-triangular-form (BTF)
// reordering: gather the matrix onto a single processor if necessary,
// reindex it to a contiguous zero-based map, apply the BTF transform, and
// express the resulting row/column permutations in terms of the original
// global IDs.
// NOTE(review): this excerpt ends before the function's closing brace.
AmesosBTFGlobal_LinearProblem::NewTypeRef
AmesosBTFGlobal_LinearProblem::
operator()( OriginalTypeRef orig )
{
origObj_ = &orig;
// Extract the matrix and vectors from the linear problem
OldRHS_ = Teuchos::rcp( orig.GetRHS(), false );
OldLHS_ = Teuchos::rcp( orig.GetLHS(), false );
OldMatrix_ = Teuchos::rcp( dynamic_cast<Epetra_CrsMatrix *>( orig.GetMatrix() ), false );
int nGlobal = OldMatrix_->NumGlobalRows();
int n = OldMatrix_->NumMyRows();
// Check if the matrix is on one processor.
int myMatProc = -1, matProc = -1;
int myPID = OldMatrix_->Comm().MyPID();
int numProcs = OldMatrix_->Comm().NumProc();
const Epetra_BlockMap& oldRowMap = OldMatrix_->RowMap();
// Get some information about the parallel distribution.
int maxMyRows = 0;
std::vector<int> numGlobalElem( numProcs );
OldMatrix_->Comm().GatherAll(&n, &numGlobalElem[0], 1);
OldMatrix_->Comm().MaxAll(&n, &maxMyRows, 1);
// A rank that already owns every nonzero nominates itself as the
// "matrix processor".
// NOTE(review): the loop body does not depend on `proc`, so the same check
// runs numProcs times with an identical outcome -- a single if would do.
for (int proc=0; proc<numProcs; proc++)
{
if (OldMatrix_->NumGlobalNonzeros() == OldMatrix_->NumMyNonzeros())
myMatProc = myPID;
}
// Agree globally on which rank (if any) holds the whole matrix.
OldMatrix_->Comm().MaxAll( &myMatProc, &matProc, 1 );
Teuchos::RCP<Epetra_CrsMatrix> serialMatrix;
Teuchos::RCP<Epetra_Map> serialMap;
if( oldRowMap.DistributedGlobal() && matProc == -1)
{
// The matrix is distributed and needs to be moved to processor zero.
// Set the zero processor as the master.
matProc = 0;
serialMap = Teuchos::rcp( new Epetra_Map( Epetra_Util::Create_Root_Map( OldMatrix_->RowMap(), matProc ) ) );
Epetra_Import serialImporter( *serialMap, OldMatrix_->RowMap() );
serialMatrix = Teuchos::rcp( new Epetra_CrsMatrix( Copy, *serialMap, 0 ) );
serialMatrix->Import( *OldMatrix_, serialImporter, Insert );
serialMatrix->FillComplete();
}
else {
// The old matrix has already been moved to one processor (matProc).
serialMatrix = OldMatrix_;
}
if( debug_ )
{
cout << "Original (serial) Matrix:\n";
cout << *serialMatrix << endl;
}
// Obtain the current row and column orderings
std::vector<int> origGlobalRows(nGlobal), origGlobalCols(nGlobal);
serialMatrix->RowMap().MyGlobalElements( &origGlobalRows[0] );
serialMatrix->ColMap().MyGlobalElements( &origGlobalCols[0] );
// Perform reindexing on the full serial matrix (needed for BTF).
Epetra_Map reIdxMap( serialMatrix->RowMap().NumGlobalElements(), serialMatrix->RowMap().NumMyElements(), 0, serialMatrix->Comm() );
Teuchos::RCP<EpetraExt::ViewTransform<Epetra_CrsMatrix> > reIdxTrans =
Teuchos::rcp( new EpetraExt::CrsMatrix_Reindex( reIdxMap ) );
Epetra_CrsMatrix newSerialMatrix = (*reIdxTrans)( *serialMatrix );
reIdxTrans->fwd();
// Compute and apply BTF to the serial CrsMatrix and has been filtered by the threshold
EpetraExt::AmesosBTF_CrsMatrix BTFTrans( threshold_, upperTri_, verbose_, debug_ );
Epetra_CrsMatrix newSerialMatrixBTF = BTFTrans( newSerialMatrix );
// Record the permutation and block structure produced by the transform.
rowPerm_ = BTFTrans.RowPerm();
colPerm_ = BTFTrans.ColPerm();
blockPtr_ = BTFTrans.BlockPtr();
numBlocks_ = BTFTrans.NumBlocks();
// Report whether the BTF permutation is symmetric (row perm == col perm).
if (myPID == matProc && verbose_) {
bool isSym = true;
for (int i=0; i<nGlobal; ++i) {
if (rowPerm_[i] != colPerm_[i]) {
isSym = false;
break;
}
}
std::cout << "The BTF permutation symmetry (0=false,1=true) is : " << isSym << std::endl;
}
// Compute the permutation w.r.t. the original row and column GIDs.
std::vector<int> origGlobalRowsPerm(nGlobal), origGlobalColsPerm(nGlobal);
if (myPID == matProc) {
for (int i=0; i<nGlobal; ++i) {
origGlobalRowsPerm[i] = origGlobalRows[ rowPerm_[i] ];
origGlobalColsPerm[i] = origGlobalCols[ colPerm_[i] ];
}
}
//......... (remainder of the function omitted in this excerpt) .........