This article collects typical usage examples of the C++ MyGlobalElements method as it is called through a teuchos::RCP. Strictly speaking, MyGlobalElements belongs to the Epetra map classes (Epetra_Map / Epetra_BlockMap); Teuchos::RCP is the Trilinos reference-counted smart pointer through which these map objects are usually held. If you are wondering what RCP::MyGlobalElements does, how to call it, or what calling code looks like in practice, the selected examples below may help. You can also explore further usage examples of the enclosing class teuchos::RCP.
The following shows 7 code examples of RCP::MyGlobalElements, sorted by popularity by default.
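Before the collected examples, here is a minimal self-contained sketch of the pattern they all share: an Epetra map held in a Teuchos::RCP is asked for the global IDs owned by the calling process via MyGlobalElements. The map size and the serial communicator below are placeholders chosen purely for illustration, not taken from any of the examples.

#include <vector>
#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Teuchos_RCP.hpp"

int main() {
  Epetra_SerialComm comm;
  // A map with 10 global elements and index base 0.
  Teuchos::RCP<Epetra_Map> map = Teuchos::rcp(new Epetra_Map(10, 0, comm));

  // Variant 1: copy the locally owned global IDs into a caller-supplied buffer.
  std::vector<int> myGIDs(map->NumMyElements());
  map->MyGlobalElements(&myGIDs[0]);

  // Variant 2: get a view of the map's internal list (owned by the map, do not delete).
  int* gidView = map->MyGlobalElements();

  return (gidView[0] == myGIDs[0]) ? 0 : 1;
}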
Example 1: rcp
// EpetraMap_To_TpetraMap: takes an Epetra_Map object, converts it to its equivalent Tpetra::Map,
// and returns an RCP pointer to this Tpetra::Map.
Teuchos::RCP<const Tpetra_Map> Petra::EpetraMap_To_TpetraMap(const Teuchos::RCP<const Epetra_Map>& epetraMap_,
                                                             const Teuchos::RCP<const Teuchos::Comm<int> >& commT_)
{
  const std::size_t numElements = Teuchos::as<std::size_t>(epetraMap_->NumMyElements());
  const auto indexBase = Teuchos::as<GO>(epetraMap_->IndexBase());
  if (epetraMap_->DistributedGlobal() || epetraMap_->Comm().NumProc() == Teuchos::OrdinalTraits<int>::one()) {
    Teuchos::Array<Tpetra_GO> indices(numElements);
    int *epetra_indices = epetraMap_->MyGlobalElements();
    for (LO i = 0; i < numElements; i++)
      indices[i] = epetra_indices[i];
    const Tpetra::global_size_t computeGlobalElements = Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid();
    return Teuchos::rcp(new Tpetra_Map(computeGlobalElements, indices, indexBase, commT_));
  } else {
    return Teuchos::rcp(new Tpetra_Map(numElements, indexBase, commT_, Tpetra::LocallyReplicated));
  }
}
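A minimal sketch of how this converter might be invoked. The Tpetra_Map and GO typedefs come from the surrounding (unshown) Albany/Petra headers, and the names epetraComm, epetraMap, and commT below are illustrative placeholders, not part of the original example.

// Hypothetical caller of Petra::EpetraMap_To_TpetraMap.
Teuchos::RCP<const Epetra_Map> epetraMap =
    Teuchos::rcp(new Epetra_Map(100, 0, epetraComm));   // epetraComm: an existing Epetra_Comm
Teuchos::RCP<const Teuchos::Comm<int> > commT =
    Teuchos::DefaultComm<int>::getComm();
Teuchos::RCP<const Tpetra_Map> tpetraMap =
    Petra::EpetraMap_To_TpetraMap(epetraMap, commT);
// tpetraMap now carries the same global IDs that epetraMap->MyGlobalElements() reports.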
Example 2:
void
Adapt::NodalDataBlock::resizeOverlapMap(Teuchos::RCP<const Epetra_Map> overlap_nodeMap, const Epetra_Comm& comm){
  // Rebuild the overlap block map from the node map's locally owned global IDs;
  // passing -1 lets Epetra compute the global element count from the local counts.
  // overlap_node_map = Teuchos::rcp(new Epetra_BlockMap(numGlobalNodes,
  overlap_node_map = Teuchos::rcp(new Epetra_BlockMap(-1,
                                                      overlap_nodeMap->NumMyElements(),
                                                      overlap_nodeMap->MyGlobalElements(),
                                                      blocksize,
                                                      0,
                                                      comm));
  // Build the vector and accessors
  overlap_node_vec = Teuchos::rcp(new Epetra_Vector(*overlap_node_map, false));
  mapsHaveChanged = true;
}
Example 3: main
int main(int argc, char *argv[])
{
  int i;
  bool ierr, gerr;
  gerr = true;

#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Teuchos::RCP<Epetra_MpiComm> Comm = Teuchos::rcp( new Epetra_MpiComm(MPI_COMM_WORLD) );
#else
  // If we aren't using MPI, then setup a serial communicator.
  Teuchos::RCP<Epetra_SerialComm> Comm = Teuchos::rcp( new Epetra_SerialComm() );
#endif

  // number of global elements
  int dim = 100;
  int blockSize = 5;

  bool verbose = false;
  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') {
      verbose = true;
    }
  }

  // Construct a Map that puts approximately the same number of
  // equations on each processor.
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( new Epetra_Map(dim, 0, *Comm) );

  // Get update list and number of local equations from the newly created Map.
  int NumMyElements = Map->NumMyElements();
  std::vector<int> MyGlobalElements(NumMyElements);
  Map->MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Petra matrix.
  // NumNz[i] is the number of nonzero terms for the ith global equation
  // on this processor.
  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so we need 2 off-diagonal terms (except for the first and last equation).
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == dim-1) {
      NumNz[i] = 2;
    }
    else {
      NumNz[i] = 3;
    }
  }

  // Create an Epetra_CrsMatrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Map, &NumNz[0]) );

  // Add rows one at a time.
  // Need some vectors to help.
  // Off-diagonal values will always be -1.
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  double two = 2.0;
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == dim-1) {
      Indices[0] = dim-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A->InsertGlobalValues(MyGlobalElements[i],NumEntries,&Values[0],&Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A->InsertGlobalValues(MyGlobalElements[i],1,&two,&MyGlobalElements[i]);
    assert(ierr==0);
  }

  // Finish building the Epetra matrix A
  ierr = A->FillComplete();
  assert(ierr==0);

  // Issue several useful typedefs.
  typedef Belos::MultiVec<double> EMV;
  typedef Belos::Operator<double> EOP;

  // Create an Epetra_MultiVector as an initial vector to start the solver.
  // Note that this needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Belos::EpetraMultiVec> ivec = Teuchos::rcp( new Belos::EpetraMultiVec(*Map, blockSize) );
  ivec->Random();

  // Create an output manager to handle the I/O from the solver
  Teuchos::RCP<Belos::OutputManager<double> > MyOM = Teuchos::rcp( new Belos::OutputManager<double>() );
  if (verbose) {
    MyOM->setVerbosity( Belos::Warnings );
//......... the rest of this example is omitted .........
Example 4: main
int main(int argc, char *argv[])
{
  using Teuchos::rcp_implicit_cast;

  int i, ierr, gerr;
  gerr = 0;

#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Teuchos::RCP<Epetra_MpiComm> Comm = Teuchos::rcp( new Epetra_MpiComm(MPI_COMM_WORLD) );
#else
  // If we aren't using MPI, then setup a serial communicator.
  Teuchos::RCP<Epetra_SerialComm> Comm = Teuchos::rcp( new Epetra_SerialComm() );
#endif

  // number of global elements
  int dim = 100;
  int blockSize = 3;

  // PID info
  int MyPID = Comm->MyPID();

  bool verbose = false;
  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') {
      verbose = true;
    }
  }

  // Construct a Map that puts approximately the same number of
  // equations on each processor.
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( new Epetra_Map(dim, 0, *Comm) );

  // Get update list and number of local equations from the newly created Map.
  int NumMyElements = Map->NumMyElements();
  std::vector<int> MyGlobalElements(NumMyElements);
  Map->MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Petra matrix.
  // NumNz[i] is the number of nonzero terms for the ith global equation
  // on this processor.
  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so we need 2 off-diagonal terms (except for the first and last equation).
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == dim-1) {
      NumNz[i] = 2;
    }
    else {
      NumNz[i] = 3;
    }
  }

  // Create an Epetra_CrsMatrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Map, &NumNz[0]) );

  // Add rows one at a time.
  // Need some vectors to help.
  // Off-diagonal values will always be -1.
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  double two = 2.0;
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == dim-1) {
      Indices[0] = dim-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A->InsertGlobalValues(MyGlobalElements[i],NumEntries,&Values[0],&Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A->InsertGlobalValues(MyGlobalElements[i],1,&two,&MyGlobalElements[i]);
    assert(ierr==0);
  }

  // Finish building the Epetra matrix A
  ierr = A->FillComplete();
  assert(ierr==0);

  // Create a Belos::EpetraOp from this Epetra_CrsMatrix
  Teuchos::RCP<Belos::EpetraOp> op = Teuchos::rcp(new Belos::EpetraOp(A));

  // Issue several useful typedefs.
  typedef Belos::MultiVec<double> EMV;
  typedef Belos::Operator<double> EOP;
//......... the rest of this example is omitted .........
Example 5: main
int main(int argc, char *argv[])
{
  int i;
  bool ierr, gerr;
  gerr = true;

#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Teuchos::RCP<Epetra_MpiComm> Comm = Teuchos::rcp( new Epetra_MpiComm(MPI_COMM_WORLD) );
#else
  // If we aren't using MPI, then setup a serial communicator.
  Teuchos::RCP<Epetra_SerialComm> Comm = Teuchos::rcp( new Epetra_SerialComm() );
#endif

  // number of global elements
  const int dim = 100;
  const int blockSize = 5;

  bool verbose = false;
  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') {
      verbose = true;
    }
  }

  // Create an output manager to handle the I/O from the solver
  Teuchos::RCP<Anasazi::OutputManager<double> > MyOM = Teuchos::rcp( new Anasazi::BasicOutputManager<double>() );
  if (verbose) {
    MyOM->setVerbosity( Anasazi::Warnings );
  }

#ifndef HAVE_EPETRA_THYRA
  MyOM->stream(Anasazi::Warnings)
    << "Please configure Anasazi with:" << std::endl
    << "--enable-epetra-thyra" << std::endl
    << "--enable-anasazi-thyra" << std::endl;
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return -1;
#endif

  // Construct a Map that puts approximately the same number of
  // equations on each processor.
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( new Epetra_Map(dim, 0, *Comm) );

  // Get update list and number of local equations from the newly created Map.
  int NumMyElements = Map->NumMyElements();
  std::vector<int> MyGlobalElements(NumMyElements);
  Map->MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Petra matrix.
  // NumNz[i] is the number of nonzero terms for the ith global equation
  // on this processor.
  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so we need 2 off-diagonal terms (except for the first and last equation).
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == dim-1) {
      NumNz[i] = 2;
    }
    else {
      NumNz[i] = 3;
    }
  }

  // Create an Epetra_CrsMatrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Map, &NumNz[0]) );

  // Add rows one at a time.
  // Need some vectors to help.
  // Off-diagonal values will always be -1.
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  double two = 2.0;
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == dim-1) {
      Indices[0] = dim-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A->InsertGlobalValues(MyGlobalElements[i],NumEntries,&Values[0],&Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A->InsertGlobalValues(MyGlobalElements[i],1,&two,&MyGlobalElements[i]);
    assert(ierr==0);
  }
//......... the rest of this example is omitted .........
Example 6: rcp
TEUCHOS_UNIT_TEST(PdQuickGridDiscretization_MPI_np2, SimpleTensorProductMeshTest) {

  Teuchos::RCP<Epetra_Comm> comm;
  comm = rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
  int numProcs = comm->NumProc();
  int rank = comm->MyPID();

  TEST_COMPARE(numProcs, ==, 2);
  if(numProcs != 2){
    std::cerr << "Unit test runtime ERROR: utPeridigm_PdQuickGridDiscretization_MPI_np2 only makes sense on 2 processors" << std::endl;
    return;
  }

  RCP<ParameterList> discParams = rcp(new ParameterList);

  // create a 2x2x2 discretization
  // specify a spherical neighbor search with the horizon a tad longer than the mesh spacing
  discParams->set("Type", "PdQuickGrid");
  discParams->set("NeighborhoodType", "Spherical");
  ParameterList& quickGridParams = discParams->sublist("TensorProduct3DMeshGenerator");
  quickGridParams.set("Type", "PdQuickGrid");
  quickGridParams.set("X Origin", 0.0);
  quickGridParams.set("Y Origin", 0.0);
  quickGridParams.set("Z Origin", 0.0);
  quickGridParams.set("X Length", 1.0);
  quickGridParams.set("Y Length", 1.0);
  quickGridParams.set("Z Length", 1.0);
  quickGridParams.set("Number Points X", 2);
  quickGridParams.set("Number Points Y", 2);
  quickGridParams.set("Number Points Z", 2);

  // initialize the horizon manager and set the horizon to 0.501
  ParameterList blockParameterList;
  ParameterList& blockParams = blockParameterList.sublist("My Block");
  blockParams.set("Block Names", "block_1");
  blockParams.set("Horizon", 0.501);
  PeridigmNS::HorizonManager::self().loadHorizonInformationFromBlockParameters(blockParameterList);

  // create the discretization
  RCP<PdQuickGridDiscretization> discretization =
    rcp(new PdQuickGridDiscretization(comm, discParams));

  // sanity check, calling with a dimension other than 1 or 3 should throw an exception
  TEST_THROW(discretization->getGlobalOwnedMap(0), Teuchos::Exceptions::InvalidParameter);
  TEST_THROW(discretization->getGlobalOwnedMap(2), Teuchos::Exceptions::InvalidParameter);
  TEST_THROW(discretization->getGlobalOwnedMap(4), Teuchos::Exceptions::InvalidParameter);

  // basic checks on the 1d map
  Teuchos::RCP<const Epetra_BlockMap> map = discretization->getGlobalOwnedMap(1);
  TEST_ASSERT(map->NumGlobalElements() == 8);
  TEST_ASSERT(map->NumMyElements() == 4);
  TEST_ASSERT(map->ElementSize() == 1);
  TEST_ASSERT(map->IndexBase() == 0);
  TEST_ASSERT(map->UniqueGIDs() == true);
  int* myGlobalElements = map->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }

  // check the 1d overlap map
  // for this simple discretization, everything should be ghosted on both processors
  Teuchos::RCP<const Epetra_BlockMap> overlapMap = discretization->getGlobalOverlapMap(1);
  TEST_ASSERT(overlapMap->NumGlobalElements() == 16);
  TEST_ASSERT(overlapMap->NumMyElements() == 8);
  TEST_ASSERT(overlapMap->ElementSize() == 1);
  TEST_ASSERT(overlapMap->IndexBase() == 0);
  TEST_ASSERT(overlapMap->UniqueGIDs() == false);
  myGlobalElements = overlapMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
    TEST_ASSERT(myGlobalElements[4] == 1);
    TEST_ASSERT(myGlobalElements[5] == 3);
    TEST_ASSERT(myGlobalElements[6] == 5);
    TEST_ASSERT(myGlobalElements[7] == 7);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
    TEST_ASSERT(myGlobalElements[4] == 0);
    TEST_ASSERT(myGlobalElements[5] == 2);
    TEST_ASSERT(myGlobalElements[6] == 4);
    TEST_ASSERT(myGlobalElements[7] == 6);
  }
//......... the rest of this example is omitted .........
Example 7: getpartition_
void getpartition_(int& mySize, int* myIndicies) {
  // Copy the locally owned global indices into the caller-supplied array
  // so they can be handed back to Glimmer; partitionMap is the map defined
  // elsewhere in this interface file.
  partitionMap->MyGlobalElements(myIndicies);
}
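For orientation, a short sketch of how this wrapper might be exercised from the C++ side. It assumes partitionMap is the Epetra map set up elsewhere in the same interface file and that the caller sizes the output buffer from that map's local element count; the names below are illustrative only.

// Hypothetical C++ caller; in the real interface this routine is invoked from Fortran.
int mySize = partitionMap->NumMyElements();   // assumed: same map the wrapper reads
std::vector<int> myIndices(mySize);
getpartition_(mySize, myIndices.data());      // fills myIndices with the owned global IDs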