This article collects typical usage examples of the C++ method Tpetra::MultiVector::getMap. If you have been wondering what MultiVector::getMap does, how to call it, or what idiomatic uses look like, the curated examples below should help; they also illustrate the surrounding Tpetra::MultiVector API.
The following shows 5 code examples of MultiVector::getMap, sorted roughly by popularity.
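As a warm-up before the collected examples, here is a minimal, self-contained sketch, not taken from any of the examples below (the map size and column count are arbitrary), showing what getMap() returns: a Teuchos::RCP<const Tpetra::Map<>> describing the vector's parallel distribution.

#include <iostream>
#include <Tpetra_Core.hpp>
#include <Tpetra_Map.hpp>
#include <Tpetra_MultiVector.hpp>

int main (int argc, char* argv[])
{
  Tpetra::ScopeGuard tpetraScope (&argc, &argv); // initializes MPI and Kokkos
  {
    auto comm = Tpetra::getDefaultComm ();
    // A contiguous map distributing 100 global entries over all ranks.
    auto map = Teuchos::rcp (new Tpetra::Map<> (100, 0, comm));
    // A MultiVector with 3 columns, distributed according to 'map'.
    Tpetra::MultiVector<> X (map, 3);
    // getMap() hands back an RCP to the (const) map the vector was built with.
    auto xMap = X.getMap ();
    std::cout << "Rank " << comm->getRank ()
              << " owns " << X.getLocalLength ()  // local rows, per xMap
              << " of " << xMap->getGlobalNumElements ()
              << " global entries" << std::endl;
  }
  return 0;
}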
Example 1: importer
// ============================================================================
void
BorderingHelpers::
dissect(const Tpetra::MultiVector<double,int,int> & x,
        Tpetra::MultiVector<double,int,int> & xSmall,
        double * lambda
        )
{
#ifndef NDEBUG
  TEUCHOS_ASSERT_EQUALITY(x.getNumVectors(), xSmall.getNumVectors());
  // Make sure the maps are matching.
  std::shared_ptr<const Tpetra::Map<int,int>> extendedMap =
    nosh::BorderingHelpers::extendMapBy1(xSmall.getMap());
  TEUCHOS_ASSERT(x.getMap()->isSameAs(*extendedMap));
#endif
  Tpetra::Import<int,int> importer(x.getMap(), xSmall.getMap());
  // Strip off the phase constraint variable.
  xSmall.doImport(x, importer, Tpetra::INSERT);
  // TODO Check if we need lambda on all procs.
  if (x.getMap()->getComm()->getRank() == 0) {
    const size_t n = x.getLocalLength();
    for (size_t k = 0; k < x.getNumVectors(); k++)
      lambda[k] = x.getData(k)[n - 1];
  }
  return;
}
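In this example getMap() feeds two things: the map-consistency check in the NDEBUG block (via isSameAs()), and the Tpetra::Import that moves data from the extended distribution of x into the smaller distribution of xSmall, stripping the trailing phase-constraint entry along the way.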
Example 2: timeMon
void
Chebyshev<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& X,
       Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& Y,
       Teuchos::ETransp mode,
       scalar_type alpha,
       scalar_type beta) const
{
  {
    Teuchos::TimeMonitor timeMon (*Time_);

    // compute() calls initialize() if it hasn't already been called.
    // Thus, we only need to check isComputed().
    TEUCHOS_TEST_FOR_EXCEPTION(! isComputed(), std::runtime_error,
      "Ifpack2::Chebyshev::apply(): You must call the compute() method before "
      "you may call apply().");
    TEUCHOS_TEST_FOR_EXCEPTION(
      X.getNumVectors() != Y.getNumVectors(),
      std::runtime_error,
      "Ifpack2::Chebyshev::apply(): X and Y must have the same number of "
      "columns. X.getNumVectors() = " << X.getNumVectors() << " != "
      << "Y.getNumVectors() = " << Y.getNumVectors() << ".");
#ifdef HAVE_TEUCHOS_DEBUG
    {
      // The relation 'isSameAs' is transitive. It's also a collective,
      // so we don't have to do a "shared" test for exception (i.e., a
      // global reduction on the test value).
      TEUCHOS_TEST_FOR_EXCEPTION(
        ! X.getMap ()->isSameAs (*getDomainMap ()),
        std::runtime_error,
        "Ifpack2::Chebyshev: The domain Map of the matrix must be the same as "
        "the Map of the input vector(s) X.");
      TEUCHOS_TEST_FOR_EXCEPTION(
        ! Y.getMap ()->isSameAs (*getRangeMap ()),
        std::runtime_error,
        "Ifpack2::Chebyshev: The range Map of the matrix must be the same as "
        "the Map of the output vector(s) Y.");
    }
#endif // HAVE_TEUCHOS_DEBUG
    applyImpl (X, Y, mode, alpha, beta);
  }
  ++NumApply_;
  ApplyTime_ += Time_->totalElapsedTime ();
}
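Here getMap() appears only in the HAVE_TEUCHOS_DEBUG block: the maps of X and Y are checked against the preconditioner's domain and range maps with isSameAs(), which (as the source comment notes) is collective, so all processes throw, or not, together.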
Example 3: X_tmp
void IdentitySolver<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& X,
       Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Y,
       Teuchos::ETransp /*mode*/,
       scalar_type alpha,
       scalar_type beta) const
{
  using Teuchos::RCP;
  typedef Teuchos::ScalarTraits<scalar_type> STS;
  typedef Tpetra::MultiVector<scalar_type, local_ordinal_type,
    global_ordinal_type, node_type> MV;

  TEUCHOS_TEST_FOR_EXCEPTION(
    ! isComputed (), std::runtime_error,
    "Ifpack2::IdentitySolver::apply: If compute() has not yet been called, "
    "or if you have changed the matrix via setMatrix(), "
    "you must call compute() before you may call this method.");

  // "Identity solver" does what it says: it's the identity operator.
  // We have to Export if the domain and range Maps are not the same.
  // Otherwise, this operator would be a permutation, not the identity.
  if (export_.is_null ()) {
    Y.update (alpha, X, beta);
  }
  else {
    if (alpha == STS::one () && beta == STS::zero ()) { // the common case
      Y.doExport (X, *export_, Tpetra::REPLACE);
    }
    else {
      // We know that the domain and range Maps are compatible. First
      // bring X into the range Map via Export. Then compute in place
      // in Y.
      MV X_tmp (Y.getMap (), Y.getNumVectors ());
      X_tmp.doExport (X, *export_, Tpetra::REPLACE);
      Y.update (alpha, X_tmp, beta);
    }
  }
  ++numApply_;
}
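The interesting getMap() call is in the last branch: Y.getMap() supplies the range-map distribution for the temporary X_tmp, so X can be exported into the correct layout before the in-place update on Y.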
Example 4: rcp
static Teuchos::RCP<Tpetra::MultiVector<Scalar,LO,GO,Node> >
Clone (const Tpetra::MultiVector<Scalar,LO,GO,Node>& mv, const int numvecs)
{
  return Teuchos::rcp (new Tpetra::MultiVector<Scalar,LO,GO,Node> (mv.getMap (), numvecs));
}
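This Clone() helper, apparently part of a multivector traits adapter, shows the most common getMap() idiom: pass the map of an existing vector to the MultiVector constructor to allocate a new vector with the same distribution but a different number of columns.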
Example 5: findUniqueGids
size_t findUniqueGids(
  Tpetra::MultiVector<gno_t, lno_t, gno_t> &keys,
  Tpetra::Vector<gno_t, lno_t, gno_t> &gids
)
{
  // Input:  Tpetra MultiVector of keys; key length = numVectors()
  //         May contain duplicate keys within a processor.
  //         May contain duplicate keys across processors.
  // Input:  Empty Tpetra Vector with same map for holding the results
  // Output: Filled gids vector, containing unique global numbers for
  //         each unique key. Global numbers are in range [0,#UniqueKeys).

  size_t num_keys = keys.getLocalLength();
  size_t num_entries = keys.getNumVectors();

#ifdef HAVE_ZOLTAN2_MPI
  MPI_Comm mpicomm = Teuchos::getRawMpiComm(*(keys.getMap()->getComm()));
#else
  // Zoltan's siMPI will be used here
  {
    int flag;
    MPI_Initialized(&flag);
    if (!flag) {
      int narg = 0;
      char **argv = NULL;
      MPI_Init(&narg, &argv);
    }
  }
  MPI_Comm mpicomm = MPI_COMM_WORLD;  // Will get MPI_COMM_WORLD from siMPI
#endif

  int num_gid = sizeof(gno_t)/sizeof(ZOLTAN_ID_TYPE) * num_entries;
  int num_user = sizeof(gno_t);

  // Buffer the keys for Zoltan_DD
  Teuchos::ArrayRCP<const gno_t> *tmpKeyVecs =
    new Teuchos::ArrayRCP<const gno_t>[num_entries];
  for (size_t v = 0; v < num_entries; v++) tmpKeyVecs[v] = keys.getData(v);

  ZOLTAN_ID_PTR ddkeys = new ZOLTAN_ID_TYPE[num_gid * num_keys];
  size_t idx = 0;
  for (size_t i = 0; i < num_keys; i++) {
    for (size_t v = 0; v < num_entries; v++) {
      ZOLTAN_ID_PTR ddkey = &(ddkeys[idx]);
      TPL_Traits<ZOLTAN_ID_PTR,gno_t>::ASSIGN(ddkey, tmpKeyVecs[v][i]);
      idx += TPL_Traits<ZOLTAN_ID_PTR,gno_t>::NUM_ID;
    }
  }
  delete [] tmpKeyVecs;

  // Allocate memory for the result
  char *ddnewgids = new char[num_user * num_keys];

  // Compute the new GIDs
  size_t nUnique = findUniqueGidsCommon<gno_t>(num_keys, num_gid,
                                               ddkeys, ddnewgids, mpicomm);

  // Copy the result into the output vector
  gno_t *result = (gno_t *)ddnewgids;
  for (size_t i = 0; i < num_keys; i++)
    gids.replaceLocalValue(i, result[i]);

  // Clean up
  delete [] ddkeys;
  delete [] ddnewgids;

  return nUnique;
}
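In this last example, getMap()->getComm() bridges from Tpetra to raw MPI: Teuchos::getRawMpiComm() unwraps the Teuchos communicator so that the buffered keys can be handed to the Zoltan distributed-directory machinery behind findUniqueGidsCommon().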