This page collects typical usage examples of the C++ method Teuchos::tuple, gathered from real code. If you are wondering what exactly Teuchos::tuple does, how to use it, or what calls to it look like in practice, the curated examples below may help. You can also explore further usage examples from Teuchos, the namespace in which this method lives.
Below, 12 code examples of the Teuchos::tuple method are shown, sorted by popularity by default.
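Before diving into the examples, here is a minimal, self-contained sketch of what Teuchos::tuple does: it packs its arguments into a fixed-size, stack-allocated Teuchos::Tuple<T,N>, and the Tuple's operator() yields a non-owning Teuchos::ArrayView<T> over the same storage, which is the type most of the interfaces below actually accept.
#include <iostream>
#include "Teuchos_Tuple.hpp"

int main() {
  // tuple() deduces T and N from its arguments; no heap allocation occurs.
  Teuchos::Tuple<int, 3> t = Teuchos::tuple(1, 2, 3);
  // operator() converts the Tuple into a non-owning ArrayView of its storage.
  Teuchos::ArrayView<int> av = t();
  for (Teuchos_Ordinal i = 0; i < av.size(); ++i)
    std::cout << av[i] << ' ';
  std::cout << std::endl;
  return 0;
}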
Example 1: update
NOX::Abstract::MultiVector&
NOX::Thyra::MultiVector::
update(double alpha, const NOX::Abstract::MultiVector& a, double gamma)
{
  using Teuchos::tuple;
  const NOX::Thyra::MultiVector& aa =
    dynamic_cast<const NOX::Thyra::MultiVector&>(a);
  // Compute this = alpha*a + gamma*this as a Thyra linear combination;
  // tuple() builds the one-element coefficient and operand views in place.
  ::Thyra::linear_combination<double>(tuple(alpha)().getConst(),
                                      tuple(aa.thyraMultiVec.ptr().getConst())(),
                                      gamma, thyraMultiVec.ptr());
  return *this;
}
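A note on the tuple(alpha)().getConst() idiom in Example 1: the Tuple is a temporary, so the ArrayView produced by operator() is only valid until the end of the full expression. A minimal sketch of the distinction, with a hypothetical consume() callee:
void consume(const Teuchos::ArrayView<const double>& a);  // hypothetical

void sketch() {
  // Safe: the view is consumed before the Tuple temporary is destroyed.
  consume(Teuchos::tuple(2.0)().getConst());
  // Unsafe: storing the view would leave it dangling after this statement.
  // Teuchos::ArrayView<const double> dangling = Teuchos::tuple(2.0)().getConst();
}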
Example 2: ele_wise_bound
void
ele_wise_bound (const ::Thyra::VectorBase<Scalar>& x_lo,
                const ::Thyra::VectorBase<Scalar>& x_up,
                const Teuchos::Ptr< ::Thyra::VectorBase<Scalar> > &x) {
  using Teuchos::tuple;
  using Teuchos::ptrInArg;
  using Teuchos::null;
  // Clamp x elementwise into [x_lo, x_up]; the two inputs and the one
  // in/out vector are passed to applyOp as small tuple() views.
  RTOpPack::TOpEleWiseBound<Scalar> ele_wise_bound_op;
  ::Thyra::applyOp<Scalar> (ele_wise_bound_op,
                            tuple (ptrInArg (x_lo), ptrInArg (x_up)), tuple (x),
                            null);
}
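Examples 2 and 3 combine tuple with Teuchos::ptrInArg, which wraps a const reference as a Teuchos::Ptr<const T> so it can travel through applyOp's input-vector list. A minimal sketch of the types involved, outside of Thyra:
#include "Teuchos_Tuple.hpp"
#include "Teuchos_Ptr.hpp"

void sketch() {
  using Teuchos::ptrInArg;
  double a = 1.0, b = 2.0;
  // ptrInArg(a) is a Teuchos::Ptr<const double> pointing at a.
  Teuchos::Tuple<Teuchos::Ptr<const double>, 2> ins =
    Teuchos::tuple(ptrInArg(a), ptrInArg(b));
  // ins() converts implicitly to the ArrayView<const Ptr<const double> >
  // shape that applyOp-style interfaces accept.
  (void)ins;
}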
Example 3: ele_wise_prune_upper
void
ele_wise_prune_upper (const ::Thyra::VectorBase<Scalar>& x,
                      const ::Thyra::VectorBase<Scalar>& x_up,
                      const Teuchos::Ptr< ::Thyra::VectorBase<Scalar> > &v,
                      const Scalar& eps) {
  using Teuchos::tuple;
  using Teuchos::ptrInArg;
  using Teuchos::null;
  RTOpPack::TOpEleWisePruneUpper_2_1<Scalar> ele_wise_prune_op(eps);
  ::Thyra::applyOp<Scalar> (ele_wise_prune_op,
                            tuple (ptrInArg (x), ptrInArg (x_up)), tuple (v),
                            null);
}
Example 4: buildProblem
RCP<LinearProblem<Scalar,MultiVector<Scalar,int>,Operator<Scalar,int> > > buildProblem()
{
  typedef ScalarTraits<Scalar> SCT;
  typedef typename SCT::magnitudeType MT;
  typedef Operator<Scalar,int> OP;
  typedef MultiVector<Scalar,int> MV;
  typedef OperatorTraits<Scalar,MV,OP> OPT;
  typedef MultiVecTraits<Scalar,MV> MVT;
  RCP<CrsMatrix<Scalar,int> > A = rcp(new CrsMatrix<Scalar,int>(vmap,rnnzmax));
  if (mptestmypid == 0) {
    // HB format is compressed column. CrsMatrix is compressed row.
    const double *dptr = dvals;
    const int *rptr = rowind;
    for (int c=0; c<mptestdim; ++c) {
      for (int colnnz=0; colnnz < colptr[c+1]-colptr[c]; ++colnnz) {
        // Insert A(r,c); the file stores only one triangle, so mirror the
        // entry at A(c,r) to assemble the full symmetric matrix.
        A->insertGlobalValues(*rptr-1, tuple(c), tuple<Scalar>(*dptr));
        if (c != *rptr-1) {
          A->insertGlobalValues(c, tuple(*rptr-1), tuple<Scalar>(*dptr));
        }
        ++rptr;
        ++dptr;
      }
    }
  }
  // distribute matrix data to other nodes
  A->fillComplete();
  // Create initial MV and solution MV
  RCP<MV> B, X;
  X = rcp( new MV(vmap,numrhs) );
  MVT::MvRandom( *X );
  B = rcp( new MV(vmap,numrhs) );
  OPT::Apply( *A, *X, *B );
  MVT::MvInit( *X, 0.0 );
  // Construct a linear problem instance with zero initial MV
  RCP<LinearProblem<Scalar,MV,OP> > problem = rcp( new LinearProblem<Scalar,MV,OP>(A,X,B) );
  problem->setLabel(Teuchos::typeName(SCT::one()));
  // diagonal preconditioner
  // if (precond) {
  //   Vector<Scalar,int> diags(A->getRowMap());
  //   A->getLocalDiagCopy(diags);
  //   for (Teuchos_Ordinal i=0; i<vmap->getNumMyEntries(); ++i) {
  //     TEST_FOR_EXCEPTION(diags[i] <= SCT::zero(), std::runtime_error, "Matrix is not positive-definite: " << diags[i]);
  //     diags[i] = SCT::one() / diags[i];
  //   }
  //   RCP<Operator<Scalar,int> > P = rcp(new DiagPrecond<Scalar,int>(diags));
  //   problem->setRightPrec(P);
  // }
  TEST_FOR_EXCEPT(problem->setProblem() == false);
  return problem;
}
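The inner loop above is decoding the Harwell-Boeing layout named in the comment: colptr[c] .. colptr[c+1] delimit the entries of column c, and rowind stores 1-based row indices, hence the *rptr-1 adjustments. A minimal standalone sketch of that traversal, assuming HB-style arrays:
// Hypothetical standalone walk over CSC arrays with 1-based row indices,
// mirroring the index arithmetic in buildProblem() above.
void visit_csc(int ncols, const int* colptr, const int* rowind,
               const double* vals) {
  const double* dptr = vals;
  const int* rptr = rowind;
  for (int c = 0; c < ncols; ++c) {
    for (int k = 0; k < colptr[c+1] - colptr[c]; ++k) {
      const int r = *rptr - 1;  // convert 1-based HB row index to 0-based
      const double v = *dptr;   // entry (r, c) = v would be consumed here
      (void)r; (void)v;
      ++rptr;
      ++dptr;
    }
  }
}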
Example 5: tuple
Teuchos::RCP<const Thyra::ProductVectorBase<Scalar> >
Thyra::castOrCreateProductVectorBase(const RCP<const VectorBase<Scalar> > v)
{
  using Teuchos::rcp_dynamic_cast;
  using Teuchos::tuple;
  const RCP<const ProductVectorBase<Scalar> > prod_v =
    rcp_dynamic_cast<const ProductVectorBase<Scalar> >(v);
  if (nonnull(prod_v)) {
    return prod_v;
  }
  // Not already a product vector: wrap v as the single block of a
  // one-block default product vector.
  return defaultProductVector<Scalar>(
    productVectorSpace<Scalar>(tuple(v->space())()),
    tuple(v)());
}
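Example 5 shows that tuple works just as well with reference-counted handles as with plain scalars: tuple(v) yields a one-element Tuple of RCPs, and the trailing () produces the ArrayView of blocks that productVectorSpace and defaultProductVector expect. A trivial sketch of the same shape with a concrete type:
#include "Teuchos_RCP.hpp"
#include "Teuchos_Tuple.hpp"

void sketch() {
  Teuchos::RCP<int> p = Teuchos::rcp(new int(42));
  // The one-element Tuple copies the RCP, sharing ownership with p.
  Teuchos::Tuple<Teuchos::RCP<int>, 1> blocks = Teuchos::tuple(p);
  Teuchos::ArrayView<Teuchos::RCP<int> > view = blocks();  // non-owning view
  (void)view;
}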
Example 6: GenerateConstView
void MetricJacobian<NodeT,ScalarT>::SetDataViews(
    ArrayRCP<NodeT>& mesh_data, map<string,int>& mesh_map_offset,
    ArrayRCP<ScalarT>& soln_data, map<string,int>& soln_map_offset,
    ArrayRCP<MetricJacobian<NodeT,ScalarT>::ResidT>& resid_data,
    map<string,int>& resid_map_offset) {
  using Teuchos::tuple;
  // views of inputs
  node_coords_ = GenerateConstView(mesh_data, mesh_map_offset.at("node_coords"),
                                   tuple(num_elems_, num_nodes_per_elem_, dim_));
  // views of outputs
  jacob_ = GenerateView(mesh_data, mesh_map_offset.at("jacob"),
                        tuple(num_elems_, num_cub_points_, dim_, dim_));
  jacob_inv_ = GenerateView(mesh_data, mesh_map_offset.at("jacob_inv"),
                            tuple(num_elems_, num_cub_points_, dim_, dim_));
  jacob_det_ = GenerateView(mesh_data, mesh_map_offset.at("jacob_det"),
                            tuple(num_elems_, num_cub_points_));
}
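GenerateView and GenerateConstView are helpers local to this project rather than part of Teuchos; what matters for this page is that tuple supplies their dimension lists inline. A hypothetical declaration consistent with the calls above (the return type and names are assumptions for illustration only):
// Hypothetical: the Tuple of dimensions binds to an implicit
// Teuchos::ArrayView<const int>, which is why callers can write
// tuple(num_elems_, num_cub_points_) directly at the call site.
template <typename T>
SomeMultiDimView<T> GenerateView(Teuchos::ArrayRCP<T>& data, int offset,
                                 const Teuchos::ArrayView<const int>& dims);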
Example 7: create
// Create and return a simple example CrsMatrix, with row
// distribution over the given Map.
Teuchos::RCP<const TpetraMatrixType>
create (const Teuchos::RCP<const map_type>& map) const
{
  using Teuchos::arcp;
  using Teuchos::ArrayRCP;
  using Teuchos::ArrayView;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::Time;
  using Teuchos::TimeMonitor;
  using Teuchos::tuple;
  typedef Tpetra::global_size_t GST;
  // Create a timer for sparse matrix creation.
  RCP<Time> timer = TimeMonitor::getNewCounter ("Sparse matrix creation");
  // Time the whole scope of this routine, not counting timer lookup.
  TimeMonitor monitor (*timer);
  // Create a Tpetra::Matrix using the Map, with dynamic allocation.
  RCP<TpetraMatrixType> A = rcp (new TpetraMatrixType (map, 3));
  // Add rows one at a time. Off-diagonal values will always be -1.
  const scalar_type two = static_cast<scalar_type>( 2.0);
  const scalar_type negOne = static_cast<scalar_type>(-1.0);
  const GST numGlobalElements = map->getGlobalNumElements ();
  // const size_t numMyElements = map->getNodeNumElements ();
  // The list of global elements owned by this MPI process.
  ArrayView<const global_ordinal_type> myGlobalElements =
    map->getNodeElementList ();
  typedef typename ArrayView<const global_ordinal_type>::const_iterator iter_type;
  for (iter_type it = myGlobalElements.begin(); it != myGlobalElements.end(); ++it) {
    // getNodeElementList() holds *global* indices, so *it is already the
    // global row index.
    const global_ordinal_type i_global = *it;
    // Can't insert local indices without a column map, so we insert
    // global indices here.
    if (i_global == 0) {
      A->insertGlobalValues (i_global,
                             tuple (i_global, i_global+1),
                             tuple (two, negOne));
    } else if (static_cast<GST> (i_global) == numGlobalElements - 1) {
      A->insertGlobalValues (i_global,
                             tuple (i_global-1, i_global),
                             tuple (negOne, two));
    } else {
      A->insertGlobalValues (i_global,
                             tuple (i_global-1, i_global, i_global+1),
                             tuple (negOne, two, negOne));
    }
  }
  // Finish up the matrix.
  A->fillComplete ();
  return A;
}
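A hedged usage sketch for this factory method; everything except the Tpetra calls themselves (the Tpetra::Map constructor and describe()) is an assumption about the surrounding harness:
// Assumed: 'factory' is an instance of the enclosing class with
// TpetraMatrixType = Tpetra::CrsMatrix<>, and 'comm' is a communicator.
// typedef Tpetra::Map<> map_type;
// RCP<const map_type> map = rcp (new map_type (100, 0, comm)); // 100 rows, index base 0
// RCP<const TpetraMatrixType> A = factory.create (map);
// A->describe (*Teuchos::fancyOStream (Teuchos::rcpFromRef (std::cout)),
//              Teuchos::VERB_MEDIUM);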
Example 8: multiply
Teuchos::RCP<const Thyra::LinearOpBase<Scalar> >
Thyra::multiply(
const RCP<const LinearOpBase<Scalar> > &A,
const RCP<const LinearOpBase<Scalar> > &B,
const std::string &M_label
)
{
  using Teuchos::tuple;
  RCP<DefaultMultipliedLinearOp<Scalar> > multOp =
    defaultMultipliedLinearOp<Scalar>(tuple(A, B)());
  if (M_label.length())
    multOp->setObjectLabel(M_label);
return multOp;
}
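Since Thyra::multiply is itself the public entry point shown here, usage is a single call; the returned operator never forms the product matrix, it simply applies B and then A. A hedged sketch, assuming A and B are pre-existing operator handles:
// Assumed: RCP<const Thyra::LinearOpBase<double> > A, B already constructed.
// Teuchos::RCP<const Thyra::LinearOpBase<double> > M =
//   Thyra::multiply<double>(A, B, "A*B");
// Applying M to a vector x now computes A*(B*x) lazily.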
Example 9: reductions
void Thyra::reductions( const MultiVectorBase<Scalar>& V, const NormOp &op,
                        const ArrayView<typename ScalarTraits<Scalar>::magnitudeType> &norms )
{
  using Teuchos::tuple; using Teuchos::ptrInArg; using Teuchos::null;
  const int m = V.domain()->dim();
  // One reduction target per column of V.
  Array<RCP<RTOpPack::ReductTarget> > rcp_op_targs(m);
  Array<Ptr<RTOpPack::ReductTarget> > op_targs(m);
  for( int kc = 0; kc < m; ++kc ) {
    rcp_op_targs[kc] = op.reduct_obj_create();
    op_targs[kc] = rcp_op_targs[kc].ptr();
  }
  // No output multivectors are needed, so pass an empty (null) view and
  // collect only the reduction objects.
  applyOp<Scalar>(op, tuple(ptrInArg(V)),
                  ArrayView<Ptr<MultiVectorBase<Scalar> > >(null),
                  op_targs );
  // Extract the scalar result (e.g. a norm) from each reduction target.
  for( int kc = 0; kc < m; ++kc ) {
    norms[kc] = op(*op_targs[kc]);
  }
}
Example 10: main
int main(int argc, char *argv[])
{
#ifndef HAVE_TPETRA_COMPLEX_DOUBLE
# error "Anasazi: This test requires Scalar = std::complex<double> to be enabled in Tpetra."
#else
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::tuple;
  using std::cout;
  using std::endl;
  typedef std::complex<double> ST;
  typedef Teuchos::ScalarTraits<ST> SCT;
  typedef SCT::magnitudeType MT;
  typedef Tpetra::MultiVector<ST> MV;
  typedef MV::global_ordinal_type GO;
  typedef Tpetra::Operator<ST> OP;
  typedef Anasazi::MultiVecTraits<ST,MV> MVT;
  typedef Anasazi::OperatorTraits<ST,MV,OP> OPT;

  Tpetra::ScopeGuard tpetraScope (&argc, &argv);
  bool success = false;
  const ST ONE = SCT::one ();
  int info = 0;
  RCP<const Teuchos::Comm<int> > comm = Tpetra::getDefaultComm ();
  const int MyPID = comm->getRank ();

  bool verbose = false;
  bool debug = false;
  bool insitu = false;
  bool herm = false;
  std::string which("LM");
  std::string filename;
  int nev = 4;
  int blockSize = 4;
  MT tol = 1.0e-6;

  Teuchos::CommandLineProcessor cmdp(false,true);
  cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
  cmdp.setOption("debug","nodebug",&debug,"Print debugging information.");
  cmdp.setOption("insitu","exsitu",&insitu,"Perform in situ restarting.");
  cmdp.setOption("sort",&which,"Targeted eigenvalues (SM or LM).");
  cmdp.setOption("herm","nonherm",&herm,"Solve Hermitian or non-Hermitian problem.");
  cmdp.setOption("filename",&filename,"Filename for Harwell-Boeing test matrix (assumes non-Hermitian unless specified otherwise).");
  cmdp.setOption("nev",&nev,"Number of eigenvalues to compute.");
  cmdp.setOption("blockSize",&blockSize,"Block size for the algorithm.");
  cmdp.setOption("tol",&tol,"Tolerance for convergence.");
  if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
    return -1;
  }
  if (debug) verbose = true;
  if (filename == "") {
    // pick a default test matrix based on herm
    if (herm) {
      filename = "mhd1280b.cua";
    }
    else {
      filename = "mhd1280a.cua";
    }
  }
  if (MyPID == 0) {
    cout << Anasazi::Anasazi_Version() << endl << endl;
  }

  // Get the data from the HB file
  int dim, dim2, nnz;
  int rnnzmax;
  double *dvals;
  int *colptr, *rowind;
  nnz = -1;
  if (MyPID == 0) {
    info = readHB_newmat_double(filename.c_str(),&dim,&dim2,&nnz,&colptr,&rowind,&dvals);
    // find maximum NNZ over all rows
    std::vector<int> rnnz(dim,0);
    for (int *ri = rowind; ri < rowind+nnz; ++ri) {
      ++rnnz[*ri-1];
    }
    rnnzmax = *std::max_element(rnnz.begin(),rnnz.end());
  }
  else {
    // address uninitialized data warnings
    dvals = NULL;
    colptr = NULL;
    rowind = NULL;
  }
  Teuchos::broadcast(*comm,0,&info);
  Teuchos::broadcast(*comm,0,&nnz);
  Teuchos::broadcast(*comm,0,&dim);
  Teuchos::broadcast(*comm,0,&rnnzmax);
  if (info == 0 || nnz < 0) {
    if (MyPID == 0) {
      cout << "Error reading '" << filename << "'" << endl
           << "End Result: TEST FAILED" << endl;
    }
//......... (the rest of this example is omitted) .........
Example 11: TEUCHOS_UNIT_TEST_TEMPLATE_4_DECL
//
// Test for Tpetra::CrsMatrix::sumIntoGlobalValues(), with nonowned
// rows. The test creates the CrsMatrix with a static graph, so that
// globalAssemble() uses sumIntoGlobalValues() instead of
// insertGlobalValues() to merge in the incoming matrix entries. All
// calls to sumIntoGlobalValues() in this test are for nonowned rows,
// and all the calls are correct (that is, the processes that own
// those rows have entries in the corresponding columns, so that
// nonowned fill does not require creating new entries).
//
// mfh 16 Dec 2012: The one-template-argument version breaks explicit
// instantiation. Ah well.
//
//TEUCHOS_UNIT_TEST_TEMPLATE_1_DECL( CrsMatrix, NonlocalSumInto, CrsMatrixType )
TEUCHOS_UNIT_TEST_TEMPLATE_4_DECL( CrsMatrix, NonlocalSumInto, LocalOrdinalType, GlobalOrdinalType, ScalarType, NodeType )
{
  using Tpetra::createContigMapWithNode;
  using Tpetra::createNonContigMapWithNode;
  using Tpetra::global_size_t;
  using Tpetra::Map;
  using Teuchos::Array;
  using Teuchos::ArrayView;
  using Teuchos::as;
  using Teuchos::av_const_cast;
  using Teuchos::Comm;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcp_const_cast;
  using Teuchos::OrdinalTraits;
  using Teuchos::outArg;
  using Teuchos::ParameterList;
  using Teuchos::parameterList;
  using Teuchos::reduceAll;
  using Teuchos::ScalarTraits;
  using Teuchos::tuple;
  using Teuchos::TypeNameTraits;
  using std::endl;
#if 0
  // Extract typedefs from the CrsMatrix specialization.
  typedef typename CrsMatrixType::scalar_type scalar_type;
  typedef typename CrsMatrixType::local_ordinal_type local_ordinal_type;
  typedef typename CrsMatrixType::global_ordinal_type global_ordinal_type;
  typedef typename CrsMatrixType::node_type node_type;
#endif // 0
  typedef ScalarType scalar_type;
  typedef LocalOrdinalType local_ordinal_type;
  typedef GlobalOrdinalType global_ordinal_type;
  typedef NodeType node_type;
  // Typedefs derived from the above canonical typedefs.
  typedef ScalarTraits<scalar_type> STS;
  typedef Map<local_ordinal_type, global_ordinal_type, node_type> map_type;
  // Abbreviation typedefs.
  typedef scalar_type ST;
  typedef local_ordinal_type LO;
  typedef global_ordinal_type GO;
  typedef node_type NT;
  typedef Tpetra::CrsMatrix<ST, LO, GO, NT> CrsMatrixType;
  // CrsGraph specialization corresponding to CrsMatrixType (the
  // CrsMatrix specialization).
  typedef Tpetra::CrsGraph<LO, GO, NT, typename CrsMatrixType::mat_solve_type> crs_graph_type;

  ////////////////////////////////////////////////////////////////////
  // HERE BEGINS THE TEST.
  ////////////////////////////////////////////////////////////////////

  const global_size_t INVALID = OrdinalTraits<global_size_t>::invalid();
  // Get the default communicator.
  RCP<const Comm<int> > comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
  const int numProcs = comm->getSize ();
  const int myRank = comm->getRank ();
  if (myRank == 0) {
    out << "Test with " << numProcs << " process" << (numProcs != 1 ? "es" : "") << endl;
  }
  // This test doesn't make much sense if there is only one MPI
  // process. We let it pass trivially in that case.
  if (numProcs == 1) {
    out << "Number of processes in world is one; test passes trivially." << endl;
    return;
  }
  // Get a Kokkos Node instance. It would be nice if we could pass in
  // parameters here, but threads don't matter for this test; it's a
  // test for distributed-memory capabilities.
  if (myRank == 0) {
    out << "Creating Kokkos Node of type " << TypeNameTraits<node_type>::name () << endl;
  }
  RCP<node_type> node;
  {
    ParameterList pl; // Kokkos Node types require a PL inout.
    node = rcp (new node_type (pl));
//......... (the rest of this example is omitted) .........
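The excerpt stops before the sum-into calls themselves, but the operation under test has the following shape; this is a hedged sketch with placeholder index and value names, not the test's literal code:
// Each process contributes to a row it does not own; globalAssemble()
// (invoked by fillComplete()) ships the contribution to the owning process,
// which merges it via sumIntoGlobalValues() because the graph is static.
// A->sumIntoGlobalValues (nonownedGlobalRow,
//                         tuple<GO> (ownedGlobalCol),
//                         tuple<ST> (STS::one ()));
// A->fillComplete (domainMap, rangeMap);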
Example 12: TEUCHOS_UNIT_TEST_TEMPLATE_4_DECL
//
// Test for Tpetra::CrsMatrix::sumIntoGlobalValues(), with nonowned
// rows. This test is like CrsMatrix_NonlocalSumInto.cpp, except that
// it attempts to sum into remote entries that don't exist on the
// process that owns them. Currently, CrsMatrix silently ignores
// these entries. (This is how CrsMatrix implements Import and Export
// when the target matrix has a fixed column Map. Data are
// redistributed between the two row Maps, and "filtered" by the
// target matrix's column Map.) This unit test verifies that behavior
// by ensuring the following:
//
// 1. fillComplete() (actually globalAssemble()) does not throw an
// exception when the incoming entries don't exist on the process
// that owns their rows.
//
// 2. The ignored entries are actually ignored. They must change
// neither the structure nor the values of the matrix.
//
// mfh 16 Dec 2012: The one-template-argument version breaks explicit
// instantiation. Ah well.
//
//TEUCHOS_UNIT_TEST_TEMPLATE_1_DECL( CrsMatrix, NonlocalSumInto_Ignore, CrsMatrixType )
TEUCHOS_UNIT_TEST_TEMPLATE_4_DECL( CrsMatrix, NonlocalSumInto_Ignore, LocalOrdinalType, GlobalOrdinalType, ScalarType, NodeType )
{
  using Tpetra::createContigMapWithNode;
  using Tpetra::createNonContigMapWithNode;
  using Tpetra::global_size_t;
  using Tpetra::Map;
  using Teuchos::Array;
  using Teuchos::ArrayView;
  using Teuchos::as;
  using Teuchos::av_const_cast;
  using Teuchos::Comm;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcp_const_cast;
  using Teuchos::OrdinalTraits;
  using Teuchos::outArg;
  using Teuchos::ParameterList;
  using Teuchos::parameterList;
  using Teuchos::reduceAll;
  using Teuchos::ScalarTraits;
  using Teuchos::tuple;
  using Teuchos::TypeNameTraits;
  using std::endl;
#if 0
  // Extract typedefs from the CrsMatrix specialization.
  typedef typename CrsMatrixType::scalar_type scalar_type;
  typedef typename CrsMatrixType::local_ordinal_type local_ordinal_type;
  typedef typename CrsMatrixType::global_ordinal_type global_ordinal_type;
  typedef typename CrsMatrixType::node_type node_type;
#endif // 0
  typedef ScalarType scalar_type;
  typedef LocalOrdinalType local_ordinal_type;
  typedef GlobalOrdinalType global_ordinal_type;
  typedef NodeType node_type;
  // Typedefs derived from the above canonical typedefs.
  typedef ScalarTraits<scalar_type> STS;
  typedef Map<local_ordinal_type, global_ordinal_type, node_type> map_type;
  // Abbreviation typedefs.
  typedef scalar_type ST;
  typedef local_ordinal_type LO;
  typedef global_ordinal_type GO;
  typedef node_type NT;
  typedef Tpetra::CrsMatrix<ST, LO, GO, NT> CrsMatrixType;
  // CrsGraph specialization corresponding to CrsMatrixType (the
  // CrsMatrix specialization).
  typedef Tpetra::CrsGraph<LO, GO, NT, typename CrsMatrixType::mat_solve_type> crs_graph_type;

  ////////////////////////////////////////////////////////////////////
  // HERE BEGINS THE TEST.
  ////////////////////////////////////////////////////////////////////

  const global_size_t INVALID = OrdinalTraits<global_size_t>::invalid();
  // Get the default communicator.
  RCP<const Comm<int> > comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
  const int numProcs = comm->getSize ();
  const int myRank = comm->getRank ();
  if (myRank == 0) {
    out << "Test with " << numProcs << " process" << (numProcs != 1 ? "es" : "") << endl;
  }
  // This test doesn't make much sense if there is only one MPI
  // process. We let it pass trivially in that case.
  if (numProcs == 1) {
    out << "Number of processes in world is one; test passes trivially." << endl;
    return;
  }
  // Get a Kokkos Node instance. It would be nice if we could pass in
  // parameters here, but threads don't matter for this test; it's a
  // test for distributed-memory capabilities.
//......... (the rest of this example is omitted) .........
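Again the excerpt ends early, but the property being verified can be sketched; placeholder names below, not the test's literal code:
// Summing into a (row, column) position that does not exist on the owning
// process must be silently ignored: fillComplete() must not throw, and the
// matrix's structure and values must be unchanged.
// A->sumIntoGlobalValues (nonownedRow, tuple<GO> (colNotInRow), tuple<ST> (val));
// A->fillComplete (domainMap, rangeMap);  // must not throw
// TEST_EQUALITY (A->getFrobeniusNorm (), normBeforeSumInto); // values unchanged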