This article collects typical usage examples of the C++ method ArrayView::end. If you are unsure what ArrayView::end does, how to use it, or what calling it looks like in practice, the curated code samples below may help. You can also read further about the ArrayView class to which this method belongs.
A total of 13 code examples of ArrayView::end are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
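Before the extracted examples, here is a minimal standalone sketch of the begin()/end() iterator pair on Teuchos::ArrayView, the Trilinos class used by most of the examples below (Examples 1, 8 and 10 use DOLFIN's own ArrayView, whose begin()/end() behave analogously). The data and variable names are illustrative only and are not taken from any of the examples; the sketch assumes C++11 and that the Teuchos headers are on the include path.
#include <iostream>
#include <numeric>
#include <vector>
#include "Teuchos_ArrayView.hpp"

int main()
{
  // Backing storage; an ArrayView is a non-owning view of contiguous data.
  std::vector<int> data = {1, 2, 3, 4};

  // View the whole vector via the pointer + length constructor.
  Teuchos::ArrayView<const int> av(data.data(), data.size());

  // end() returns a past-the-end iterator, so the usual STL idioms apply.
  const int sum = std::accumulate(av.begin(), av.end(), 0);

  for (Teuchos::ArrayView<const int>::const_iterator it = av.begin(); it != av.end(); ++it)
    std::cout << *it << ' ';
  std::cout << "\nsum = " << sum << std::endl;   // prints: 1 2 3 4 / sum = 10

  return 0;
}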
Example 1: local_graph
//-----------------------------------------------------------------------------
Graph GraphBuilder::local_graph(const Mesh& mesh, const GenericDofMap& dofmap0,
const GenericDofMap& dofmap1)
{
Timer timer("Build local sparsity graph from dofmaps");
// Create empty graph
const std::size_t n = dofmap0.global_dimension();
Graph graph(n);
// Build graph
for (CellIterator cell(mesh); !cell.end(); ++cell)
{
const ArrayView<const dolfin::la_index> dofs0
= dofmap0.cell_dofs(cell->index());
const ArrayView<const dolfin::la_index> dofs1
= dofmap1.cell_dofs(cell->index());
//std::vector<dolfin::la_index>::const_iterator node0, node1;
for (auto node0 = dofs0.begin(); node0 != dofs0.end(); ++node0)
for (auto node1 = dofs1.begin(); node1 != dofs1.end(); ++node1)
if (*node0 != *node1)
graph[*node0].insert(*node1);
}
return graph;
}
Example 2:
void DefaultProductMultiVector<Scalar>::initializeImpl(
const RCP<const DefaultProductVectorSpace<Scalar> > &productSpace_in,
const ArrayView<const RCP<MultiVectorType> > &multiVecs
)
{
// This function provides the "strong" guarantee (i.e. if an exception is
// thrown, then *this will be left in the original state as before the
// function was called)!
#ifdef TEUCHOS_DEBUG
TEUCHOS_ASSERT(nonnull(productSpace_in));
TEUCHOS_ASSERT_EQUALITY(multiVecs.size(), productSpace_in->numBlocks());
#endif // TEUCHOS_DEBUG
const RCP<const VectorSpaceBase<Scalar> >
theDomain = multiVecs[0]->domain();
const int numBlocks = productSpace_in->numBlocks();
#ifdef TEUCHOS_DEBUG
for ( int k = 0; k < numBlocks; ++k ) {
THYRA_ASSERT_VEC_SPACES(
Teuchos::TypeNameTraits<DefaultProductMultiVector<Scalar> >::name(),
*theDomain, *multiVecs[k]->domain()
);
}
#endif
productSpace_ = productSpace_in;
numBlocks_ = numBlocks;
multiVecs_.assign(multiVecs.begin(),multiVecs.end());
}
Example 3: assign
template<class T> inline
void ArrayRCP<T>::deepCopy(const ArrayView<const T>& av)
{
if (av.size() == 0) {
*this = null;
return;
}
assign(av.begin(), av.end());
}
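A hedged usage sketch for the deepCopy shown above (names and values are illustrative, not part of the example): the receiving ArrayRCP allocates its own storage and copies the half-open range [av.begin(), av.end()).
#include <vector>
#include "Teuchos_ArrayRCP.hpp"
#include "Teuchos_ArrayView.hpp"

void deep_copy_demo()
{
  std::vector<double> data(3, 1.5);
  Teuchos::ArrayView<const double> av(data.data(), data.size());
  Teuchos::ArrayRCP<double> owned;   // starts out null
  owned.deepCopy(av);                // now owns an independent copy of data's contents
}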
Example 4:
void ArrayView<T>::assign(const ArrayView<const T>& array) const
{
debug_assert_valid_ptr();
debug_assert_not_null();
if (this->getRawPtr()==array.getRawPtr() && this->size()==array.size())
return; // Assignment to self
debug_assert_in_range(0,array.size());
std::copy( array.begin(), array.end(), this->begin() );
// Note: Above, in debug mode, the iterators are range checked! In
// optimized mode, these are raw pointers which should run very fast!
}
Example 5: if
bool
Map<LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType> >::
locallySameAs (const Map<LocalOrdinal, GlobalOrdinal, node_type>& map) const
{
using Teuchos::ArrayView;
typedef GlobalOrdinal GO;
typedef typename ArrayView<const GO>::size_type size_type;
// If both Maps are contiguous, we can compare their GID ranges
// easily by looking at the min and max GID on this process.
// Otherwise, we'll compare their GID lists. If only one Map is
// contiguous, then we only have to call getNodeElementList() on
// the noncontiguous Map. (It's best to avoid calling it on a
// contiguous Map, since it results in unnecessary storage that
// persists for the lifetime of the Map.)
if (getNodeNumElements () != map.getNodeNumElements ()) {
return false;
}
else if (getMinGlobalIndex () != map.getMinGlobalIndex () ||
getMaxGlobalIndex () != map.getMaxGlobalIndex ()) {
return false;
}
else {
if (isContiguous ()) {
if (map.isContiguous ()) {
return true; // min and max match, so the ranges match.
}
else { // *this is contiguous, but map is not contiguous
TEUCHOS_TEST_FOR_EXCEPTION(
! this->isContiguous () || map.isContiguous (), std::logic_error,
"Tpetra::Map::locallySameAs: BUG");
ArrayView<const GO> rhsElts = map.getNodeElementList ();
const GO minLhsGid = this->getMinGlobalIndex ();
const size_type numRhsElts = rhsElts.size ();
for (size_type k = 0; k < numRhsElts; ++k) {
const GO curLhsGid = minLhsGid + static_cast<GO> (k);
if (curLhsGid != rhsElts[k]) {
return false; // stop on first mismatch
}
}
return true;
}
}
else if (map.isContiguous ()) { // *this is not contiguous, but map is
TEUCHOS_TEST_FOR_EXCEPTION(
this->isContiguous () || ! map.isContiguous (), std::logic_error,
"Tpetra::Map::locallySameAs: BUG");
ArrayView<const GO> lhsElts = this->getNodeElementList ();
const GO minRhsGid = map.getMinGlobalIndex ();
const size_type numLhsElts = lhsElts.size ();
for (size_type k = 0; k < numLhsElts; ++k) {
const GO curRhsGid = minRhsGid + static_cast<GO> (k);
if (curRhsGid != lhsElts[k]) {
return false; // stop on first mismatch
}
}
return true;
}
else { // neither *this nor map are contiguous
// std::equal requires that the latter range is as large as
// the former. We know the ranges have equal length, because
// they have the same number of local entries.
ArrayView<const GO> lhsElts = getNodeElementList ();
ArrayView<const GO> rhsElts = map.getNodeElementList ();
return std::equal (lhsElts.begin (), lhsElts.end (), rhsElts.begin ());
}
}
}
Example 6: testIdentifierModel
void testIdentifierModel(std::string fname, zgno_t xdim, zgno_t ydim, zgno_t zdim,
const RCP<const Comm<int> > &comm, bool consecutiveIds)
{
int rank = comm->getRank();
int fail = 0, gfail = 0;
std::bitset<Zoltan2::NUM_MODEL_FLAGS> modelFlags = 0;
if (consecutiveIds)
modelFlags.set(Zoltan2::IDS_MUST_BE_GLOBALLY_CONSECUTIVE);
RCP<const Zoltan2::Environment> env = rcp(new Zoltan2::Environment);
//////////////////////////////////////////////////////////////
// Use a Tpetra::CrsMatrix for the user data.
//////////////////////////////////////////////////////////////
typedef Tpetra::CrsMatrix<zscalar_t, zlno_t, zgno_t> tcrsMatrix_t;
UserInputForTests *uinput;
if (fname.size() > 0)
uinput = new UserInputForTests(testDataFilePath, fname, comm, true);
else
uinput = new UserInputForTests(xdim,ydim,zdim,string(""),comm, true, true);
RCP<tcrsMatrix_t > M = uinput->getUITpetraCrsMatrix();
zlno_t nLocalIds = M->getNodeNumRows();
zgno_t nGlobalIds = M->getGlobalNumRows();
ArrayView<const zgno_t> idList = M->getRowMap()->getNodeElementList();
std::set<zgno_t> idSet(idList.begin(), idList.end());
//////////////////////////////////////////////////////////////
// Create an IdentifierModel with this input
//////////////////////////////////////////////////////////////
typedef Zoltan2::XpetraCrsMatrixAdapter<tcrsMatrix_t> adapter_t;
typedef Zoltan2::MatrixAdapter<tcrsMatrix_t> base_adapter_t;
typedef Zoltan2::StridedData<zlno_t, zscalar_t> input_t;
RCP<const adapter_t> ia = Teuchos::rcp(new adapter_t(M));
Zoltan2::IdentifierModel<base_adapter_t> *model = NULL;
RCP<const base_adapter_t> base_ia =
Teuchos::rcp_dynamic_cast<const base_adapter_t>(ia);
try {
model = new Zoltan2::IdentifierModel<base_adapter_t>(
base_ia, env, comm, modelFlags);
}
catch (std::exception &e) {
std::cerr << rank << ") " << e.what() << std::endl;
fail = 1;
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
// Test the IdentifierModel interface
if (model->getLocalNumIdentifiers() != size_t(nLocalIds)) {
std::cerr << rank << ") getLocalNumIdentifiers "
<< model->getLocalNumIdentifiers() << " "
<< nLocalIds << std::endl;
fail = 2;
}
if (!fail && model->getGlobalNumIdentifiers() != size_t(nGlobalIds)) {
std::cerr << rank << ") getGlobalNumIdentifiers "
<< model->getGlobalNumIdentifiers() << " "
<< nGlobalIds << std::endl;
fail = 3;
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
ArrayView<const zgno_t> gids;
ArrayView<input_t> wgts;
model->getIdentifierList(gids, wgts);
if (!fail && gids.size() != nLocalIds) {
std::cerr << rank << ") getIdentifierList IDs "
<< gids.size() << " "
<< nLocalIds << std::endl;
fail = 5;
}
if (!fail && wgts.size() != 0) {
std::cerr << rank << ") getIdentifierList Weights "
<< wgts.size() << " "
<< 0 << std::endl;
fail = 6;
}
for (zlno_t i=0; !fail && i < nLocalIds; i++) {
std::set<zgno_t>::iterator next = idSet.find(gids[i]);
//......... (part of the code is omitted here) .........
Example 7: v
template<class T> inline
std::vector<T> Teuchos::createVector( const ArrayView<const T> &av )
{
std::vector<T> v(av.begin(), av.end());
return v;
}
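A hedged usage sketch of the helper above (values are illustrative, and the header location is an assumption): createVector copies the half-open range [av.begin(), av.end()) into a newly constructed std::vector.
#include <vector>
#include "Teuchos_ArrayView.hpp"   // assumed to declare Teuchos::createVector for ArrayView

void create_vector_demo()
{
  const double raw[3] = {1.0, 2.0, 3.0};
  Teuchos::ArrayView<const double> av(raw, 3);
  std::vector<double> v = Teuchos::createVector(av);   // v == {1.0, 2.0, 3.0}
}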
Example 8: assemble_interior_facets
//-----------------------------------------------------------------------------
void OpenMpAssembler::assemble_interior_facets(
GenericTensor& A,
const Form& a, UFC& _ufc,
std::shared_ptr<const MeshFunction<std::size_t>> domains,
std::shared_ptr<const MeshFunction<std::size_t>> cell_domains,
std::vector<double>* values)
{
warning("OpenMpAssembler::assemble_interior_facets is untested.");
// Extract mesh
const Mesh& mesh = a.mesh();
// Topological dimension
const std::size_t D = mesh.topology().dim();
dolfin_assert(!values);
// Skip assembly if there are no interior facet integrals
if (!_ufc.form.has_interior_facet_integrals())
return;
Timer timer("Assemble interior facets");
// Set number of OpenMP threads (from parameter systems)
omp_set_num_threads(parameters["num_threads"]);
// Get integral for sub domain (if any)
bool use_domains = domains && !domains->empty();
bool use_cell_domains = cell_domains && !cell_domains->empty();
if (use_domains)
{
dolfin_error("OpenMPAssembler.cpp",
"perform multithreaded assembly using OpenMP assembler",
"Subdomains are not yet handled");
}
// Color mesh
std::vector<std::size_t> coloring_type = a.coloring(D - 1);
mesh.color(coloring_type);
// Dummy UFC object since each thread needs to create its own UFC object
UFC ufc(_ufc);
// Form rank
const std::size_t form_rank = ufc.form.rank();
// Collect pointers to dof maps
std::vector<const GenericDofMap*> dofmaps;
for (std::size_t i = 0; i < form_rank; ++i)
dofmaps.push_back(a.function_space(i)->dofmap().get());
// Vector to hold dofs for cells
std::vector<std::vector<dolfin::la_index>> macro_dofs(form_rank);
// Interior facet integral
const ufc::interior_facet_integral* integral
= ufc.default_interior_facet_integral.get();
// Compute facets and facet-cell connectivity if not already computed
mesh.init(D - 1);
mesh.init(D - 1, D);
dolfin_assert(mesh.ordered());
// Get coloring data
std::map<const std::vector<std::size_t>,
std::pair<std::vector<std::size_t>,
std::vector<std::vector<std::size_t>>>>::const_iterator
mesh_coloring;
mesh_coloring = mesh.topology().coloring.find(coloring_type);
// Check that requested coloring has been computed
if (mesh_coloring == mesh.topology().coloring.end())
{
dolfin_error("OpenMPAssembler.cpp",
"perform multithreaded assembly using OpenMP assembler",
"Requested mesh coloring has not been computed");
}
// Get coloring data
const std::vector<std::vector<std::size_t>>& entities_of_color
= mesh_coloring->second.second;
// UFC cells and vertex coordinates
ufc::cell ufc_cell0, ufc_cell1;
std::vector<double> vertex_coordinates0, vertex_coordinates1;
// Assemble over interior facets (loop over colors, then facets of the same color)
const std::size_t num_colors = entities_of_color.size();
for (std::size_t color = 0; color < num_colors; ++color)
{
// Get the array of facet indices of current color
const std::vector<std::size_t>& colored_facets = entities_of_color[color];
// Number of facets of current color
const int num_facets = colored_facets.size();
// OpenMP test loop over cells of the same color
Progress p(AssemblerBase::progress_message(A.rank(), "interior facets"),
//......... (part of the code is omitted here) .........
Example 9: newdist
TEUCHOS_UNIT_TEST( Distributor, createfromsendsandrecvs)
{
using Teuchos::outArg;
using Teuchos::RCP;
using Teuchos::REDUCE_MIN;
using Teuchos::reduceAll;
using Teuchos::TimeMonitor;
using std::endl;
//typedef Tpetra::Vector<>::scalar_type SC;
auto comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
int my_proc = comm->getRank();
//int nprocs = comm->getSize(); // unused
// Set debug = true if you want immediate debug output to stderr.
const bool debug = true;
Teuchos::RCP<Teuchos::FancyOStream> outPtr =
debug ?
Teuchos::getFancyOStream (Teuchos::rcpFromRef (std::cerr)) :
Teuchos::rcpFromRef (out);
Teuchos::FancyOStream& myOut = *outPtr;
myOut << "Distributor createfromsendsandrecvs" << endl;
Teuchos::OSTab tab1 (myOut);
myOut << "Create CrsGraph, BlockCrsMatrix, and Vectors" << endl;
auto G = getTpetraGraph (comm);
auto A = getTpetraBlockCrsMatrix (G);
Tpetra::Vector<> X (A->getDomainMap ());
Tpetra::Vector<> Y (A->getRangeMap ());
myOut << "Get the CrsGraph's Import object" << endl;
RCP<const Tpetra::Import<> > importer = G->getImporter ();
if (importer.is_null ()) {
TEST_EQUALITY_CONST( comm->getSize (), 1 );
myOut << "The CrsGraph's Import object is null";
if (success) {
myOut << ". This is to be expected when the communicator only has 1 "
"process. We'll say this test succeeded and be done with it." << endl;
}
else {
myOut << ", but the communicator has " << comm->getSize () << " != 1 "
"processes. That means we didn't construct the test graph correctly. "
"It makes no sense to continue this test beyond this point." << endl;
}
return;
}
auto dist = importer->getDistributor();
myOut << "Build up arrays to construct equivalent Distributor" << endl;
const ArrayView<const int> procF = dist.getProcsFrom();
const ArrayView<const int> procT = dist.getProcsTo();
const ArrayView<const size_t> lenF = dist.getLengthsFrom();
const ArrayView<const size_t> lenT = dist.getLengthsTo();
// This section takes the consolidated procF and procT lists, together with their lengths,
// and rebuilds the unconsolidated lists of "from" and "to" processes. This is needed
// because Tpetra::constructExpert uses the unconsolidated procsFrom and procsTo.
Teuchos::Array<int> nuF;
Teuchos::Array<int> nuT;
int sumLenF=0;
for ( ArrayView<const size_t>::iterator b = lenF.begin(); b!=lenF.end(); ++b)
sumLenF+=(*b);
int sumLenT=0;
for ( ArrayView<const size_t>::iterator b = lenT.begin(); b!=lenT.end(); ++b)
sumLenT+=(*b);
nuF.resize(sumLenF);
nuT.resize(sumLenT);
size_t p=0;
for ( size_t j = 0; j<(size_t)procF.size(); ++j) {
size_t lend = p+lenF[j];
for (size_t i = p ; i < lend ; ++i)
nuF[i]=procF[j];
p+=lenF[j];
}
p=0;
for ( size_t j = 0; j<(size_t) procT.size(); ++j) {
size_t lend = p+lenT[j];
for (size_t i = p ; i < lend ; ++i)
nuT[i]=procT[j];
p+=lenT[j];
}
myOut << "Create a new Distributor using createFromSendsAndRecvs" << endl;
Tpetra::Distributor newdist(comm);
TEST_NOTHROW( newdist.createFromSendsAndRecvs(nuT,nuF) );
{
int lclSuccess = success ? 1 : 0;
int gblSuccess = 0;
reduceAll<int, int> (*comm, REDUCE_MIN, lclSuccess, outArg (gblSuccess) );
TEST_EQUALITY_CONST( gblSuccess, 1 );
if (gblSuccess != 1) {
myOut << "Test FAILED on some process; giving up early" << endl;
}
}
//......... (part of the code is omitted here) .........
Example 10: p
//-----------------------------------------------------------------------------
void
SparsityPatternBuilder::build(GenericSparsityPattern& sparsity_pattern,
const Mesh& mesh,
const std::vector<const GenericDofMap*> dofmaps,
bool cells,
bool interior_facets,
bool exterior_facets,
bool vertices,
bool diagonal,
bool init,
bool finalize)
{
// Get global dimensions and local range
const std::size_t rank = dofmaps.size();
std::vector<std::size_t> global_dimensions(rank);
std::vector<std::pair<std::size_t, std::size_t>> local_range(rank);
std::vector<ArrayView<const std::size_t>> local_to_global(rank);
std::vector<ArrayView<const int>> off_process_owner(rank);
for (std::size_t i = 0; i < rank; ++i)
{
global_dimensions[i] = dofmaps[i]->global_dimension();
local_range[i] = dofmaps[i]->ownership_range();
local_to_global[i].set(dofmaps[i]->local_to_global_unowned());
off_process_owner[i].set(dofmaps[i]->off_process_owner());
}
dolfin_assert(!dofmaps.empty());
dolfin_assert(dofmaps[0]);
std::vector<std::size_t> block_sizes(rank);
for (std::size_t i = 0; i < rank; ++i)
block_sizes[i] = dofmaps[i]->block_size;
// Initialise sparsity pattern
if (init)
{
sparsity_pattern.init(mesh.mpi_comm(), global_dimensions, local_range,
local_to_global, off_process_owner, block_sizes);
}
// Only build for rank >= 2 (matrices and higher order tensors) that
// require sparsity details
if (rank < 2)
return;
// Vector to store macro-dofs, if required (for interior facets)
std::vector<std::vector<dolfin::la_index>> macro_dofs(rank);
// Create vector to point to dofs
std::vector<ArrayView<const dolfin::la_index>> dofs(rank);
// FIXME: We iterate over the entire mesh even if the function space
// is restricted. This works out fine since the local dofmap
// returned on each cell will be an empty vector, but we might think
// about optimizing this further.
// Build sparsity pattern for cell integrals
if (cells)
{
Progress p("Building sparsity pattern over cells", mesh.num_cells());
for (CellIterator cell(mesh); !cell.end(); ++cell)
{
// Tabulate dofs for each dimension and get local dimensions
for (std::size_t i = 0; i < rank; ++i)
dofs[i] = dofmaps[i]->cell_dofs(cell->index());
// Insert non-zeroes in sparsity pattern
sparsity_pattern.insert_local(dofs);
p++;
}
}
// Build sparsity pattern for vertex/point integrals
const std::size_t D = mesh.topology().dim();
if (vertices)
{
mesh.init(0);
mesh.init(0, D);
std::vector<std::vector<dolfin::la_index>> global_dofs(rank);
//std::vector<const std::vector<dolfin::la_index>* > global_dofs_p(rank);
std::vector<std::vector<std::size_t>> local_to_local_dofs(rank);
// Resize local dof map vector
for (std::size_t i = 0; i < rank; ++i)
{
global_dofs[i].resize(dofmaps[i]->num_entity_dofs(0));
local_to_local_dofs[i].resize(dofmaps[i]->num_entity_dofs(0));
}
Progress p("Building sparsity pattern over vertices", mesh.num_vertices());
for (VertexIterator vert(mesh); !vert.end(); ++vert)
{
// Get mesh cell to which mesh vertex belongs (pick first)
Cell mesh_cell(mesh, vert->entities(D)[0]);
// Check that cell is not a ghost
dolfin_assert(!mesh_cell.is_ghost());
// Get local index of vertex with respect to the cell
//......... (part of the code is omitted here) .........
Example 11: rcp
//
// Test for Tpetra::CrsMatrix::sumIntoGlobalValues(), with nonowned
// rows. This test is like CrsMatrix_NonlocalSumInto.cpp, except that
// it attempts to sum into remote entries that don't exist on the
// process that owns them. Currently, CrsMatrix silently ignores
// these entries. (This is how CrsMatrix implements Import and Export
// when the target matrix has a fixed column Map. Data are
// redistributed between the two row Maps, and "filtered" by the
// target matrix's column Map.) This unit test verifies that behavior
// by ensuring the following:
//
// 1. fillComplete() (actually globalAssemble()) does not throw an
// exception when the incoming entries don't exist on the process
// that owns their rows.
//
// 2. The ignored entries are actually ignored. They must change
// neither the structure nor the values of the matrix.
//
// mfh 16 Dec 2012: The one-template-argument version breaks explicit
// instantiation. Ah well.
//
//TEUCHOS_UNIT_TEST_TEMPLATE_1_DECL( CrsMatrix, NonlocalSumInto_Ignore, CrsMatrixType )
TEUCHOS_UNIT_TEST_TEMPLATE_4_DECL( CrsMatrix, NonlocalSumInto_Ignore, LocalOrdinalType, GlobalOrdinalType, ScalarType, NodeType )
{
using Tpetra::createContigMapWithNode;
using Tpetra::createNonContigMapWithNode;
using Tpetra::global_size_t;
using Tpetra::Map;
using Teuchos::Array;
using Teuchos::ArrayView;
using Teuchos::as;
using Teuchos::av_const_cast;
using Teuchos::Comm;
using Teuchos::RCP;
using Teuchos::rcp;
using Teuchos::rcp_const_cast;
using Teuchos::OrdinalTraits;
using Teuchos::outArg;
using Teuchos::ParameterList;
using Teuchos::parameterList;
using Teuchos::reduceAll;
using Teuchos::ScalarTraits;
using Teuchos::tuple;
using Teuchos::TypeNameTraits;
using std::endl;
#if 0
// Extract typedefs from the CrsMatrix specialization.
typedef typename CrsMatrixType::scalar_type scalar_type;
typedef typename CrsMatrixType::local_ordinal_type local_ordinal_type;
typedef typename CrsMatrixType::global_ordinal_type global_ordinal_type;
typedef typename CrsMatrixType::node_type node_type;
#endif // 0
typedef ScalarType scalar_type;
typedef LocalOrdinalType local_ordinal_type;
typedef GlobalOrdinalType global_ordinal_type;
typedef NodeType node_type;
// Typedefs derived from the above canonical typedefs.
typedef ScalarTraits<scalar_type> STS;
typedef Map<local_ordinal_type, global_ordinal_type, node_type> map_type;
// Abbreviation typedefs.
typedef scalar_type ST;
typedef local_ordinal_type LO;
typedef global_ordinal_type GO;
typedef node_type NT;
typedef Tpetra::CrsMatrix<ST, LO, GO, NT> CrsMatrixType;
// CrsGraph specialization corresponding to CrsMatrixType (the
// CrsMatrix specialization).
typedef Tpetra::CrsGraph<LO, GO, NT, typename CrsMatrixType::mat_solve_type> crs_graph_type;
////////////////////////////////////////////////////////////////////
// HERE BEGINS THE TEST.
////////////////////////////////////////////////////////////////////
const global_size_t INVALID = OrdinalTraits<global_size_t>::invalid();
// Get the default communicator.
RCP<const Comm<int> > comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
const int numProcs = comm->getSize ();
const int myRank = comm->getRank ();
if (myRank == 0) {
out << "Test with " << numProcs << " process" << (numProcs != 1 ? "es" : "") << endl;
}
// This test doesn't make much sense if there is only one MPI
// process. We let it pass trivially in that case.
if (numProcs == 1) {
out << "Number of processes in world is one; test passes trivially." << endl;
return;
}
// Get a Kokkos Node instance. It would be nice if we could pass in
// parameters here, but threads don't matter for this test; it's a
// test for distributed-memory capabilities.
//......... (part of the code is omitted here) .........
Example 12: CheckConsistency
bool CheckConsistency() const {
const RCP<const Map> fullMap = getFullMap();
for (size_t i = 0; i < NumMaps(); i++) {
const RCP<const Map> map = getMap(i);
ArrayView<const GlobalOrdinal> mapGids = map->getNodeElementList();
for (typename ArrayView< const GlobalOrdinal >::const_iterator it = mapGids.begin(); it != mapGids.end(); it++)
if (fullMap->isNodeGlobalElement(*it) == false)
return false; // Global ID (*it) not found locally on this proc in fullMap -> error
}
return true;
}
Example 13: rcp
// This test is only meaningful in an MPI build.
TEUCHOS_UNIT_TEST( Map, replaceCommWithSubset )
{
typedef int local_ordinal_type;
typedef long global_ordinal_type;
typedef Tpetra::Map<local_ordinal_type, global_ordinal_type> map_type;
typedef Array<global_ordinal_type>::size_type size_type;
RCP<const Comm<int> > origComm = rcp (new MpiComm<int> (MPI_COMM_WORLD));
const int numProcs = origComm->getSize ();
const int myRank = origComm->getRank ();
// Create a Map in which all processes have a nonzero number of elements.
const size_type numGidsPerProc = 3;
const size_type myNumGids = numGidsPerProc;
Array<global_ordinal_type> myGids (myNumGids);
for (size_type k = 0; k < myNumGids; ++k) {
myGids[k] = as<global_ordinal_type> (myRank) *
as<global_ordinal_type> (numGidsPerProc) +
as<global_ordinal_type> (k);
}
const global_size_t globalNumElts = as<global_size_t> (numGidsPerProc) *
as<global_size_t> (numProcs);
const global_ordinal_type indexBase = 0;
RCP<const map_type> origMap (new map_type (globalNumElts, myGids (),
indexBase, origComm));
// Create a new communicator that excludes Proc 0.
// This will exercise recomputing the index base.
const int color = (myRank == 0) ? 0 : 1;
const int key = 0;
RCP<const Comm<int> > newComm = origComm->split (color, key);
if (myRank == 0) {
newComm = null;
}
// Create the new Map distributed over the subset communicator.
RCP<const map_type> newMap = origMap->replaceCommWithSubset (newComm);
// Test collectively for success, so the test doesn't hang on failure.
int localSuccess = 1;
std::ostringstream err;
if (myRank == 0) {
if (! newMap.is_null ()) {
localSuccess = 0;
err << "removeEmptyProcesses() should have returned null, but did not."
<< endl;
}
} else {
if (newMap.is_null ()) {
localSuccess = 0;
err << "removeEmptyProcesses() should not have returned null, but did."
<< endl;
} else {
RCP<const Comm<int> > theNewComm = newMap->getComm ();
if (theNewComm->getSize () != numProcs - 1) {
localSuccess = 0;
err << "New communicator should have " << (numProcs - 1)
<< " processes, but has " << theNewComm->getSize ()
<< " processes instead." << endl;
}
if (newMap->getGlobalNumElements () != origMap->getGlobalNumElements () - numGidsPerProc) {
localSuccess = 0;
err << "New Map has " << newMap->getGlobalNumElements () << " global "
<< "elements, but should have "
<< (origMap->getGlobalNumElements () - numGidsPerProc) << "." << endl;
}
if (newMap->getNodeNumElements () != origMap->getNodeNumElements ()) {
localSuccess = 0;
err << "New Map has " << newMap->getNodeNumElements () << " local "
<< "elements, but should have " << origMap->getNodeNumElements ()
<< "." << endl;
}
if (newMap->getIndexBase () != as<global_ordinal_type> (numGidsPerProc)) {
localSuccess = 0;
err << "New Map has index base " << newMap->getIndexBase ()
<< ", but should have index base " << numGidsPerProc << "." << endl;
}
ArrayView<const global_ordinal_type> myNewGids =
newMap->getNodeElementList ();
if (myNewGids.size () != myGids.size () ||
! std::equal (myNewGids.begin (), myNewGids.end (), myGids.begin ())) {
localSuccess = 0;
err << "New Map has local GID list " << toString (myNewGids) << ", but "
<< "should have local GID list " << toString (myGids ()) << "."
<< endl;
}
}
}
int globalSuccess = 0;
reduceAll (*origComm, REDUCE_MIN, localSuccess, outArg (globalSuccess));
if (globalSuccess == 0) {
if (myRank == 0) {
cerr << "TEST FAILED" << endl
<< "Error messages from each process:" << endl << endl;
}
//......... (part of the code is omitted here) .........