This article collects and organizes typical usage examples of the C++ method ArrayView::begin. If you have been asking yourself how ArrayView::begin works, what it is for, or where to find concrete usage examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class ArrayView.
The following presents 15 code examples of ArrayView::begin, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
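Before diving in, here is a minimal, self-contained sketch of the begin()/end() idiom that all of the examples below share. It assumes the Teuchos::ArrayView shipped with Trilinos; the dolfin, CUDA, and test-framework examples below use their own ArrayView classes with the same iterator surface.
#include <Teuchos_ArrayView.hpp>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> data{1, 2, 3, 4};
  // Non-owning view over existing storage.
  Teuchos::ArrayView<const int> view(data.data(), data.size());
  // begin()/end() return iterators (raw pointers in optimized builds),
  // so the view works with ordinary loops and <algorithm> functions.
  for (auto it = view.begin(); it != view.end(); ++it)
    std::cout << *it << ' ';
  std::cout << std::endl;
  return 0;
}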
Example 1: local_graph
//-----------------------------------------------------------------------------
Graph GraphBuilder::local_graph(const Mesh& mesh, const GenericDofMap& dofmap0,
const GenericDofMap& dofmap1)
{
Timer timer("Build local sparsity graph from dofmaps");
// Create empty graph
const std::size_t n = dofmap0.global_dimension();
Graph graph(n);
// Build graph
for (CellIterator cell(mesh); !cell.end(); ++cell)
{
const ArrayView<const dolfin::la_index> dofs0
= dofmap0.cell_dofs(cell->index());
const ArrayView<const dolfin::la_index> dofs1
= dofmap1.cell_dofs(cell->index());
//std::vector<dolfin::la_index>::const_iterator node0, node1;
for (auto node0 = dofs0.begin(); node0 != dofs0.end(); ++node0)
for (auto node1 = dofs1.begin(); node1 != dofs1.end(); ++node1)
if (*node0 != *node1)
graph[*node0].insert(*node1);
}
return graph;
}
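The heart of this example is the nested begin()/end() loop. Here is the same pattern in miniature, with plain std::vector standing in for the dofmap views (all names and values are illustrative):
#include <set>
#include <vector>

int main() {
  std::vector<int> dofs0{0, 1}, dofs1{1, 2};
  std::vector<std::set<int>> graph(3); // one adjacency set per dof
  for (auto node0 = dofs0.begin(); node0 != dofs0.end(); ++node0)
    for (auto node1 = dofs1.begin(); node1 != dofs1.end(); ++node1)
      if (*node0 != *node1)
        graph[*node0].insert(*node1); // dof *node0 couples to dof *node1
  return graph[0].count(1) == 1 ? 0 : 1;
}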
Example 2:
void DefaultProductMultiVector<Scalar>::initializeImpl(
const RCP<const DefaultProductVectorSpace<Scalar> > &productSpace_in,
const ArrayView<const RCP<MultiVectorType> > &multiVecs
)
{
// This function provides the "strong" guarantee (i.e. if an exception is
// thrown, then *this will be left in the original state as before the
// function was called)!
#ifdef TEUCHOS_DEBUG
TEUCHOS_ASSERT(nonnull(productSpace_in));
TEUCHOS_ASSERT_EQUALITY(multiVecs.size(), productSpace_in->numBlocks());
#endif // TEUCHOS_DEBUG
const RCP<const VectorSpaceBase<Scalar> >
theDomain = multiVecs[0]->domain();
const int numBlocks = productSpace_in->numBlocks();
#ifdef TEUCHOS_DEBUG
for ( int k = 0; k < numBlocks; ++k ) {
THYRA_ASSERT_VEC_SPACES(
Teuchos::TypeNameTraits<DefaultProductMultiVector<Scalar> >::name(),
*theDomain, *multiVecs[k]->domain()
);
}
#endif
productSpace_ = productSpace_in;
numBlocks_ = numBlocks;
multiVecs_.assign(multiVecs.begin(),multiVecs.end());
}
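The comment above promises the strong exception guarantee. As a hedged sketch (with stand-in types, not Thyra classes), one standard way to deliver that guarantee is to do all throwing work on temporaries and commit with non-throwing swaps only at the end:
#include <vector>

class Holder {
public:
  void initialize(const std::vector<double>& src) {
    // May throw; *this has not been modified yet.
    std::vector<double> tmp(src.begin(), src.end());
    // No-throw commit: only now does *this change.
    tmp.swap(data_);
  }
private:
  std::vector<double> data_;
};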
Example 3: make_pair
std::pair<CudaEvent, view_type>
copy(const ArrayView<
value_type,
HostCoordinator<
value_type,
PinnedAllocator<
value_type,
alignment>>> &from,
view_type &to) {
assert(from.size()==to.size());
#ifdef VERBOSE
using oType = ArrayView< value_type, HostCoordinator< value_type, PinnedAllocator< value_type, alignment>>>;
std::cout << util::pretty_printer<DeviceCoordinator>::print(*this)
<< "::" << util::blue("copy") << "(asynchronous, " << from.size() << ")"
<< "\n " << util::type_printer<oType>::print() << " @ " << from.data()
<< util::yellow(" -> ")
<< util::type_printer<view_type>::print() << " @ " << to.data() << std::endl;
#endif
auto status = cudaMemcpy(
reinterpret_cast<void*>(to.begin()),
reinterpret_cast<const void*>(from.begin()),
from.size()*sizeof(value_type),
cudaMemcpyHostToDevice
);
if(status != cudaSuccess) {
std::cerr << util::red("error") << " bad CUDA memcopy, unable to copy " << sizeof(value_type)*from.size() << " bytes from host to device";
exit(-1);
}
CudaEvent event;
return std::make_pair(event, to);
}
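For a simpler view of the same host-to-device transfer, here is a hedged sketch using the CUDA runtime API directly, with std::vector storage in place of the pinned HostCoordinator view. The begin() calls above are just raw pointers to contiguous storage, which is exactly what cudaMemcpy needs; host.data() plays that role here.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> host(256, 1.0f);
  float* dev = nullptr;
  if (cudaMalloc(&dev, host.size() * sizeof(float)) != cudaSuccess)
    return 1;
  // Synchronous host-to-device copy of the whole buffer.
  cudaError_t status = cudaMemcpy(dev, host.data(),
                                  host.size() * sizeof(float),
                                  cudaMemcpyHostToDevice);
  if (status != cudaSuccess) {
    std::fprintf(stderr, "bad CUDA memcopy: %s\n",
                 cudaGetErrorString(status));
    return 1;
  }
  cudaFree(dev);
  return 0;
}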
Example 4: assign
template<class T> inline
void ArrayRCP<T>::deepCopy(const ArrayView<const T>& av)
{
if (av.size() == 0) {
*this = null;
return;
}
assign(av.begin(), av.end());
}
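For reference, a minimal stand-in for the same two branches using std::vector: an empty source releases the destination, anything else is copied through the begin()/end() range (the helper name is hypothetical):
#include <cassert>
#include <vector>

void deep_copy(std::vector<int>& dst, const std::vector<int>& src) {
  if (src.empty()) { dst.clear(); return; } // empty source clears destination
  dst.assign(src.begin(), src.end());       // element-wise deep copy
}

int main() {
  std::vector<int> a{1, 2, 3}, b;
  deep_copy(b, a);
  assert(b == a);
  return 0;
}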
Example 5: verify_empty
inline void verify_empty(const ArrayView<T> &v)
{
// NOTE: implicitly tests data(), size(), begin(), cbegin(), and
// operator bool()
REQUIRE(!v);
REQUIRE(v.data() == nullptr);
REQUIRE(v.size() == 0);
REQUIRE(v.begin() == nullptr);
REQUIRE(v.cbegin() == nullptr);
}
Example 6:
void ArrayView<T>::assign(const ArrayView<const T>& array) const
{
debug_assert_valid_ptr();
debug_assert_not_null();
if (this->getRawPtr()==array.getRawPtr() && this->size()==array.size())
return; // Assignment to self
debug_assert_in_range(0,array.size());
std::copy( array.begin(), array.end(), this->begin() );
// Note: Above, in debug mode, the iterators are range checked! In
// optimized mode, these are raw pointers which should run very fast!
}
Example 7: CheckConsistency
bool CheckConsistency() const {
const RCP<const Map> fullMap = getFullMap();
for (size_t i = 0; i < NumMaps(); i++) {
const RCP<const Map> map = getMap(i);
ArrayView<const GlobalOrdinal> mapGids = map->getNodeElementList();
for (typename ArrayView< const GlobalOrdinal >::const_iterator it = mapGids.begin(); it != mapGids.end(); it++)
if (fullMap->isNodeGlobalElement(*it) == false)
return false; // Global ID (*it) not found locally on this proc in fullMap -> error
}
return true;
}
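The inner loop is a plain subset test. In miniature, with standard containers standing in for the Map types (illustrative names), the same check can be written with std::all_of:
#include <algorithm>
#include <set>
#include <vector>

// True when every global ID in the sub-list appears in the full set.
bool consistent(const std::set<long>& fullMap,
                const std::vector<long>& mapGids) {
  return std::all_of(mapGids.begin(), mapGids.end(),
                     [&](long gid) { return fullMap.count(gid) != 0; });
}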
Example 8: verify_N
inline void verify_N(const ArrayView<T> &v, int N)
{
REQUIRE(N > 0);
// NOTE: implicitly tests data(), size(), begin(), cbegin(), operator bool(),
// and operator[]
REQUIRE(v);
REQUIRE(v.data() != nullptr);
REQUIRE(v.begin() != nullptr);
REQUIRE(v.cbegin() != nullptr);
REQUIRE(v.size() == size_t(N));
for (int i = 0; i < N; ++i)
REQUIRE(v[i] == i);
}
Example 9: testIdentifierModel
void testIdentifierModel(std::string fname, zgno_t xdim, zgno_t ydim, zgno_t zdim,
const RCP<const Comm<int> > &comm, bool consecutiveIds)
{
int rank = comm->getRank();
int fail = 0, gfail = 0;
std::bitset<Zoltan2::NUM_MODEL_FLAGS> modelFlags = 0;
if (consecutiveIds)
modelFlags.set(Zoltan2::IDS_MUST_BE_GLOBALLY_CONSECUTIVE);
RCP<const Zoltan2::Environment> env = rcp(new Zoltan2::Environment);
//////////////////////////////////////////////////////////////
// Use a Tpetra::CrsMatrix for the user data.
//////////////////////////////////////////////////////////////
typedef Tpetra::CrsMatrix<zscalar_t, zlno_t, zgno_t> tcrsMatrix_t;
UserInputForTests *uinput;
if (fname.size() > 0)
uinput = new UserInputForTests(testDataFilePath, fname, comm, true);
else
uinput = new UserInputForTests(xdim,ydim,zdim,string(""),comm, true, true);
RCP<tcrsMatrix_t > M = uinput->getUITpetraCrsMatrix();
zlno_t nLocalIds = M->getNodeNumRows();
zgno_t nGlobalIds = M->getGlobalNumRows();
ArrayView<const zgno_t> idList = M->getRowMap()->getNodeElementList();
std::set<zgno_t> idSet(idList.begin(), idList.end());
//////////////////////////////////////////////////////////////
// Create an IdentifierModel with this input
//////////////////////////////////////////////////////////////
typedef Zoltan2::XpetraCrsMatrixAdapter<tcrsMatrix_t> adapter_t;
typedef Zoltan2::MatrixAdapter<tcrsMatrix_t> base_adapter_t;
typedef Zoltan2::StridedData<zlno_t, zscalar_t> input_t;
RCP<const adapter_t> ia = Teuchos::rcp(new adapter_t(M));
Zoltan2::IdentifierModel<base_adapter_t> *model = NULL;
RCP<const base_adapter_t> base_ia =
Teuchos::rcp_dynamic_cast<const base_adapter_t>(ia);
try {
model = new Zoltan2::IdentifierModel<base_adapter_t>(
base_ia, env, comm, modelFlags);
}
catch (std::exception &e) {
std::cerr << rank << ") " << e.what() << std::endl;
fail = 1;
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
// Test the IdentifierModel interface
if (model->getLocalNumIdentifiers() != size_t(nLocalIds)) {
std::cerr << rank << ") getLocalNumIdentifiers "
<< model->getLocalNumIdentifiers() << " "
<< nLocalIds << std::endl;
fail = 2;
}
if (!fail && model->getGlobalNumIdentifiers() != size_t(nGlobalIds)) {
std::cerr << rank << ") getGlobalNumIdentifiers "
<< model->getGlobalNumIdentifiers() << " "
<< nGlobalIds << std::endl;
fail = 3;
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
ArrayView<const zgno_t> gids;
ArrayView<input_t> wgts;
model->getIdentifierList(gids, wgts);
if (!fail && gids.size() != nLocalIds) {
std::cerr << rank << ") getIdentifierList IDs "
<< gids.size() << " "
<< nLocalIds << std::endl;
fail = 5;
}
if (!fail && wgts.size() != 0) {
std::cerr << rank << ") getIdentifierList Weights "
<< wgts.size() << " "
<< 0 << std::endl;
fail = 6;
}
for (zlno_t i=0; !fail && i < nLocalIds; i++) {
std::set<zgno_t>::iterator next = idSet.find(gids[i]);
//......... part of the code omitted here .........
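Although the example is truncated here, the idSet construction it showcases is simply std::set's iterator-range constructor; a minimal standalone sketch (values are illustrative):
#include <set>

int main() {
  const long idList[] = {10, 20, 30};
  // Same idiom as idSet(idList.begin(), idList.end()) above.
  std::set<long> idSet(idList, idList + 3);
  return idSet.count(20) == 1 ? 0 : 1;
}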
Example 10: if
bool
Map<LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType> >::
locallySameAs (const Map<LocalOrdinal, GlobalOrdinal, node_type>& map) const
{
using Teuchos::ArrayView;
typedef GlobalOrdinal GO;
typedef typename ArrayView<const GO>::size_type size_type;
// If both Maps are contiguous, we can compare their GID ranges
// easily by looking at the min and max GID on this process.
// Otherwise, we'll compare their GID lists. If only one Map is
// contiguous, then we only have to call getNodeElementList() on
// the noncontiguous Map. (It's best to avoid calling it on a
// contiguous Map, since it results in unnecessary storage that
// persists for the lifetime of the Map.)
if (getNodeNumElements () != map.getNodeNumElements ()) {
return false;
}
else if (getMinGlobalIndex () != map.getMinGlobalIndex () ||
getMaxGlobalIndex () != map.getMaxGlobalIndex ()) {
return false;
}
else {
if (isContiguous ()) {
if (map.isContiguous ()) {
return true; // min and max match, so the ranges match.
}
else { // *this is contiguous, but map is not contiguous
TEUCHOS_TEST_FOR_EXCEPTION(
! this->isContiguous () || map.isContiguous (), std::logic_error,
"Tpetra::Map::locallySameAs: BUG");
ArrayView<const GO> rhsElts = map.getNodeElementList ();
const GO minLhsGid = this->getMinGlobalIndex ();
const size_type numRhsElts = rhsElts.size ();
for (size_type k = 0; k < numRhsElts; ++k) {
const GO curLhsGid = minLhsGid + static_cast<GO> (k);
if (curLhsGid != rhsElts[k]) {
return false; // stop on first mismatch
}
}
return true;
}
}
else if (map.isContiguous ()) { // *this is not contiguous, but map is
TEUCHOS_TEST_FOR_EXCEPTION(
this->isContiguous () || ! map.isContiguous (), std::logic_error,
"Tpetra::Map::locallySameAs: BUG");
ArrayView<const GO> lhsElts = this->getNodeElementList ();
const GO minRhsGid = map.getMinGlobalIndex ();
const size_type numLhsElts = lhsElts.size ();
for (size_type k = 0; k < numLhsElts; ++k) {
const GO curRhsGid = minRhsGid + static_cast<GO> (k);
if (curRhsGid != lhsElts[k]) {
return false; // stop on first mismatch
}
}
return true;
}
else { // neither *this nor map are contiguous
// std::equal requires that the latter range is as large as
// the former. We know the ranges have equal length, because
// they have the same number of local entries.
ArrayView<const GO> lhsElts = getNodeElementList ();
ArrayView<const GO> rhsElts = map.getNodeElementList ();
return std::equal (lhsElts.begin (), lhsElts.end (), rhsElts.begin ());
}
}
}
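The final branch is the classic size-check-then-std::equal comparison. A minimal sketch with std::vector stand-ins (the function name is illustrative):
#include <algorithm>
#include <vector>

// std::equal assumes the second range is at least as long as the first,
// which is why the length check must come first.
bool locallySame(const std::vector<long>& lhs, const std::vector<long>& rhs) {
  return lhs.size() == rhs.size() &&
         std::equal(lhs.begin(), lhs.end(), rhs.begin());
}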
Example 11: newColMap
//......... part of the code omitted here .........
}
}
// If, for some reason, the running count doesn't match the
// original count, fill in any remaining GID spots with an
// obviously invalid value. We don't want to stop yet, because
// other processes might not have noticed this error; Map
// construction is a collective, so we can't stop now.
if (ownedPos != numOwnedGids) {
lclErr = true;
err << prefix << "On Process " << comm->getRank () << ", ownedPos = "
<< ownedPos << " != numOwnedGids = " << numOwnedGids << endl;
for (LO colMapLid = ownedPos; colMapLid < numOwnedGids; ++colMapLid) {
ownedGids[colMapLid] = Teuchos::OrdinalTraits<GO>::invalid ();
}
}
if (remotePos != numRemoteGids) {
lclErr = true;
err << prefix << "On Process " << comm->getRank () << ", remotePos = "
<< remotePos << " != numRemoteGids = " << numRemoteGids << endl;
for (LO colMapLid = remotePos; colMapLid < numRemoteGids; ++colMapLid) {
remoteGids[colMapLid] = Teuchos::OrdinalTraits<GO>::invalid ();
}
}
// Figure out what processes own what GIDs in the domain Map.
// Initialize the output array of remote PIDs with the "invalid
// process rank" -1, to help us test whether getRemoteIndexList
// did its job.
Array<int> remotePids (numRemoteGids, -1);
Array<LO> remoteLids;
if (makeImport) {
remoteLids.resize (numRemoteGids);
std::fill (remoteLids.begin (), remoteLids.end (),
Teuchos::OrdinalTraits<LO>::invalid ());
}
LookupStatus lookupStatus;
if (makeImport) {
lookupStatus = domMap.getRemoteIndexList (remoteGids, remotePids (),
remoteLids ());
} else {
lookupStatus = domMap.getRemoteIndexList (remoteGids, remotePids ());
}
// If any process returns IDNotPresent, then at least one of the
// remote indices was not present in the domain Map. This means
// that the Import object cannot be constructed, because of
// incongruity between the column Map and domain Map. This means
// that either the column Map or domain Map, or both, is
// incorrect.
const bool getRemoteIndexListFailed = (lookupStatus == IDNotPresent);
if (getRemoteIndexListFailed) {
lclErr = true;
err << prefix << "On Process " << comm->getRank () << ", some indices "
"in the input colMap (the original column Map) are not in domMap (the "
"domain Map). Either these indices or the domain Map is invalid. "
"Likely cause: For a nonsquare matrix, you must give the domain and "
"range Maps as input to fillComplete." << endl;
}
// Check that getRemoteIndexList actually worked, by making sure
// that none of the remote PIDs are -1.
for (LO k = 0; k < numRemoteGids; ++k) {
bool foundInvalidPid = false;
if (remotePids[k] == -1) {
foundInvalidPid = true;
Example 12: v
template<class T> inline
std::vector<T> Teuchos::createVector( const ArrayView<const T> &av )
{
std::vector<T> v(av.begin(), av.end());
return v;
}
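createVector amounts to std::vector's iterator-pair constructor. The equivalent with raw storage, since raw pointers are valid iterators just like ArrayView::begin() (values are illustrative):
#include <vector>

int main() {
  const int raw[] = {1, 2, 3, 4};
  // std::vector copies the range [raw, raw + 4) on construction.
  std::vector<int> v(raw, raw + 4);
  return v.size() == 4 ? 0 : 1;
}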
Example 13: assemble_interior_facets
//......... part of the code omitted here .........
{
// Get the array of facet indices of current color
const std::vector<std::size_t>& colored_facets = entities_of_color[color];
// Number of facets of current color
const int num_facets = colored_facets.size();
// OpenMP test loop over cells of the same color
Progress p(AssemblerBase::progress_message(A.rank(), "interior facets"),
mesh.num_facets());
#pragma omp parallel for schedule(guided, 20) firstprivate(ufc, ufc_cell0, ufc_cell1, vertex_coordinates0, vertex_coordinates1, macro_dofs, integral)
for (int facet_index = 0; facet_index < num_facets; ++facet_index)
{
// Facet index
const std::size_t index = colored_facets[facet_index];
// Create facet
const Facet facet(mesh, index);
// Only consider interior facets
if (facet.exterior())
{
p++;
continue;
}
// Get integral for sub domain (if any)
if (use_domains)
integral = ufc.get_interior_facet_integral((*domains)[facet]);
// Skip integral if zero
if (!integral)
continue;
// Get cells incident with facet (which cell is 0 and which is 1 is arbitrary)
dolfin_assert(facet.num_entities(D) == 2);
std::size_t cell_index_plus = facet.entities(D)[0];
std::size_t cell_index_minus = facet.entities(D)[1];
if (use_cell_domains && (*cell_domains)[cell_index_plus] < (*cell_domains)[cell_index_minus])
std::swap(cell_index_plus, cell_index_minus);
// The convention '+' = 0, '-' = 1 is from ffc
const Cell cell0(mesh, cell_index_plus);
const Cell cell1(mesh, cell_index_minus);
// Get local index of facet with respect to each cell
const std::size_t local_facet0 = cell0.index(facet);
const std::size_t local_facet1 = cell1.index(facet);
// Update UFC cell
cell0.get_vertex_coordinates(vertex_coordinates0);
cell0.get_cell_data(ufc_cell0, local_facet0);
cell1.get_vertex_coordinates(vertex_coordinates1);
cell1.get_cell_data(ufc_cell1, local_facet1);
// Update to current pair of cells
ufc.update(cell0, vertex_coordinates0, ufc_cell0,
cell1, vertex_coordinates1, ufc_cell1,
integral->enabled_coefficients());
// Tabulate dofs for each dimension on macro element
for (std::size_t i = 0; i < form_rank; i++)
{
// Get dofs for each cell
const ArrayView<const dolfin::la_index> cell_dofs0
= dofmaps[i]->cell_dofs(cell0.index());
const ArrayView<const dolfin::la_index> cell_dofs1
= dofmaps[i]->cell_dofs(cell1.index());
// Create space in macro dof vector
macro_dofs[i].resize(cell_dofs0.size() + cell_dofs1.size());
// Copy cell dofs into macro dof vector
std::copy(cell_dofs0.begin(), cell_dofs0.end(), macro_dofs[i].begin());
std::copy(cell_dofs1.begin(), cell_dofs1.end(),
macro_dofs[i].begin() + cell_dofs0.size());
}
// Tabulate interior facet tensor on macro element
integral->tabulate_tensor(ufc.macro_A.data(),
ufc.macro_w(),
vertex_coordinates0.data(),
vertex_coordinates1.data(),
local_facet0,
local_facet1,
ufc_cell0.orientation,
ufc_cell1.orientation);
// Add entries to global tensor
std::vector<ArrayView<const la_index>>
macro_dofs_p(macro_dofs.size());
for (std::size_t i = 0; i < macro_dofs.size(); ++i)
macro_dofs_p[i].set(macro_dofs[i]);
A.add_local(ufc.macro_A.data(), macro_dofs_p);
p++;
}
}
}
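The macro-dof step above concatenates two iterator ranges into one buffer. Here is that step in isolation with plain vectors (names and values are illustrative):
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> cell_dofs0{0, 1, 2}, cell_dofs1{3, 4, 5};
  std::vector<int> macro_dofs(cell_dofs0.size() + cell_dofs1.size());
  // First range at the front, second range immediately after it.
  std::copy(cell_dofs0.begin(), cell_dofs0.end(), macro_dofs.begin());
  std::copy(cell_dofs1.begin(), cell_dofs1.end(),
            macro_dofs.begin() + cell_dofs0.size());
  assert(macro_dofs[3] == 3);
  return 0;
}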
Example 14: rcp
// This test is only meaningful in an MPI build.
TEUCHOS_UNIT_TEST( Map, replaceCommWithSubset )
{
typedef int local_ordinal_type;
typedef long global_ordinal_type;
typedef Tpetra::Map<local_ordinal_type, global_ordinal_type> map_type;
typedef Array<global_ordinal_type>::size_type size_type;
RCP<const Comm<int> > origComm = rcp (new MpiComm<int> (MPI_COMM_WORLD));
const int numProcs = origComm->getSize ();
const int myRank = origComm->getRank ();
// Create a Map in which all processes have a nonzero number of elements.
const size_type numGidsPerProc = 3;
const size_type myNumGids = numGidsPerProc;
Array<global_ordinal_type> myGids (myNumGids);
for (size_type k = 0; k < myNumGids; ++k) {
myGids[k] = as<global_ordinal_type> (myRank) *
as<global_ordinal_type> (numGidsPerProc) +
as<global_ordinal_type> (k);
}
const global_size_t globalNumElts = as<global_size_t> (numGidsPerProc) *
as<global_size_t> (numProcs);
const global_ordinal_type indexBase = 0;
RCP<const map_type> origMap (new map_type (globalNumElts, myGids (),
indexBase, origComm));
// Create a new communicator that excludes Proc 0.
// This will exercise recomputing the index base.
const int color = (myRank == 0) ? 0 : 1;
const int key = 0;
RCP<const Comm<int> > newComm = origComm->split (color, key);
if (myRank == 0) {
newComm = null;
}
// Create the new Map distributed over the subset communicator.
RCP<const map_type> newMap = origMap->replaceCommWithSubset (newComm);
// Test collectively for success, so the test doesn't hang on failure.
int localSuccess = 1;
std::ostringstream err;
if (myRank == 0) {
if (! newMap.is_null ()) {
localSuccess = 0;
err << "removeEmptyProcesses() should have returned null, but did not."
<< endl;
}
} else {
if (newMap.is_null ()) {
localSuccess = 0;
err << "removeEmptyProcesses() should not have returned null, but did."
<< endl;
} else {
RCP<const Comm<int> > theNewComm = newMap->getComm ();
if (theNewComm->getSize () != numProcs - 1) {
localSuccess = 0;
err << "New communicator should have " << (numProcs - 1)
<< " processes, but has " << theNewComm->getSize ()
<< " processes instead." << endl;
}
if (newMap->getGlobalNumElements () != origMap->getGlobalNumElements () - numGidsPerProc) {
localSuccess = 0;
err << "New Map has " << newMap->getGlobalNumElements () << " global "
<< "elements, but should have "
<< (origMap->getGlobalNumElements () - numGidsPerProc) << "." << endl;
}
if (newMap->getNodeNumElements () != origMap->getNodeNumElements ()) {
localSuccess = 0;
err << "New Map has " << newMap->getNodeNumElements () << " local "
<< "elements, but should have " << origMap->getNodeNumElements ()
<< "." << endl;
}
if (newMap->getIndexBase () != as<global_ordinal_type> (numGidsPerProc)) {
localSuccess = 0;
err << "New Map has index base " << newMap->getIndexBase ()
<< ", but should have index base " << numGidsPerProc << "." << endl;
}
ArrayView<const global_ordinal_type> myNewGids =
newMap->getNodeElementList ();
if (myNewGids.size () != myGids.size () ||
! std::equal (myNewGids.begin (), myNewGids.end (), myGids.begin ())) {
localSuccess = 0;
err << "New Map has local GID list " << toString (myNewGids) << ", but "
<< "should have local GID list " << toString (myGids ()) << "."
<< endl;
}
}
}
int globalSuccess = 0;
reduceAll (*origComm, REDUCE_MIN, localSuccess, outArg (globalSuccess));
if (globalSuccess == 0) {
if (myRank == 0) {
cerr << "TEST FAILED" << endl
<< "Error messages from each process:" << endl << endl;
}
//......... part of the code omitted here .........
Example 15: newdist(
TEUCHOS_UNIT_TEST( Distributor, createfromsendsandrecvs)
{
using Teuchos::outArg;
using Teuchos::RCP;
using Teuchos::REDUCE_MIN;
using Teuchos::reduceAll;
using Teuchos::TimeMonitor;
using std::endl;
//typedef Tpetra::Vector<>::scalar_type SC;
auto comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
int my_proc = comm->getRank();
//int nprocs = comm->getSize(); // unused
// Set debug = true if you want immediate debug output to stderr.
const bool debug = true;
Teuchos::RCP<Teuchos::FancyOStream> outPtr =
debug ?
Teuchos::getFancyOStream (Teuchos::rcpFromRef (std::cerr)) :
Teuchos::rcpFromRef (out);
Teuchos::FancyOStream& myOut = *outPtr;
myOut << "Distributor createfromsendsandrecvs" << endl;
Teuchos::OSTab tab1 (myOut);
myOut << "Create CrsGraph, BlockCrsMatrix, and Vectors" << endl;
auto G = getTpetraGraph (comm);
auto A = getTpetraBlockCrsMatrix (G);
Tpetra::Vector<> X (A->getDomainMap ());
Tpetra::Vector<> Y (A->getRangeMap ());
myOut << "Get the CrsGraph's Import object" << endl;
RCP<const Tpetra::Import<> > importer = G->getImporter ();
if (importer.is_null ()) {
TEST_EQUALITY_CONST( comm->getSize (), 1 );
myOut << "The CrsGraph's Import object is null";
if (success) {
myOut << ". This is to be expected when the communicator only has 1 "
"process. We'll say this test succeeded and be done with it." << endl;
}
else {
myOut << ", but the communicator has " << comm->getSize () << " != 1 "
"processes. That means we didn't construct the test graph correctly. "
"It makes no sense to continue this test beyond this point." << endl;
}
return;
}
auto dist = importer->getDistributor();
myOut << "Build up arrays to construct equivalent Distributor" << endl;
const ArrayView<const int> procF = dist.getProcsFrom();
const ArrayView<const int> procT = dist.getProcsTo();
const ArrayView<const size_t> lenF = dist.getLengthsFrom();
const ArrayView<const size_t> lenT = dist.getLengthsTo();
// This section takes the consolidated procF and procT with the length and re-builds
// the un-consolidated lists of processors from and to that
// This is needed because in Tpetra::constructExpert, the unconsolidated procsFrom and ProcsTo
// will be used.
Teuchos::Array<int> nuF;
Teuchos::Array<int> nuT;
int sumLenF=0;
for ( ArrayView<const size_t>::iterator b = lenF.begin(); b!=lenF.end(); ++b)
sumLenF+=(*b);
int sumLenT=0;
for ( ArrayView<const size_t>::iterator b = lenT.begin(); b!=lenT.end(); ++b)
sumLenT+=(*b);
nuF.resize(sumLenF);
nuT.resize(sumLenT);
size_t p=0;
for ( size_t j = 0; j<(size_t)procF.size(); ++j) {
size_t lend = p+lenF[j];
for (size_t i = p ; i < lend ; ++i)
nuF[i]=procF[j];
p+=lenF[j];
}
p=0;
for ( size_t j = 0; j<(size_t) procT.size(); ++j) {
size_t lend = p+lenT[j];
for (size_t i = p ; i < lend ; ++i)
nuT[i]=procT[j];
p+=lenT[j];
}
myOut << "Create a new Distributor using createFromSendsAndRecvs" << endl;
Tpetra::Distributor newdist(comm);
TEST_NOTHROW( newdist.createFromSendsAndRecvs(nuT,nuF) );
{
int lclSuccess = success ? 1 : 0;
int gblSuccess = 0;
reduceAll<int, int> (*comm, REDUCE_MIN, lclSuccess, outArg (gblSuccess) );
TEST_EQUALITY_CONST( gblSuccess, 1 );
if (gblSuccess != 1) {
myOut << "Test FAILED on some process; giving up early" << endl;
}
}
//......... part of the code omitted here .........
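The two expansion loops near the end re-inflate the consolidated (process, length) pairs into one entry per message. A compact equivalent sketch (the function name is hypothetical):
#include <cstddef>
#include <vector>

std::vector<int> expand(const std::vector<int>& procs,
                        const std::vector<std::size_t>& lens) {
  std::vector<int> flat;
  for (std::size_t j = 0; j < procs.size(); ++j)
    // Repeat procs[j] exactly lens[j] times, as the nuF/nuT loops do.
    flat.insert(flat.end(), lens[j], procs[j]);
  return flat;
}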