This article collects typical usage examples of the C++ method ArrayView::getRawPtr. If you are unsure what ArrayView::getRawPtr does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples of the containing class, ArrayView.
The listing below presents 15 code examples of ArrayView::getRawPtr, sorted by popularity by default.
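Before the examples, here is a minimal standalone sketch (not taken from the listing below) of the typical reason to call ArrayView::getRawPtr: handing the contiguous memory behind a Teuchos::ArrayView to a C-style API that expects a raw pointer plus a length. The function fill_with_zeros is a hypothetical stand-in for any such API.
#include <cstring>
#include "Teuchos_Array.hpp"
#include "Teuchos_ArrayView.hpp"
// Hypothetical C-style API that only understands raw pointers.
static void fill_with_zeros(double *data, int n) {
  std::memset(data, 0, n * sizeof(double));
}
int main() {
  Teuchos::Array<double> storage(10, 1.0);      // owning container
  Teuchos::ArrayView<double> view = storage();  // non-owning view of all entries
  // getRawPtr() exposes the underlying buffer; the raw pointer must not
  // outlive the container that owns the memory.
  fill_with_zeros(view.getRawPtr(), static_cast<int>(view.size()));
  return 0;
}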
Example 1:
void
AbstractConcreteMatrixAdapter<
Epetra_RowMatrix,
DerivedMat>::getGlobalRowCopy_impl(global_ordinal_t row,
const ArrayView<global_ordinal_t>& indices,
const ArrayView<scalar_t>& vals,
size_t& nnz) const
{
using Teuchos::as;
local_ordinal_t local_row = this->row_map_->getLocalElement(row);
int nnz_ret = 0;
int rowmatrix_return_val
= this->mat_->ExtractMyRowCopy(as<int>(local_row),
as<int>(std::min(indices.size(), vals.size())),
nnz_ret,
vals.getRawPtr(),
indices.getRawPtr());
TEUCHOS_TEST_FOR_EXCEPTION( rowmatrix_return_val != 0,
std::runtime_error,
"Epetra_RowMatrix object returned error code "
<< rowmatrix_return_val << " from ExtractMyRowCopy." );
nnz = as<size_t>(nnz_ret);
// Epetra_CrsMatrix::ExtractMyRowCopy returns local column
// indices, so transform these into global indices
for( size_t i = 0; i < nnz; ++i ){
indices[i] = this->col_map_->getGlobalElement(indices[i]);
}
}
Developer: KineticTheory, Project: Trilinos, Lines: 30, Source: Amesos2_EpetraRowMatrix_AbstractMatrixAdapter_def.hpp
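A hedged sketch of the calling pattern this row-copy routine expects (adapter, row, and max_row_len are hypothetical stand-ins, not names from the example): preallocate both buffers to an upper bound on the row length; only the first nnz entries are meaningful on return.
Teuchos::Array<global_ordinal_t> indices(max_row_len);  // max_row_len: assumed bound
Teuchos::Array<scalar_t> vals(max_row_len);
size_t nnz = 0;
adapter.getGlobalRowCopy_impl(row, indices(), vals(), nnz);
// indices[0..nnz-1] now hold global column indices, vals[0..nnz-1] the values.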
Example 2: runtime_error
RCP<Epetra_CrsMatrix> UserInputForTests::getEpetraCrsMatrix()
{
if (M_.is_null())
throw std::runtime_error("could not read mtx file");
RCP<Epetra_CrsGraph> egraph = getEpetraCrsGraph();
eM_ = rcp(new Epetra_CrsMatrix(Copy, *egraph));
size_t maxRow = M_->getNodeMaxNumRowEntries();
int nrows = egraph->NumMyRows();
int base = egraph->IndexBase();
const Epetra_BlockMap &rowMap = egraph->RowMap();
const Epetra_BlockMap &colMap = egraph->ColMap();
Array<int> colGid(maxRow);
for (int i=0; i < nrows; i++){
ArrayView<const int> colLid;
ArrayView<const scalar_t> nz;
M_->getLocalRowView(i+base, colLid, nz);
size_t rowSize = colLid.size();
int rowGid = rowMap.GID(i+base);
for (size_t j=0; j < rowSize; j++){
colGid[j] = colMap.GID(colLid[j]);
}
eM_->InsertGlobalValues(
rowGid, rowSize, nz.getRawPtr(), colGid.getRawPtr());
}
eM_->FillComplete();
return eM_;
}
Example 3:
void EpetraCrsMatrixT<EpetraGlobalOrdinal>::getLocalRowCopy(LocalOrdinal LocalRow, const ArrayView<LocalOrdinal> &Indices, const ArrayView<Scalar> &Values, size_t &NumEntries) const {
XPETRA_MONITOR("EpetraCrsMatrixT::getLocalRowCopy");
int numEntries = -1;
XPETRA_ERR_CHECK(mtx_->ExtractMyRowCopy(LocalRow, Indices.size(), numEntries, Values.getRawPtr(), Indices.getRawPtr()));
NumEntries = numEntries;
}
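A hedged usage sketch for this wrapper (A and localRow are hypothetical; the per-row bound is assumed to come from the matrix itself):
size_t maxEntries = A.getNodeMaxNumRowEntries();  // assumed upper bound per row
Teuchos::Array<LocalOrdinal> inds(maxEntries);
Teuchos::Array<Scalar> vals(maxEntries);
size_t numEntries = 0;
A.getLocalRowCopy(localRow, inds(), vals(), numEntries);
// Only the first numEntries slots of inds and vals are valid afterwards.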
Example 4:
void MpiComm<Ordinal>::readySend(
const ArrayView<const char> &sendBuffer,
const int destRank
) const
{
TEUCHOS_COMM_TIME_MONITOR(
"Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::readySend(...)"
);
#ifdef TEUCHOS_DEBUG
TEST_FOR_EXCEPTION(
! ( 0 <= destRank && destRank < size_ ), std::logic_error
,"Error, destRank = " << destRank << " is not < 0 or is not"
" in the range [0,"<<size_-1<<"]!"
);
#endif // TEUCHOS_DEBUG
#ifdef TEUCHOS_MPI_COMM_DUMP
if(show_dump) {
dumpBuffer<Ordinal,char>(
"Teuchos::MpiComm<Ordinal>::readySend(...)"
,"sendBuffer", bytes, sendBuffer
);
}
#endif // TEUCHOS_MPI_COMM_DUMP
MPI_Rsend(
const_cast<char*>(sendBuffer.getRawPtr()),sendBuffer.size(),MPI_CHAR,destRank,tag_,*rawMpiComm_
);
// ToDo: What about error handling???
}
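The getRawPtr()/size() pair is the standard bridge between ArrayView and MPI's pointer-plus-count convention. A minimal sketch outside the Teuchos wrapper (send_bytes is hypothetical):
#include <mpi.h>
#include "Teuchos_ArrayView.hpp"
void send_bytes(const Teuchos::ArrayView<const char> &buf,
                int destRank, int tag, MPI_Comm comm)
{
  // MPI's C API is not const-correct, hence the const_cast -- the same
  // idiom as in MpiComm::readySend above. MPI_Send does not modify the buffer.
  MPI_Send(const_cast<char*>(buf.getRawPtr()),
           static_cast<int>(buf.size()), MPI_CHAR, destRank, tag, comm);
}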
Example 5:
REFCOUNTPTR_INLINE
Teuchos::ArrayView<T2>
Teuchos::av_const_cast(const ArrayView<T1>& p1)
{
T2 *ptr2 = const_cast<T2*>(p1.getRawPtr());
return ArrayView<T2>(ptr2, p1.size());
// Note: Above is just fine even if p1.get()==NULL!
}
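A short usage sketch for av_const_cast. Casting away const through the view is only safe when the underlying memory is genuinely writable, which it is here because the owning Array is non-const:
Teuchos::Array<int> a(5, 0);
Teuchos::ArrayView<const int> cview = a();  // read-only view of writable data
Teuchos::ArrayView<int> wview = Teuchos::av_const_cast<int>(cview);
wview[0] = 42;  // fine: 'a' owns mutable storage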
Example 6: order
int order(const RCP<OrderingSolution<typename Adapter::lno_t,
typename Adapter::gno_t> > &solution)
{
#ifndef HAVE_ZOLTAN2_AMD
throw std::runtime_error(
"BUILD ERROR: AMD requested but not compiled into Zoltan2.\n"
"Please set CMake flag Zoltan2_ENABLE_AMD:BOOL=ON.");
#else
typedef typename Adapter::lno_t lno_t;
typedef typename Adapter::gno_t gno_t; // gno_t is used for edgeIds below
typedef typename Adapter::scalar_t scalar_t;
int ierr = 0;
const size_t nVtx = model->getLocalNumVertices();
//cout << "Local num vertices" << nVtx << endl;
ArrayView<const gno_t> edgeIds;
ArrayView<const lno_t> offsets;
ArrayView<StridedData<lno_t, scalar_t> > wgts;
// wgts are ignored in AMD
model->getEdgeList(edgeIds, offsets, wgts);
AMDTraits<lno_t> AMDobj;
double Control[AMD_CONTROL];
double Info[AMD_INFO];
amd_defaults(Control);
amd_control(Control);
lno_t *perm;
perm = (lno_t *) (solution->getPermutationRCP().getRawPtr());
lno_t result = AMDobj.order(nVtx, offsets.getRawPtr(),
edgeIds.getRawPtr(), perm, Control, Info);
if (result != AMD_OK && result != AMD_OK_BUT_JUMBLED)
ierr = -1;
solution->setHavePerm(true);
return ierr;
#endif
}
Example 7: allGather
void GlobalMPISession::allGather(int localVal, const ArrayView<int> &allVals)
{
justInTimeInitialize();
TEUCHOS_ASSERT_EQUALITY(allVals.size(), getNProc());
#ifdef HAVE_MPI
MPI_Allgather( &localVal, 1, MPI_INT, allVals.getRawPtr(), 1, MPI_INT,
MPI_COMM_WORLD);
#else
allVals[0] = localVal;
#endif
}
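A hedged usage sketch: the output view must be pre-sized to the number of processes, as the assertion above enforces (myValue is hypothetical):
Teuchos::Array<int> allVals(Teuchos::GlobalMPISession::getNProc());
Teuchos::GlobalMPISession::allGather(myValue, allVals());
// allVals[r] now holds the value contributed by rank r.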
Example 8: tfecfFuncName
void EpetraCrsMatrixT<EpetraGlobalOrdinal>::replaceLocalValues(LocalOrdinal localRow, const ArrayView< const LocalOrdinal > &indices, const ArrayView< const Scalar > &values) {
XPETRA_MONITOR("EpetraCrsMatrixT::replaceLocalValues");
{
const std::string tfecfFuncName("replaceLocalValues");
TEUCHOS_TEST_FOR_EXCEPTION_CLASS_FUNC(! isFillActive(), std::runtime_error,
": Fill must be active in order to call this method. If you have already "
"called fillComplete(), you need to call resumeFill() before you can "
"replace values.");
TEUCHOS_TEST_FOR_EXCEPTION_CLASS_FUNC(values.size() != indices.size(),
std::runtime_error, ": values.size() must equal indices.size().");
}
XPETRA_ERR_CHECK(mtx_->ReplaceMyValues(localRow, indices.size(), values.getRawPtr(), indices.getRawPtr()));
}
Example 9: sizeof
REFCOUNTPTR_INLINE
Teuchos::ArrayView<T2>
Teuchos::av_reinterpret_cast(const ArrayView<T1>& p1)
{
typedef typename ArrayView<T1>::size_type size_type;
const int sizeOfT1 = sizeof(T1);
const int sizeOfT2 = sizeof(T2);
size_type size2 = (p1.size()*sizeOfT1) / sizeOfT2;
T2 *ptr2 = reinterpret_cast<T2*>(p1.getRawPtr());
return ArrayView<T2>(
ptr2, size2
#ifdef HAVE_TEUCHOS_ARRAY_BOUNDSCHECK
,arcp_reinterpret_cast<T2>(p1.access_private_arcp())
#endif
);
// Note: Above is just fine even if p1.get()==NULL!
}
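A usage sketch: viewing an array of doubles as raw bytes, e.g. for byte-level I/O. As the implementation above shows, the result's size is rescaled by the ratio of element sizes.
Teuchos::Array<double> d(4, 3.14);
Teuchos::ArrayView<double> dview = d();
Teuchos::ArrayView<char> bytes = Teuchos::av_reinterpret_cast<char>(dview);
// bytes.size() == 4 * sizeof(double): same memory, reinterpreted.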
Example 10:
static inline void ASSIGN_SCOTCH_NUM_ARRAY(
SCOTCH_Num **a,
ArrayView<const SCOTCH_Num> &b,
const RCP<const Environment> &env)
{
if (b.size() > 0)
*a = const_cast<SCOTCH_Num *> (b.getRawPtr());
else {
*a = NULL;
// Note: the Scotch manual says that if any rank has a non-NULL array,
// every process must have a non-NULL array. In practice,
// however, this condition is not needed for the arrays we use.
// For now, we'll set these arrays to NULL, because if we
// allocated a dummy value here, we'll have to track whether or
// not we can free it. KDD 1/23/14
}
}
Example 11: mpiCommRequest
RCP<CommRequest> MpiComm<Ordinal>::ireceive(
const ArrayView<char> &recvBuffer,
const int sourceRank
) const
{
TEUCHOS_COMM_TIME_MONITOR(
"Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::ireceive(...)"
);
#ifdef TEUCHOS_DEBUG
assertRank(sourceRank, "sourceRank");
#endif // TEUCHOS_DEBUG
MPI_Request rawMpiRequest = MPI_REQUEST_NULL;
MPI_Irecv(
const_cast<char*>(recvBuffer.getRawPtr()), recvBuffer.size(), MPI_CHAR, sourceRank,
tag_, *rawMpiComm_, &rawMpiRequest );
return mpiCommRequest(rawMpiRequest);
// ToDo: What about MPI error handling???
}
Example 12: globalWeightedCutsMessagesHopsByPart
void globalWeightedCutsMessagesHopsByPart(
const RCP<const Environment> &env,
const RCP<const Comm<int> > &comm,
const RCP<const GraphModel<typename Adapter::base_adapter_t> > &graph,
const ArrayView<const typename Adapter::part_t> &parts,
typename Adapter::part_t &numParts,
ArrayRCP<RCP<BaseClassMetrics<typename Adapter::scalar_t> > > &metrics,
ArrayRCP<typename Adapter::scalar_t> &globalSums,
const RCP <const MachineRep> machine)
{
env->debug(DETAILED_STATUS, "Entering globalWeightedCutsMessagesHopsByPart");
//////////////////////////////////////////////////////////
// Initialize return values
typedef typename Adapter::lno_t t_lno_t;
typedef typename Adapter::gno_t t_gno_t;
typedef typename Adapter::scalar_t t_scalar_t;
typedef typename Adapter::part_t part_t;
typedef typename Adapter::node_t t_node_t;
typedef typename Zoltan2::GraphModel<typename Adapter::base_adapter_t>::input_t t_input_t;
t_lno_t localNumVertices = graph->getLocalNumVertices();
t_gno_t globalNumVertices = graph->getGlobalNumVertices();
t_lno_t localNumEdges = graph->getLocalNumEdges();
ArrayView<const t_gno_t> Ids;
ArrayView<t_input_t> v_wghts;
graph->getVertexList(Ids, v_wghts);
typedef GraphMetrics<t_scalar_t> mv_t;
//get the edge ids, and weights
ArrayView<const t_gno_t> edgeIds;
ArrayView<const t_lno_t> offsets;
ArrayView<t_input_t> e_wgts;
graph->getEdgeList(edgeIds, offsets, e_wgts);
std::vector <t_scalar_t> edge_weights;
int numWeightPerEdge = graph->getNumWeightsPerEdge();
int numMetrics = 4; // "edge cuts", messages, hops, weighted hops
if (numWeightPerEdge) numMetrics += numWeightPerEdge * 2; // "weight n", weighted hops per weight n
// add some more metrics to the array
typedef typename ArrayRCP<RCP<BaseClassMetrics<typename Adapter::scalar_t> > >::size_type array_size_type;
metrics.resize( metrics.size() + numMetrics );
for( array_size_type n = metrics.size() - numMetrics; n < metrics.size(); ++n ){
mv_t * newMetric = new mv_t; // allocate the new memory
env->localMemoryAssertion(__FILE__,__LINE__,1,newMetric); // check errors
metrics[n] = rcp( newMetric); // create the new members
}
array_size_type next = metrics.size() - numMetrics; // MDM - this is most likely temporary to preserve the format here - we are now filling a larger array so we may not have started at 0
std::vector <part_t> e_parts (localNumEdges);
#ifdef HAVE_ZOLTAN2_MPI
if (comm->getSize() > 1)
{
Zoltan_DD_Struct *dd = NULL;
MPI_Comm mpicomm = Teuchos::getRawMpiComm(*comm);
int size_gnot = Zoltan2::TPL_Traits<ZOLTAN_ID_PTR, t_gno_t>::NUM_ID;
int debug_level = 0;
Zoltan_DD_Create(&dd, mpicomm,
size_gnot, 0,
sizeof(part_t), localNumVertices, debug_level);
ZOLTAN_ID_PTR ddnotneeded = NULL; // Local IDs not needed
Zoltan_DD_Update(
dd,
(ZOLTAN_ID_PTR) Ids.getRawPtr(),
ddnotneeded,
(char *) &(parts[0]),
NULL,
int(localNumVertices));
Zoltan_DD_Find(
dd,
(ZOLTAN_ID_PTR) edgeIds.getRawPtr(),
ddnotneeded,
(char *)&(e_parts[0]),
NULL,
localNumEdges,
NULL
);
Zoltan_DD_Destroy(&dd);
} else
#endif
{
std::map<t_gno_t,t_lno_t> global_id_to_local_index;
//else everything is local.
//we need a globalid to local index conversion.
//this does not exists till this point, so we need to create one.
for (t_lno_t i = 0; i < localNumVertices; ++i){
//......... part of the code omitted here .........
Example 13: setupSamePermuteExport
void
Export<LocalOrdinal,GlobalOrdinal,Node>::
setupSamePermuteExport (Teuchos::Array<GlobalOrdinal>& exportGIDs)
{
using Teuchos::arcp;
using Teuchos::Array;
using Teuchos::ArrayRCP;
using Teuchos::ArrayView;
using Teuchos::as;
using Teuchos::null;
typedef LocalOrdinal LO;
typedef GlobalOrdinal GO;
typedef typename ArrayView<const GO>::size_type size_type;
const Map<LO,GO,Node>& source = * (getSourceMap ());
const Map<LO,GO,Node>& target = * (getTargetMap ());
ArrayView<const GO> sourceGIDs = source.getNodeElementList ();
ArrayView<const GO> targetGIDs = target.getNodeElementList ();
#ifdef HAVE_TPETRA_DEBUG
ArrayView<const GO> rawSrcGids = sourceGIDs;
ArrayView<const GO> rawTgtGids = targetGIDs;
#else
const GO* const rawSrcGids = sourceGIDs.getRawPtr ();
const GO* const rawTgtGids = targetGIDs.getRawPtr ();
#endif // HAVE_TPETRA_DEBUG
const size_type numSrcGids = sourceGIDs.size ();
const size_type numTgtGids = targetGIDs.size ();
const size_type numGids = std::min (numSrcGids, numTgtGids);
// Compute numSameIDs_: the number of initial GIDs that are the
// same (and occur in the same order) in both Maps. The point of
// numSameIDs_ is for the common case of an Export where all the
// overlapping GIDs are at the end of the source Map, but
// otherwise the source and target Maps are the same. This allows
// a fast contiguous copy for the initial "same IDs."
size_type numSameGids = 0;
for ( ; numSameGids < numGids && rawSrcGids[numSameGids] == rawTgtGids[numSameGids]; ++numSameGids)
{} // third clause of 'for' does everything
ExportData_->numSameIDs_ = numSameGids;
// Compute permuteToLIDs_, permuteFromLIDs_, exportGIDs, and
// exportLIDs_. The first two arrays are IDs to be permuted, and
// the latter two arrays are IDs to sent out ("exported"), called
// "export" IDs.
//
// IDs to permute are in both the source and target Maps, which
// means we don't have to send or receive them, but we do have to
// rearrange (permute) them in general. IDs to send are in the
// source Map, but not in the target Map.
exportGIDs.resize (0);
Array<LO>& permuteToLIDs = ExportData_->permuteToLIDs_;
Array<LO>& permuteFromLIDs = ExportData_->permuteFromLIDs_;
Array<LO>& exportLIDs = ExportData_->exportLIDs_;
const LO LINVALID = Teuchos::OrdinalTraits<LO>::invalid ();
const LO numSrcLids = as<LO> (numSrcGids);
// Iterate over the source Map's LIDs, since we only need to do
// GID -> LID lookups for the target Map.
for (LO srcLid = numSameGids; srcLid < numSrcLids; ++srcLid) {
const GO curSrcGid = rawSrcGids[srcLid];
// getLocalElement() returns LINVALID if the GID isn't in the target Map.
// This saves us a lookup (which isNodeGlobalElement() would do).
const LO tgtLid = target.getLocalElement (curSrcGid);
if (tgtLid != LINVALID) { // if target.isNodeGlobalElement (curSrcGid)
permuteToLIDs.push_back (tgtLid);
permuteFromLIDs.push_back (srcLid);
} else {
exportGIDs.push_back (curSrcGid);
exportLIDs.push_back (srcLid);
}
}
// exportLIDs_ is the list of this process' LIDs that it has to
// send out. Since this is an Export, and therefore the target
// Map is nonoverlapping, we know that each export LID only needs
// to be sent to one process. However, the source Map may be
// overlapping, so multiple processes might send to the same LID
// on a receiving process.
TPETRA_ABUSE_WARNING(
getNumExportIDs() > 0 && ! source.isDistributed(),
std::runtime_error,
"::setupSamePermuteExport(): Source has export LIDs but Source is not "
"distributed globally." << std::endl
<< "Exporting to a submap of the target map.");
// Compute exportPIDs_ ("outgoing" process IDs).
//
// For each GID in exportGIDs (GIDs to which this process must
// send), find its corresponding owning process (a.k.a. "image")
// ID in the target Map. Store these process IDs in
// exportPIDs_. These are the process IDs to which the Export
// needs to send data.
//
// We only need to do this if the source Map is distributed;
// otherwise, the Export doesn't have to perform any
// communication.
if (source.isDistributed ()) {
ExportData_->exportPIDs_.resize(exportGIDs.size ());
// This call will assign any GID in the target Map with no
//......... part of the code omitted here .........
Example 14: removeUndesiredEdges
size_t removeUndesiredEdges(
const RCP<const Environment> &env,
int myRank,
bool removeSelfEdges,
bool removeOffProcessEdges,
bool removeOffGroupEdges,
ArrayView<const typename InputTraits<User>::zgid_t> &gids,
ArrayView<const typename InputTraits<User>::zgid_t> &gidNbors,
ArrayView<const int> &procIds,
ArrayView<StridedData<typename InputTraits<User>::lno_t,
typename InputTraits<User>::scalar_t> > &edgeWeights,
ArrayView<const typename InputTraits<User>::lno_t> &offsets,
ArrayRCP<const typename InputTraits<User>::zgid_t> &newGidNbors, // out
typename InputTraits<User>::scalar_t **&newWeights, // out
ArrayRCP<const typename InputTraits<User>::lno_t> &newOffsets) // out
{
typedef typename InputTraits<User>::zgid_t zgid_t;
typedef typename InputTraits<User>::scalar_t scalar_t;
typedef typename InputTraits<User>::lno_t lno_t;
size_t numKeep = 0;
size_t numVtx = offsets.size() - 1;
size_t numNbors = gidNbors.size();
env->localInputAssertion(__FILE__, __LINE__, "need more input",
(!removeSelfEdges ||
gids.size() >=
static_cast<typename ArrayView<const zgid_t>::size_type>(numVtx))
&&
(!removeOffProcessEdges ||
procIds.size() >=
static_cast<typename ArrayView<const int>::size_type>(numNbors)) &&
(!removeOffGroupEdges ||
procIds.size() >=
static_cast<typename ArrayView<const int>::size_type>(numNbors)),
BASIC_ASSERTION);
// initialize edge weight array
newWeights = NULL;
int eDim = edgeWeights.size();
// count desired edges
lno_t *offs = new lno_t [numVtx + 1];
env->localMemoryAssertion(__FILE__, __LINE__, numVtx+1, offs);
for (size_t i = 0; i < numVtx+1; i++) offs[i] = 0;
ArrayRCP<const lno_t> offArray = arcp(offs, 0, numVtx+1, true);
const lno_t *allOffs = offsets.getRawPtr();
const zgid_t *allIds = gidNbors.getRawPtr();
const zgid_t *vtx = NULL;
const int *proc = NULL;
if (gids.size() > 0)
vtx = gids.getRawPtr();
if (procIds.size() > 0)
proc = procIds.getRawPtr();
offs[0] = 0;
for (size_t i=0; i < numVtx; i++){
offs[i+1] = 0;
zgid_t vid = vtx ? vtx[i] : zgid_t(0);
for (lno_t j=allOffs[i]; j < allOffs[i+1]; j++){
int owner = proc ? proc[j] : 0;
bool keep = (!removeSelfEdges || vid != allIds[j]) &&
(!removeOffProcessEdges || owner == myRank) &&
(!removeOffGroupEdges || owner >= 0);
if (keep)
offs[i+1]++;
}
}
// from counters to offsets
for (size_t i=1; i < numVtx; i++)
offs[i+1] += offs[i];
numKeep = offs[numVtx];
// do we need a new neighbor list?
if (numNbors == numKeep){
newGidNbors = Teuchos::arcpFromArrayView(gidNbors);
newOffsets = Teuchos::arcpFromArrayView(offsets);
return numNbors;
}
else if (numKeep == 0){
newGidNbors = ArrayRCP<const zgid_t>(Teuchos::null);
newOffsets = offArray;
return 0;
}
// Build the subset neighbor lists (id, weight, and offset).
zgid_t *newGids = new zgid_t [numKeep];
env->localMemoryAssertion(__FILE__, __LINE__, numKeep, newGids);
//......... part of the code omitted here .........
Example 15: testIdentifierModel
//......... part of the code omitted here .........
zgno_t nGlobalIds = M->getGlobalNumRows();
ArrayView<const zgno_t> idList = M->getRowMap()->getNodeElementList();
std::set<zgno_t> idSet(idList.begin(), idList.end());
//////////////////////////////////////////////////////////////
// Create an IdentifierModel with this input
//////////////////////////////////////////////////////////////
typedef Zoltan2::XpetraCrsMatrixAdapter<tcrsMatrix_t> adapter_t;
typedef Zoltan2::MatrixAdapter<tcrsMatrix_t> base_adapter_t;
typedef Zoltan2::StridedData<zlno_t, zscalar_t> input_t;
RCP<const adapter_t> ia = Teuchos::rcp(new adapter_t(M));
Zoltan2::IdentifierModel<base_adapter_t> *model = NULL;
RCP<const base_adapter_t> base_ia =
Teuchos::rcp_dynamic_cast<const base_adapter_t>(ia);
try {
model = new Zoltan2::IdentifierModel<base_adapter_t>(
base_ia, env, comm, modelFlags);
}
catch (std::exception &e) {
std::cerr << rank << ") " << e.what() << std::endl;
fail = 1;
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
// Test the IdentifierModel interface
if (model->getLocalNumIdentifiers() != size_t(nLocalIds)) {
std::cerr << rank << ") getLocalNumIdentifiers "
<< model->getLocalNumIdentifiers() << " "
<< nLocalIds << std::endl;
fail = 2;
}
if (!fail && model->getGlobalNumIdentifiers() != size_t(nGlobalIds)) {
std::cerr << rank << ") getGlobalNumIdentifiers "
<< model->getGlobalNumIdentifiers() << " "
<< nGlobalIds << std::endl;
fail = 3;
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
ArrayView<const zgno_t> gids;
ArrayView<input_t> wgts;
model->getIdentifierList(gids, wgts);
if (!fail && gids.size() != nLocalIds) {
std::cerr << rank << ") getIdentifierList IDs "
<< gids.size() << " "
<< nLocalIds << std::endl;
fail = 5;
}
if (!fail && wgts.size() != 0) {
std::cerr << rank << ") getIdentifierList Weights "
<< wgts.size() << " "
<< 0 << std::endl;
fail = 6;
}
for (zlno_t i=0; !fail && i < nLocalIds; i++) {
std::set<zgno_t>::iterator next = idSet.find(gids[i]);
if (next == idSet.end()) {
std::cerr << rank << ") getIdentifierList gid not found "
<< gids[i] << std::endl;
fail = 7;
}
}
if (!fail && consecutiveIds) {
bool inARow = Zoltan2::IdentifierTraits<zgno_t>::areConsecutive(
gids.getRawPtr(), nLocalIds);
if (!inARow) {
std::cerr << rank << ") getIdentifierList not consecutive " << std::endl;
fail = 8;
}
}
gfail = globalFail(comm, fail);
if (gfail)
printFailureCode(comm, fail);
delete model;
delete uinput;
}