This article collects typical usage examples of the C++ method Epetra_BlockMap::Comm. If you are wondering what exactly Epetra_BlockMap::Comm does, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of its containing class, Epetra_BlockMap.
Fifteen code examples of Epetra_BlockMap::Comm are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
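Before diving into the examples, here is a minimal sketch of the method itself: Comm() returns a const reference to the Epetra_Comm communicator the map was constructed with, which is typically queried for the process rank and count, or used to create directories and distributors as the examples below show. The map construction here is a made-up illustration, assuming an MPI build of Trilinos:

#include "Epetra_MpiComm.h"
#include "Epetra_BlockMap.h"

void queryMapCommunicator() {
  Epetra_MpiComm comm(MPI_COMM_WORLD);       // assumes MPI_Init has already been called
  Epetra_BlockMap map(100, 1, 0, comm);      // 100 global elements, block size 1, index base 0
  const Epetra_Comm & mapComm = map.Comm();  // the communicator the map was built with
  int myRank = mapComm.MyPID();              // rank of the calling process
  int nProcs = mapComm.NumProc();            // total number of processes
  (void)myRank; (void)nProcs;
}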
Example 1: newmap
// FIXME long long
Epetra_BlockMap
Epetra_Util::Create_OneToOne_BlockMap(const Epetra_BlockMap& usermap,
bool high_rank_proc_owns_shared)
{
// FIXME long long
//if usermap is already 1-to-1 then we'll just return a copy of it.
if (usermap.IsOneToOne()) {
Epetra_BlockMap newmap(usermap);
return(newmap);
}
int myPID = usermap.Comm().MyPID();
Epetra_Directory* directory = usermap.Comm().CreateDirectory(usermap);
int numMyElems = usermap.NumMyElements();
const int* myElems = usermap.MyGlobalElements();
int* owner_procs = new int[numMyElems*2];
int* sizes = owner_procs+numMyElems;
directory->GetDirectoryEntries(usermap, numMyElems, myElems, owner_procs,
0, sizes, high_rank_proc_owns_shared);
//we'll fill a list of map-elements which belong on this processor
int* myOwnedElems = new int[numMyElems*2];
int* ownedSizes = myOwnedElems+numMyElems;
int numMyOwnedElems = 0;
for(int i=0; i<numMyElems; ++i) {
int GID = myElems[i];
int owner = owner_procs[i];
if (myPID == owner) {
ownedSizes[numMyOwnedElems] = sizes[i];
myOwnedElems[numMyOwnedElems++] = GID;
}
}
Epetra_BlockMap one_to_one_map(-1, numMyOwnedElems, myOwnedElems,
ownedSizes, usermap.IndexBase(), usermap.Comm());
delete [] myOwnedElems;
delete [] owner_procs;
delete directory;
return(one_to_one_map);
}
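A possible call site for the routine above; the two GID lists are invented for illustration (on two processes, GID 5 appears on both ranks, so the input map is not one-to-one):

void collapseSharedMap(const Epetra_Comm & comm) {
  int gidsRank0[4] = {0, 1, 2, 5};
  int gidsRank1[4] = {3, 4, 5, 6};
  int * myGIDs = (comm.MyPID() == 0) ? gidsRank0 : gidsRank1;
  Epetra_BlockMap shared(-1, 4, myGIDs, 1, 0, comm);  // element size 1, index base 0
  // Afterwards each GID is owned by exactly one processor.
  Epetra_BlockMap oneToOne = Epetra_Util::Create_OneToOne_BlockMap(shared);
}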
Example 2: sourceGlobalElements
MapEpetra::MapEpetra ( const Epetra_BlockMap& blockMap, const Int offset, const Int maxId) :
M_commPtr(blockMap.Comm().Clone())
{
std::vector<Int> myGlobalElements;
Int* sourceGlobalElements ( blockMap.MyGlobalElements() );
Int const startIdOrig ( offset );
Int const endIdOrig ( startIdOrig + maxId );
const Int maxMyElements = std::min ( maxId, blockMap.NumMyElements() );
myGlobalElements.reserve ( maxMyElements );
// We consider that the source Map may not be ordered
for ( Int i (0); i < blockMap.NumMyElements(); ++i )
if ( sourceGlobalElements[i] < endIdOrig && sourceGlobalElements[i] >= startIdOrig )
{
myGlobalElements.push_back ( sourceGlobalElements[i] - offset );
}
//Sort myGlobalElements to avoid a bug in Trilinos (9?) when multiplying two matrices (A * B^T)
std::sort ( myGlobalElements.begin(), myGlobalElements.end() );
createMap ( -1,
myGlobalElements.size(),
&myGlobalElements.front(),
*M_commPtr );
}
Example 3:
Teuchos::Array<int>
Albany::NodeGIDsSolutionCullingStrategy::
selectedGIDs(const Epetra_BlockMap &sourceMap) const
{
Teuchos::Array<int> result;
{
Teuchos::Array<int> mySelectedGIDs;
// Subtract 1 to convert Exodus GIDs to our GIDs
for (int i=0; i<nodeGIDs_.size(); i++)
if (sourceMap.MyGID(nodeGIDs_[i] -1) ) mySelectedGIDs.push_back(nodeGIDs_[i] - 1);
const Epetra_Comm &comm = sourceMap.Comm();
{
int selectedGIDCount;
{
int mySelectedGIDCount = mySelectedGIDs.size();
comm.SumAll(&mySelectedGIDCount, &selectedGIDCount, 1);
}
result.resize(selectedGIDCount);
}
const int ierr = Epetra::GatherAllV(
comm,
mySelectedGIDs.getRawPtr(), mySelectedGIDs.size(),
result.getRawPtr(), result.size());
TEUCHOS_ASSERT(ierr == 0);
}
std::sort(result.begin(), result.end());
return result;
}
Example 4: roundRobinMapShared
ArrayRCP<zgno_t> roundRobinMap(const Epetra_BlockMap &emap)
{
const Epetra_Comm &comm = emap.Comm();
int proc = comm.MyPID();
int nprocs = comm.NumProc();
zgno_t basegid = emap.MinAllGID();
zgno_t maxgid = emap.MaxAllGID();
size_t nglobalrows = emap.NumGlobalElements();
return roundRobinMapShared(proc, nprocs, basegid, maxgid, nglobalrows);
}
Example 5: rcp
//EpetraMap_To_TpetraMap: takes in Epetra_Map object, converts it to its equivalent Tpetra::Map object,
//and returns an RCP pointer to this Tpetra::Map
Teuchos::RCP<const Tpetra_Map> Petra::EpetraMap_To_TpetraMap(const Epetra_BlockMap& epetraMap_,
const Teuchos::RCP<const Teuchos::Comm<int> >& commT_)
{
const std::size_t numElements = Teuchos::as<std::size_t>(epetraMap_.NumMyElements());
const auto indexBase = Teuchos::as<GO>(epetraMap_.IndexBase());
if (epetraMap_.DistributedGlobal() || epetraMap_.Comm().NumProc() == Teuchos::OrdinalTraits<int>::one()) {
Teuchos::Array<Tpetra_GO> indices(numElements);
int *epetra_indices = epetraMap_.MyGlobalElements();
for(LO i=0; i < numElements; i++)
indices[i] = epetra_indices[i];
const Tpetra::global_size_t computeGlobalElements = Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid();
return Teuchos::rcp(new Tpetra_Map(computeGlobalElements, indices, indexBase, commT_));
} else {
return Teuchos::rcp(new Tpetra_Map(numElements, indexBase, commT_, Tpetra::LocallyReplicated));
}
}
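For context, invoking a converter like this could look as follows. This is only a sketch: Tpetra_Map is Albany's typedef, epetraMap stands for any existing Epetra_BlockMap, and the Teuchos communicator is taken from Teuchos::DefaultComm rather than derived from the Epetra map's own communicator:

#include "Teuchos_DefaultComm.hpp"

// Hypothetical call site; epetraMap is assumed to exist already.
Teuchos::RCP<const Teuchos::Comm<int> > commT = Teuchos::DefaultComm<int>::getComm();
Teuchos::RCP<const Tpetra_Map> tpetraMap = Petra::EpetraMap_To_TpetraMap(epetraMap, commT);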
Example 6: a
// ============================================================================
std::shared_ptr<const Tpetra::Map<int,int>>
BorderingHelpers::
extendMapBy1(const Epetra_BlockMap & map)
{
const Teuchos::Comm<int> & comm = map.Comm();
// Create a new map that hosts one more entry.
const int numGlobalElements = map.NumGlobalElements() + 1;
const int numMyElements = map.NumMyElements();
int * myGlobalElements = map.MyGlobalElements();
// The following if-else construction just makes sure that
// the Tpetra::Map<int,int> constructor is called with an extended
// map on proc 0, and with the regular old stuff on all
// other procs.
std::shared_ptr<Tpetra::Map<int,int>> extendedMap;
if (comm.MyPID() == 0) {
// Copy over the global indices.
std::vector<int> a(numMyElements+1);
for (int k = 0; k < numMyElements; k++)
a[k] = myGlobalElements[k];
// Append one more.
a[numMyElements] = map.NumGlobalElements();
extendedMap = std::make_shared<Tpetra::Map<int,int>>(
numGlobalElements,
numMyElements+1,
&a[0],
map.IndexBase(),
comm
);
} else {
extendedMap = std::make_shared<Tpetra::Map<int,int>>(
numGlobalElements,
numMyElements,
myGlobalElements,
map.IndexBase(),
comm
);
}
return extendedMap;
}
Example 7: allGIDs
Teuchos::Array<int>
Albany::UniformSolutionCullingStrategy::
selectedGIDs(const Epetra_BlockMap &sourceMap) const
{
Teuchos::Array<int> allGIDs(sourceMap.NumGlobalElements());
{
const int ierr = Epetra::GatherAllV(
sourceMap.Comm(),
sourceMap.MyGlobalElements(), sourceMap.NumMyElements(),
allGIDs.getRawPtr(), allGIDs.size());
TEUCHOS_ASSERT(ierr == 0);
}
std::sort(allGIDs.begin(), allGIDs.end());
Teuchos::Array<int> result(numValues_);
const int stride = 1 + (allGIDs.size() - 1) / numValues_;
for (int i = 0; i < numValues_; ++i) {
result[i] = allGIDs[i * stride];
}
return result;
}
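The stride in this routine is a ceiling division, so the numValues_ selected GIDs span the whole sorted range. For instance, assuming 10 gathered GIDs and numValues_ = 3:

// stride = 1 + (10 - 1) / 3 = 4
// selected indices: 0, 4, 8 -> the first, a middle, and a near-last GID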
Example 8: send
//......... part of this code omitted .........
}
else {
//NumSend_ +=SourceMap.ElementSize(i); // Count total number of entries to send
NumSend_ +=SourceMap.MaxElementSize(); // Count total number of entries to send (currently need max)
ExportGIDs[NumExportIDs_] = SourceGIDs[i];
ExportLIDs_[NumExportIDs_++] = i;
}
}
if ( NumExportIDs_>0 && !SourceMap.DistributedGlobal())
ReportError("Warning in Epetra_Export: Serial Export has remote IDs. (Exporting from Subset of Source Map)", 1);
// Test for distributed cases
int ierr = 0;
if (SourceMap.DistributedGlobal()) {
if (NumExportIDs_>0) ExportPIDs_ = new int[NumExportIDs_];
ierr = TargetMap.RemoteIDList(NumExportIDs_, ExportGIDs, ExportPIDs_, 0); // Get remote PIDs
if( ierr ) throw ReportError("Error in Epetra_BlockMap::RemoteIDList", ierr);
//Get rid of IDs not in Target Map
if(NumExportIDs_>0) {
int cnt = 0;
for( i = 0; i < NumExportIDs_; ++i )
if( ExportPIDs_[i] == -1 ) ++cnt;
if( cnt ) {
int * NewExportGIDs = 0;
int * NewExportPIDs = 0;
int * NewExportLIDs = 0;
int cnt1 = NumExportIDs_-cnt;
if (cnt1) {
NewExportGIDs = new int[cnt1];
NewExportPIDs = new int[cnt1];
NewExportLIDs = new int[cnt1];
}
cnt = 0;
for( i = 0; i < NumExportIDs_; ++i )
if( ExportPIDs_[i] != -1 ) {
NewExportGIDs[cnt] = ExportGIDs[i];
NewExportPIDs[cnt] = ExportPIDs_[i];
NewExportLIDs[cnt] = ExportLIDs_[i];
++cnt;
}
assert(cnt==cnt1); // Sanity test
NumExportIDs_ = cnt;
delete [] ExportGIDs;
delete [] ExportPIDs_;
delete [] ExportLIDs_;
ExportGIDs = NewExportGIDs;
ExportPIDs_ = NewExportPIDs;
ExportLIDs_ = NewExportLIDs;
ReportError("Warning in Epetra_Export: Source IDs not found in Target Map (Do you want to export from subset of Source Map?)", 1 );
}
}
//Make sure Export IDs are ordered by processor
Epetra_Util util;
int * tmpPtr[2];
tmpPtr[0] = ExportLIDs_, tmpPtr[1] = ExportGIDs;
util.Sort(true,NumExportIDs_,ExportPIDs_,0,0,2,tmpPtr);
Distor_ = SourceMap.Comm().CreateDistributor();
// Construct list of exports that calling processor needs to send as a result
// of everyone asking for what it needs to receive.
ierr = Distor_->CreateFromSends( NumExportIDs_, ExportPIDs_, true, NumRemoteIDs_);
if (ierr!=0) throw ReportError("Error in Epetra_Distributor.CreateFromSends()", ierr);
// Use comm plan with ExportGIDs to find out who is sending to us and
// get proper ordering of GIDs for remote entries
// (that we will convert to LIDs when done).
if (NumRemoteIDs_>0) RemoteLIDs_ = new int[NumRemoteIDs_]; // Allocate space for LIDs in target that are
// going to get something from off-processor.
char * cRemoteGIDs = 0; // Do() will allocate memory for this buffer
int LenCRemoteGIDs = 0;
ierr = Distor_->Do(reinterpret_cast<char *> (ExportGIDs),
sizeof( int ),
LenCRemoteGIDs,
cRemoteGIDs);
if (ierr) throw ReportError("Error in Epetra_Distributor.Do()", ierr);
int * RemoteGIDs = reinterpret_cast<int*>(cRemoteGIDs);
// Remote IDs come in as GIDs, convert to LIDs
for (i=0; i< NumRemoteIDs_; i++) {
RemoteLIDs_[i] = TargetMap.LID(RemoteGIDs[i]);
//NumRecv_ += TargetMap.ElementSize(RemoteLIDs_[i]); // Count total number of entries to receive
NumRecv_ += TargetMap.MaxElementSize(); // Count total number of entries to receive (currently need max)
}
if (NumExportIDs_>0) delete [] ExportGIDs;
if (LenCRemoteGIDs>0) delete [] cRemoteGIDs;
}
if (NumTargetIDs>0) delete [] TargetGIDs;
if (NumSourceIDs>0) delete [] SourceGIDs;
return;
}
Example 9: assert
int
LOCA::Epetra::AugmentedOp::blockMap2PointMap(const Epetra_BlockMap& BlockMap,
Epetra_Map*& PointMap) const
{
// Generate an Epetra_Map that has the same number and distribution of points
// as the input Epetra_BlockMap object. The global IDs for the output PointMap
// are computed by using the MaxElementSize of the BlockMap. For variable block
// sizes this will create gaps in the GID space, but that is OK for Epetra_Maps.
int MaxElementSize = BlockMap.MaxElementSize();
int PtNumMyElements = BlockMap.NumMyPoints();
int * PtMyGlobalElements = 0;
if (PtNumMyElements>0) PtMyGlobalElements = new int[PtNumMyElements];
int NumMyElements = BlockMap.NumMyElements();
int curID = 0;
for (int i=0; i<NumMyElements; i++) {
int StartID = BlockMap.GID(i)*MaxElementSize;
int ElementSize = BlockMap.ElementSize(i);
for (int j=0; j<ElementSize; j++) PtMyGlobalElements[curID++] = StartID+j;
}
assert(curID==PtNumMyElements); // Sanity test
PointMap = new Epetra_Map(-1, PtNumMyElements, PtMyGlobalElements, BlockMap.IndexBase(), BlockMap.Comm());
if (PtNumMyElements>0) delete [] PtMyGlobalElements;
if (!BlockMap.PointSameAs(*PointMap)) {EPETRA_CHK_ERR(-1);} // Maps not compatible
return(0);
}
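To make the GID arithmetic in this routine concrete, consider a toy case (made up for illustration): one processor owns block GIDs 0 and 1 with element sizes 2 and 3, so MaxElementSize is 3:

// Block GID 0 (size 2): point GIDs 0*3+0 = 0 and 0*3+1 = 1
// Block GID 1 (size 3): point GIDs 1*3+0 = 3, 1*3+1 = 4, 1*3+2 = 5
// Point GID 2 is never generated -- a gap in the GID space, which Epetra_Map allows.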
Example 10: Construct
//......... part of this code omitted .........
int cnt = 0;
for( i = 0; i < NumRemoteIDs_; ++i )
if( RemotePIDs[i] == -1 ) ++cnt;
if( cnt ) {
if( NumRemoteIDs_-cnt ) {
int_type * NewRemoteGIDs = new int_type[NumRemoteIDs_-cnt];
int * NewRemotePIDs = new int[NumRemoteIDs_-cnt];
int * NewRemoteLIDs = new int[NumRemoteIDs_-cnt];
cnt = 0;
for( i = 0; i < NumRemoteIDs_; ++i )
if( RemotePIDs[i] != -1 ) {
NewRemoteGIDs[cnt] = RemoteGIDs[i];
NewRemotePIDs[cnt] = RemotePIDs[i];
NewRemoteLIDs[cnt] = targetMap.LID(RemoteGIDs[i]);
++cnt;
}
NumRemoteIDs_ = cnt;
delete [] RemoteGIDs;
delete [] RemotePIDs;
delete [] RemoteLIDs_;
RemoteGIDs = NewRemoteGIDs;
RemotePIDs = NewRemotePIDs;
RemoteLIDs_ = NewRemoteLIDs;
ReportError("Warning in Epetra_Import: Target IDs not found in Source Map (Do you want to import to subset of Target Map?)", 1);
}
else { //valid RemoteIDs empty
NumRemoteIDs_ = 0;
delete [] RemoteGIDs;
RemoteGIDs = 0;
delete [] RemotePIDs;
RemotePIDs = 0;
}
}
}
//Sort Remote IDs by processor so DoReverses will work
Epetra_Util util;
if(targetMap.GlobalIndicesLongLong())
{
util.Sort(true,NumRemoteIDs_,RemotePIDs,0,0, 1,&RemoteLIDs_, 1,(long long**)&RemoteGIDs);
}
else if(targetMap.GlobalIndicesInt())
{
int* ptrs[2] = {RemoteLIDs_, (int*)RemoteGIDs};
util.Sort(true,NumRemoteIDs_,RemotePIDs,0,0,2,&ptrs[0], 0, 0);
}
else
{
throw ReportError("Epetra_Import::Epetra_Import: GlobalIndices Internal Error", -1);
}
Distor_ = sourceMap.Comm().CreateDistributor();
// Construct list of exports that calling processor needs to send as a result
// of everyone asking for what it needs to receive.
bool Deterministic = true;
int_type* tmp_ExportLIDs; //Export IDs come in as GIDs
ierr = Distor_->CreateFromRecvs( NumRemoteIDs_, RemoteGIDs, RemotePIDs,
Deterministic, NumExportIDs_, tmp_ExportLIDs, ExportPIDs_ );
if (ierr!=0) throw ReportError("Error in Epetra_Distributor.CreateFromRecvs()", ierr);
// Export IDs come in as GIDs, convert to LIDs
if(targetMap.GlobalIndicesLongLong())
{
ExportLIDs_ = new int[NumExportIDs_];
for (i=0; i< NumExportIDs_; i++) {
if (ExportPIDs_[i] < 0) throw ReportError("targetMap requested a GID that is not in the sourceMap.", -1);
ExportLIDs_[i] = sourceMap.LID(tmp_ExportLIDs[i]);
NumSend_ += sourceMap.MaxElementSize(); // Count total number of entries to send (currently need max)
}
delete[] tmp_ExportLIDs;
}
else if(targetMap.GlobalIndicesInt())
{
for (i=0; i< NumExportIDs_; i++) {
if (ExportPIDs_[i] < 0) throw ReportError("targetMap requested a GID that is not in the sourceMap.", -1);
tmp_ExportLIDs[i] = sourceMap.LID(tmp_ExportLIDs[i]);
NumSend_ += sourceMap.MaxElementSize(); // Count total number of entries to send (currently need max)
}
ExportLIDs_ = reinterpret_cast<int *>(tmp_ExportLIDs); // Can't reach here if tmp_ExportLIDs is long long.
}
else
{
throw ReportError("Epetra_Import::Epetra_Import: GlobalIndices Internal Error", -1);
}
}
if( NumRemoteIDs_>0 ) delete [] RemoteGIDs;
if( NumRemoteIDs_>0 ) delete [] RemotePIDs;
if (NumTargetIDs>0) delete [] TargetGIDs;
if (NumSourceIDs>0) delete [] SourceGIDs;
return;
}
Example 11: BlockMapToMatrixMarketFile
int BlockMapToMatrixMarketFile( const char *filename, const Epetra_BlockMap & map,
const char * mapName,
const char *mapDescription,
bool writeHeader) {
int M = map.NumGlobalElements();
int N = 1;
if (map.MaxElementSize()>1) N = 2; // Non-trivial block map, store element sizes in second column
FILE * handle = 0;
if (map.Comm().MyPID()==0) { // Only PE 0 does this section
handle = fopen(filename,"w");
if (!handle) return(-1);
MM_typecode matcode;
mm_initialize_typecode(&matcode);
mm_set_matrix(&matcode);
mm_set_array(&matcode);
mm_set_integer(&matcode);
if (writeHeader==true) { // Only write header if requested (true by default)
if (mm_write_banner(handle, matcode)) return(-1);
if (mapName!=0) fprintf(handle, "%% \n%% %s\n", mapName);
if (mapDescription!=0) fprintf(handle, "%% %s\n%% \n", mapDescription);
}
}
if (writeHeader==true) { // Only write header if requested (true by default)
// Make an Epetra_IntVector of length numProc such that all elements are on PE 0 and
// the ith element is NumMyElements from the ith PE
Epetra_Map map1(-1, 1, 0, map.Comm()); // map with one element on each processor
int length = 0;
if (map.Comm().MyPID()==0) length = map.Comm().NumProc();
Epetra_Map map2(-1, length, 0, map.Comm());
Epetra_Import lengthImporter(map2, map1);
Epetra_IntVector v1(map1);
Epetra_IntVector v2(map2);
v1[0] = map.NumMyElements();
if (v2.Import(v1, lengthImporter, Insert)) return(-1);
if (map.Comm().MyPID()==0) {
fprintf(handle, "%s", "%Format Version:\n");
//int version = 1; // We may change the format scheme at a later date.
fprintf(handle, "%% %d \n", map.Comm().NumProc());
fprintf(handle, "%s", "%NumProc: Number of processors:\n");
fprintf(handle, "%% %d \n", map.Comm().NumProc());
fprintf(handle, "%s", "%MaxElementSize: Maximum element size:\n");
fprintf(handle, "%% %d \n", map.MaxElementSize());
fprintf(handle, "%s", "%MinElementSize: Minimum element size:\n");
fprintf(handle, "%% %d \n", map.MinElementSize());
fprintf(handle, "%s", "%IndexBase: Index base of map:\n");
fprintf(handle, "%% %d \n", map.IndexBase());
fprintf(handle, "%s", "%NumGlobalElements: Total number of GIDs in map:\n");
fprintf(handle, "%% %d \n", map.NumGlobalElements());
fprintf(handle, "%s", "%NumMyElements: BlockMap lengths per processor:\n");
for ( int i=0; i< v2.MyLength(); i++) fprintf(handle, "%% %d\n", v2[i]);
if (mm_write_mtx_array_size(handle, M, N)) return(-1);
}
}
if (BlockMapToHandle(handle, map)) return(-1); // Everybody calls this routine
if (map.Comm().MyPID()==0) // Only PE 0 opened a file
if (fclose(handle)) return(-1);
return(0);
}
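A possible call to the writer above; the filename and labels are invented for illustration, and all ranks must participate in the call even though only PE 0 touches the file:

int err = BlockMapToMatrixMarketFile("blockmap.mm", map,
                                     "MyBlockMap",
                                     "Row map of the test problem",
                                     true);
if (err != 0) { /* the write failed on some rank */ }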
Example 12: BlockMapToHandle
int BlockMapToHandle(FILE * handle, const Epetra_BlockMap & map) {
const Epetra_Comm & comm = map.Comm();
int numProc = comm.NumProc();
bool doSizes = !map.ConstantElementSize();
if (numProc==1) {
int * myElements = map.MyGlobalElements();
int * elementSizeList = 0;
if (doSizes) elementSizeList = map.ElementSizeList();
return(writeBlockMap(handle, map.NumGlobalElements(), myElements, elementSizeList, doSizes));
}
int numRows = map.NumMyElements();
Epetra_Map allGidsMap(-1, numRows, 0,comm);
Epetra_IntVector allGids(allGidsMap);
for (int i=0; i<numRows; i++) allGids[i] = map.GID(i);
Epetra_IntVector allSizes(allGidsMap);
for (int i=0; i<numRows; i++) allSizes[i] = map.ElementSize(i);
// Now construct a Map on PE 0 by strip-mining the rows of the input matrix map.
int numChunks = numProc;
int stripSize = allGids.GlobalLength()/numChunks;
int remainder = allGids.GlobalLength()%numChunks;
int curStart = 0;
int curStripSize = 0;
Epetra_IntSerialDenseVector importGidList;
Epetra_IntSerialDenseVector importSizeList;
if (comm.MyPID()==0) {
importGidList.Size(stripSize+1); // Set size of vector to max needed
if (doSizes) importSizeList.Size(stripSize+1); // Set size of vector to max needed
}
for (int i=0; i<numChunks; i++) {
if (comm.MyPID()==0) { // Only PE 0 does this part
curStripSize = stripSize;
if (i<remainder) curStripSize++; // handle leftovers
for (int j=0; j<curStripSize; j++) importGidList[j] = j + curStart;
curStart += curStripSize;
}
// The following import map will be non-trivial only on PE 0.
Epetra_Map importGidMap(-1, curStripSize, importGidList.Values(), 0, comm);
Epetra_Import gidImporter(importGidMap, allGidsMap);
Epetra_IntVector importGids(importGidMap);
if (importGids.Import(allGids, gidImporter, Insert)) return(-1);
Epetra_IntVector importSizes(importGidMap);
if (doSizes) if (importSizes.Import(allSizes, gidImporter, Insert)) return(-1);
// importGids (and importSizes, if non-trivial block map)
// now have a list of GIDs (and sizes, respectively) for the current strip of map.
int * myElements = importGids.Values();
int * elementSizeList = 0;
if (doSizes) elementSizeList = importSizes.Values();
// Finally we are ready to write this strip of the map to file
writeBlockMap(handle, importGids.MyLength(), myElements, elementSizeList, doSizes);
}
return(0);
}
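The strip-mining sizes in the loop above work out to a near-even split. For example, assuming 10 global rows and numProc = 3 chunks:

// stripSize = 10 / 3 = 3, remainder = 10 % 3 = 1
// chunk sizes: 4, 3, 3 (the first 'remainder' chunks each take one extra row)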
Example 13: MatrixMarketFileToMultiVector
int MatrixMarketFileToMultiVector( const char *filename, const Epetra_BlockMap & map, Epetra_MultiVector * & A) {
const int lineLength = 1025;
const int tokenLength = 35;
char line[lineLength];
char token1[tokenLength];
char token2[tokenLength];
char token3[tokenLength];
char token4[tokenLength];
char token5[tokenLength];
int M, N;
FILE * handle = 0;
handle = fopen(filename,"r"); // Open file
if (handle == 0)
EPETRA_CHK_ERR(-1); // file not found
// Check first line, which should be "%%MatrixMarket matrix array real general" (without quotes)
if(fgets(line, lineLength, handle)==0) return(-1);
if(sscanf(line, "%s %s %s %s %s", token1, token2, token3, token4, token5 )==0) return(-1);
if (strcmp(token1, "%%MatrixMarket") ||
strcmp(token2, "matrix") ||
strcmp(token3, "array") ||
strcmp(token4, "real") ||
strcmp(token5, "general")) return(-1);
// Next, strip off header lines (which start with "%")
do {
if(fgets(line, lineLength, handle)==0) return(-1);
} while (line[0] == '%');
// Next get problem dimensions: M, N
if(sscanf(line, "%d %d", &M, &N)==0) return(-1);
// Compute the offset for each processor for when it should start storing values
int numMyPoints = map.NumMyPoints();
int offset;
map.Comm().ScanSum(&numMyPoints, &offset, 1); // ScanSum will compute offsets for us
offset -= numMyPoints; // readjust for my PE
// Now construct vector/multivector
if (N==1)
A = new Epetra_Vector(map);
else
A = new Epetra_MultiVector(map, N);
double ** Ap = A->Pointers();
for (int j=0; j<N; j++) {
double * v = Ap[j];
// Now read in lines that we will discard
for (int i=0; i<offset; i++)
if(fgets(line, lineLength, handle)==0) return(-1);
// Now read in each value and store it to the local portion of the multivector if the row is owned.
double V;
for (int i=0; i<numMyPoints; i++) {
if(fgets(line, lineLength, handle)==0) return(-1);
if(sscanf(line, "%lg\n", &V)==0) return(-1);
v[i] = V;
}
// Now read in the rest of the lines to discard
for (int i=0; i < M-numMyPoints-offset; i++) {
if(fgets(line, lineLength, handle)==0) return(-1);
}
}
if (fclose(handle)) return(-1);
return(0);
}
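A matching read-side sketch, assuming map describes the desired parallel layout; note that the caller owns the multivector allocated by the routine:

Epetra_MultiVector * A = 0;
int err = MatrixMarketFileToMultiVector("vectors.mm", map, A);
if (err == 0) {
  A->Print(std::cout);  // inspect the imported values
  delete A;             // caller is responsible for deleting A
}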
Example 14: compute_graph_metrics
static int compute_graph_metrics(const Epetra_BlockMap &rowmap, const Epetra_BlockMap &colmap,
std::vector<std::vector<int> > &rows,
Isorropia::Epetra::CostDescriber &costs, double &myGoalWeight,
double &balance, int &numCuts, double &cutWgt, double &cutn, double &cutl)
{
const Epetra_Comm &comm = rowmap.Comm();
int myProc = comm.MyPID();
int myCols = colmap.NumMyElements();
double min, avg;
std::map<int, float> vertexWeights;
std::map<int, std::map<int, float > > graphEdgeWeights;
std::map<int, float> hyperEdgeWeights;
costs.getCosts(vertexWeights, // vertex global ID -> weight
graphEdgeWeights, // vertex global ID -> map from neighbor global ID to edge weight
hyperEdgeWeights); // hyperedge global ID -> weight
// Compute the balance
Epetra_Vector vwgt(rowmap);
int numVWgts = vertexWeights.size();
if (numVWgts > 0){
double *wvals = new double [numVWgts];
int *gids = new int [numVWgts];
std::map<int, float>::iterator vnext = vertexWeights.begin();
int i=0;
while (vnext != vertexWeights.end()){
wvals[i] = vnext->second;
gids[i] = vnext->first;
vnext++;
i++;
}
vwgt.ReplaceGlobalValues(i, wvals, gids);
delete [] wvals;
delete [] gids;
}
else{
vwgt.PutScalar(1.0); // default to unit weights
}
compute_balance(vwgt, myGoalWeight, min, balance, avg);
if (balance < 0){
return 1;
}
// Compute the measures based on cut edges
int *procID = new int [myCols];
int *GID = new int [myCols];
int *tmp = new int [myCols];
for (int i=0; i < myCols; i++){
GID[i] = colmap.GID(i);
}
rowmap.RemoteIDList(myCols, GID, procID, tmp); // matrix is square
delete [] tmp;
int haveEdgeWeights = graphEdgeWeights.size();
int localNumCuts = 0;
double localCutWgt = 0.0;
double localCutn = 0.0;
double localCutl = 0.0;
for (int i=0; i < rowmap.NumMyElements(); i++){
int vtxGID = rowmap.GID(i);
int numEdges = rows[i].size();
if (numEdges > 0){
std::map<int, std::map<int, float> >::iterator wnext;
if (haveEdgeWeights){
wnext = graphEdgeWeights.find(vtxGID);
if (wnext == graphEdgeWeights.end()){
std::cerr << "Graph edge weights are missing for vertex " << vtxGID;
std::cerr << std::endl;
return -1;
}
}
double heWeight = 0.0;
std::set<int> nbors;
for (int j=0; j < numEdges; j++){
int colGID = GID[rows[i][j]];
int nborProc = procID[rows[i][j]];
if (colGID == vtxGID) continue; // skip self edges
float wgt = 1.0;
//......... part of this code omitted .........
Example 15: MultiVectorTests
int MultiVectorTests(const Epetra_BlockMap & Map, int NumVectors, bool verbose)
{
(void)NumVectors;
const Epetra_Comm & Comm = Map.Comm();
int ierr = 0;
/* get number of processors and the name of this processor */
// int NumProc = Comm.getNumProc();
int MyPID = Comm.MyPID();
// Construct FEVector
if (verbose&&MyPID==0) cout << "constructing Epetra_FEVector" << endl;
Epetra_FEVector A(Map, 1);
//For an extreme test, we'll have each processor sum-in a 1.0 for All
//global ids.
int minGID = Map.MinAllGID();
int numGlobalIDs = Map.NumGlobalElements();
//For now we're going to have just one point associated with
//each GID (element).
int* ptIndices = new int[numGlobalIDs];
double* ptCoefs = new double[numGlobalIDs];
Epetra_IntSerialDenseVector epetra_indices(View, ptIndices, numGlobalIDs);
Epetra_SerialDenseVector epetra_coefs(View, ptCoefs, numGlobalIDs);
{for(int i=0; i<numGlobalIDs; ++i) {
ptIndices[i] = minGID+i;
ptCoefs[i] = 1.0;
}}
if (verbose&&MyPID==0) {
cout << "calling A.SumIntoGlobalValues with " << numGlobalIDs << " values"<<endl;
}
EPETRA_TEST_ERR( A.SumIntoGlobalValues(numGlobalIDs, ptIndices, ptCoefs), ierr);
if (verbose&&MyPID==0) {
cout << "calling A.SumIntoGlobalValues with " << numGlobalIDs << " values"<<endl;
}
EPETRA_TEST_ERR( A.SumIntoGlobalValues(epetra_indices, epetra_coefs), ierr);
if (verbose&&MyPID==0) {
cout << "calling A.GlobalAssemble()" << endl;
}
EPETRA_TEST_ERR( A.GlobalAssemble(), ierr );
if (verbose&&MyPID==0) {
cout << "after globalAssemble"<<endl;
}
if (verbose) {
A.Print(cout);
}
//now do a quick test of the copy constructor
Epetra_FEVector B(A);
double nrm2a, nrm2b;
A.Norm2(&nrm2a);
B.Norm2(&nrm2b);
if (nrm2a != nrm2b) {
cerr << "copy-constructor test failed, norm of copy doesn't equal"
<< " norm of original."<<endl;
return(-1);
}
delete [] ptIndices;
delete [] ptCoefs;
return(ierr);
}