This article collects typical C++ usage examples of the Epetra_Comm::NumProc method. If you are wondering what Epetra_Comm::NumProc does and how to use it, the curated examples below should help; you can also browse further usage examples of the containing class, Epetra_Comm.
The following presents 15 code examples of Epetra_Comm::NumProc, ordered by popularity.
Example 1: random_distribution_1D
void random_distribution_1D(
  itype nrows,            // Number of global matrix rows
  Epetra_Comm &comm,      // Epetra communicator to be used in maps
  Epetra_Map **rowMap,    // OUTPUT: pointer to row map to be created
  long long offsetEpetra64
)
{
  // Randomly assign matrix rows to processor's row Map.
  int me = comm.MyPID();
  int np = comm.NumProc();

  vector<itype> myGlobalElements(1.2 * (nrows / np) + 1);
  int nMyRows = 0;

  srandom(1);
  double denom = (double) RAND_MAX + 1.;

  for (itype i = 0; i < nrows; i++) {
    int p = (int) ((double) np * (double) random() / denom);
    if (p == me) {
      if (nMyRows >= (int) myGlobalElements.size())
        myGlobalElements.resize(1.5 * myGlobalElements.size());
      myGlobalElements[nMyRows] = i + offsetEpetra64;
      nMyRows++;
    }
  }

  *rowMap = new Epetra_Map(nrows, nMyRows, &myGlobalElements[0], 0, comm);
}
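Usage sketch (not from the original page): a minimal MPI driver for the function above. Two assumptions are made here: itype is long long (as in the Epetra64 tests this excerpt resembles), and an MPI build of Epetra is used.

#include <mpi.h>
#include <iostream>
#include "Epetra_MpiComm.h"
#include "Epetra_Map.h"

typedef long long itype;  // assumption: the itype used by random_distribution_1D

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  Epetra_Map* rowMap = NULL;
  random_distribution_1D(1000, comm, &rowMap, 0LL);  // assumes the function above is in scope

  std::cout << "Proc " << comm.MyPID() << " of " << comm.NumProc()
            << " owns " << rowMap->NumMyElements() << " rows" << std::endl;

  delete rowMap;
  MPI_Finalize();
  return 0;
}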
Example 2: global_check_for_flag_on_proc_0
bool global_check_for_flag_on_proc_0(const char* flag,
                                     int numargs,
                                     char** strargs,
                                     const Epetra_Comm& comm)
{
  int mypid = comm.MyPID();
  int numprocs = comm.NumProc();

  int flag_found = 0;
  if (mypid == 0) {
    for (int i = 0; i < numargs; ++i) {
      if (strargs[i] == 0) continue;
      if (strcmp(flag, strargs[i]) == 0) {
        flag_found = 1;
        break;
      }
    }
  }

  if (numprocs > 1) {
    comm.Broadcast(&flag_found, 1, 0);
  }

  return flag_found == 1;
}
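Usage sketch (illustrative, assuming the function above is visible in the same translation unit): rank 0 scans the command line and the verdict is broadcast, so all ranks agree on the flag even when only rank 0 received it.

#include <mpi.h>
#include <iostream>
#include "Epetra_MpiComm.h"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  // All ranks get rank 0's verdict on the "-v" flag.
  bool verbose = global_check_for_flag_on_proc_0("-v", argc, argv, comm);
  if (verbose)
    std::cout << "rank " << comm.MyPID() << " of " << comm.NumProc()
              << ": verbose mode enabled" << std::endl;

  MPI_Finalize();
  return 0;
}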
Example 3: buildMatrix
Teuchos::RCP<Epetra_CrsMatrix> buildMatrix(int nx, Epetra_Comm & comm)
{
  Epetra_Map map(nx * comm.NumProc(), 0, comm);
  Teuchos::RCP<Epetra_CrsMatrix> mat = Teuchos::rcp(new Epetra_CrsMatrix(Copy, map, 3));

  int offsets[3]   = { -1, 0, 1 };
  double values[3] = { -1, 2, -1 };
  int maxGid = map.MaxAllGID();
  for (int lid = 0; lid < nx; lid++) {
    int gid = mat->GRID(lid);
    int numEntries = 3, offset = 0;
    int indices[3] = { gid + offsets[0],
                       gid + offsets[1],
                       gid + offsets[2] };
    if (gid == 0) {            // left end point
      numEntries = 2;
      offset = 1;
    }
    else if (gid == maxGid)    // right end point
      numEntries = 2;

    // insert rows
    mat->InsertGlobalValues(gid, numEntries, values + offset, indices + offset);
  }

  mat->FillComplete();
  return mat;
}
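Usage sketch (illustrative, not part of the original example): driving the builder above. The matrix is the standard 1D Laplacian stencil [-1 2 -1], with nx rows per process.

#include <mpi.h>
#include <iostream>
#include "Epetra_MpiComm.h"
#include "Epetra_CrsMatrix.h"
#include "Teuchos_RCP.hpp"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  // 50 rows per process; the global dimension is 50 * comm.NumProc().
  Teuchos::RCP<Epetra_CrsMatrix> A = buildMatrix(50, comm);
  if (comm.MyPID() == 0)
    std::cout << "global rows: " << A->NumGlobalRows()
              << ", global nonzeros: " << A->NumGlobalNonzeros() << std::endl;

  MPI_Finalize();
  return 0;
}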
Example 4: show_matrix
void show_matrix(const char *txt, const Epetra_RowMatrix &matrix, const Epetra_Comm &comm)
{
  int me = comm.MyPID();

  if (comm.NumProc() > 10) {
    if (me == 0) {
      std::cout << txt << std::endl;
      std::cout << "Printed matrix format only works for 10 or fewer processes" << std::endl;
    }
    return;
  }

  int numRows = matrix.NumGlobalRows();
  int numCols = matrix.NumGlobalCols();

  if ((numRows > 200) || (numCols > 500)) {
    if (me == 0) {
      std::cerr << txt << std::endl;
      std::cerr << "show_matrix: problem is too large to display" << std::endl;
    }
    return;
  }

  int *myA = new int[numRows * numCols];
  make_my_A(matrix, myA, comm);
  printMatrix(txt, myA, NULL, NULL, numRows, numCols, comm);

  delete [] myA;
}
Example 5: rebalanceEpetraProblem
int rebalanceEpetraProblem( RCP<Epetra_Map>         &Map,
                            RCP<Epetra_CrsMatrix>   &A,
                            RCP<Epetra_MultiVector> &B,
                            RCP<Epetra_MultiVector> &X,
                            Epetra_Comm             &Comm
                          )
{
  // Rebalance linear system across multiple processors.
  if ( Comm.NumProc() > 1 ) {
    RCP<Epetra_Map> newMap = rcp( new Epetra_Map( Map->NumGlobalElements(), Map->IndexBase(), Comm ) );
    RCP<Epetra_Import> newImport = rcp( new Epetra_Import( *newMap, *Map ) );

    // Create rebalanced versions of the linear system.
    RCP<Epetra_CrsMatrix> newA = rcp( new Epetra_CrsMatrix( BELOSEPETRACOPY, *newMap, 0 ) );
    newA->Import( *A, *newImport, Insert );
    newA->FillComplete();

    RCP<Epetra_MultiVector> newB = rcp( new Epetra_MultiVector( *newMap, B->NumVectors() ) );
    newB->Import( *B, *newImport, Insert );

    RCP<Epetra_MultiVector> newX = rcp( new Epetra_MultiVector( *newMap, X->NumVectors() ) );
    newX->Import( *X, *newImport, Insert );

    // Set the pointers to the new rebalanced linear system.
    A = newA;
    B = newB;
    X = newX;
    Map = newMap;
  }
  return (0);
}
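Usage sketch (illustrative): a system assembled entirely on rank 0 and then spread evenly across all ranks. The identity test matrix is an assumption for demonstration only, and the `using` aliases match the ones the function above relies on.

#include <mpi.h>
#include <iostream>
#include "Epetra_MpiComm.h"
#include "Epetra_Map.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_MultiVector.h"
#include "Teuchos_RCP.hpp"
using Teuchos::RCP;
using Teuchos::rcp;

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  // Everything on rank 0 to start (a common state after serial assembly).
  int n = 100;
  RCP<Epetra_Map> map = rcp(new Epetra_Map(n, comm.MyPID() == 0 ? n : 0, 0, comm));
  RCP<Epetra_CrsMatrix> A = rcp(new Epetra_CrsMatrix(Copy, *map, 1));
  for (int i = 0; i < map->NumMyElements(); ++i) {  // identity matrix, for illustration
    int gid = map->GID(i);
    double one = 1.0;
    A->InsertGlobalValues(gid, 1, &one, &gid);
  }
  A->FillComplete();
  RCP<Epetra_MultiVector> B = rcp(new Epetra_MultiVector(*map, 1));
  RCP<Epetra_MultiVector> X = rcp(new Epetra_MultiVector(*map, 1));

  rebalanceEpetraProblem(map, A, B, X, comm);
  std::cout << "rank " << comm.MyPID() << " now owns "
            << map->NumMyElements() << " rows" << std::endl;

  MPI_Finalize();
  return 0;
}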
Example 6: generateHyprePrintOut
int generateHyprePrintOut(const char *filename, const Epetra_Comm &comm){
  int MyPID = comm.MyPID();
  int NumProc = comm.NumProc();

  int N = 100;
  int ilower = MyPID * N;
  int iupper = (MyPID+1)*N - 1;

  double filePID = (double)MyPID / (double)100000;
  std::ostringstream stream;
  // Using setprecision() puts it in the std::string
  stream << std::setiosflags(std::ios::fixed) << std::setprecision(5) << filePID;
  // Then just ignore the first character
  std::string fileName(filename);
  fileName += stream.str().substr(1,7);

  std::ofstream myfile(fileName.c_str());

  if(myfile.is_open()){
    myfile << ilower << " " << iupper << " " << ilower << " " << iupper << std::endl;
    for(int i = ilower; i <= iupper; i++){
      for(int j = i-5; j <= i+5; j++){
        if(j >= 0 && j < N*NumProc)
          myfile << i << " " << j << " " << (double)rand()/(double)RAND_MAX << std::endl;
      }
    }
    myfile.close();
    return 0;
  } else {
    std::cout << "\nERROR:\nCouldn't open file.\n";
    return -1;
  }
}
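Usage sketch (illustrative): each rank writes its own file, distinguished by the five-digit PID suffix that the substr trick above produces. The base file name is an assumption.

#include <mpi.h>
#include "Epetra_MpiComm.h"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);
  // Writes "hypre_test.00000", "hypre_test.00001", ... one file per rank.
  int err = generateHyprePrintOut("hypre_test", comm);
  MPI_Finalize();
  return err;
}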
Example 7: Poisson2dOperator
//==============================================================================
Poisson2dOperator::Poisson2dOperator(int nx, int ny, const Epetra_Comm & comm)
  : nx_(nx),
    ny_(ny),
    useTranspose_(false),
    comm_(comm),
    map_(0),
    numImports_(0),
    importIDs_(0),
    importMap_(0),
    importer_(0),
    importX_(0),
    Label_(0) {

  Label_ = "2D Poisson Operator";
  int numProc = comm.NumProc(); // Get number of processors
  int myPID = comm.MyPID();     // My rank

  if (2*numProc > ny) { // ny must be >= 2*numProc (to avoid degenerate cases)
    ny = 2*numProc;
    ny_ = ny;
    std::cout << " Increasing ny to " << ny << " to avoid degenerate distribution on "
              << numProc << " processors." << std::endl;
  }

  int chunkSize = ny/numProc;
  int remainder = ny%numProc;
  if (myPID+1 <= remainder) chunkSize++; // add on remainder

  myny_ = chunkSize;
  map_ = new Epetra_Map(-1LL, ((long long)nx)*chunkSize, 0, comm_);

  if (numProc > 1) {
    // Build import GID list to build import map and importer
    if (myPID > 0) numImports_ += nx;
    if (myPID+1 < numProc) numImports_ += nx;
    if (numImports_ > 0) importIDs_ = new long long[numImports_];
    long long * ptr = importIDs_;
    long long minGID = map_->MinMyGID64();
    long long maxGID = map_->MaxMyGID64();

    if (myPID > 0) for (int i = 0; i < nx; i++) *ptr++ = minGID - nx + i;
    if (myPID+1 < numProc) for (int i = 0; i < nx; i++) *ptr++ = maxGID + i + 1;

    // At the end of the above step importIDs_ will have a list of global IDs that are needed
    // to compute the matrix multiplication operation on this processor. Now build import map
    // and importer
    importMap_ = new Epetra_Map(-1LL, numImports_, importIDs_, 0LL, comm_);
    importer_ = new Epetra_Import(*importMap_, *map_);
  }
}
Example 8: alternate_import_constructor_test
int alternate_import_constructor_test(Epetra_Comm& Comm) {
  int rv = 0;
  int nodes_per_proc = 10;
  int numprocs = Comm.NumProc();
  int mypid = Comm.MyPID();

  // Only run if we have multiple procs & MPI
  if(numprocs == 1) return 0;
#ifndef HAVE_MPI
  return 0;
#endif

  // Build Map 1 - linear
  Epetra_Map Map1((long long)-1, nodes_per_proc, (long long)0, Comm);

  // Build Map 2 - mod striped
  std::vector<long long> MyGIDs(nodes_per_proc);
  for(int i = 0; i < nodes_per_proc; i++)
    MyGIDs[i] = (mypid*nodes_per_proc + i) % numprocs;
  Epetra_Map Map2((long long)-1, nodes_per_proc, &MyGIDs[0], (long long)0, Comm);

  // For testing
  Epetra_LongLongVector Source(Map1), Target(Map2);

  // Build Import 1 - normal
  Epetra_Import Import1(Map2, Map1);
  rv = rv || test_import_gid("Alt test: 2 map constructor", Source, Target, Import1);

  // Build Import 2 - no-comm constructor
  int Nremote = Import1.NumRemoteIDs();
  const int * RemoteLIDs = Import1.RemoteLIDs();
  std::vector<int> RemotePIDs(Nremote+1); // sized Nremote+1 so &RemotePIDs[0] is valid even when Nremote == 0
  std::vector<int> AllPIDs;
  Epetra_Util::GetPids(Import1, AllPIDs, true);

  for(int i = 0; i < Nremote; i++) {
    RemotePIDs[i] = AllPIDs[RemoteLIDs[i]];
  }
  Epetra_Import Import2(Import1.TargetMap(), Import1.SourceMap(), Nremote, &RemotePIDs[0],
                        Import1.NumExportIDs(), Import1.ExportLIDs(), Import1.ExportPIDs());

  rv = rv || test_import_gid("Alt test: no comm constructor", Source, Target, Import2);

  // Build Import 3 - Remotes only
  Epetra_Import Import3(Import1.TargetMap(), Import1.SourceMap(), Nremote, &RemotePIDs[0]);
  rv = rv || test_import_gid("Alt test: remote only constructor", Source, Target, Import3);

  return rv;
}
Example 9: ConstructAutoUniform
//==============================================================================
// Epetra_BlockMap constructor function for an Epetra-defined uniform linear
// distribution of constant-size elements.
void Epetra_BlockMap::ConstructAutoUniform(long long NumGlobal_Elements, int Element_Size,
                                           int Index_Base, const Epetra_Comm& comm, bool IsLongLong)
{
  // Each processor gets roughly numGlobalPoints/p points.
  // This routine automatically defines a linear partitioning of a
  // map with numGlobalPoints across the processors
  // specified in the given Epetra_Comm.
  if (NumGlobal_Elements < 0)
    throw ReportError("NumGlobal_Elements = " + toString(NumGlobal_Elements) + ". Should be >= 0.", -1);
  if (Element_Size <= 0)
    throw ReportError("ElementSize = " + toString(Element_Size) + ". Should be > 0.", -2);

  BlockMapData_ = new Epetra_BlockMapData(NumGlobal_Elements, Element_Size, Index_Base, comm, IsLongLong);
  int NumProc = comm.NumProc();
  BlockMapData_->ConstantElementSize_ = true;
  BlockMapData_->LinearMap_ = true;

  int MyPID = comm.MyPID();

  if(BlockMapData_->NumGlobalElements_ / NumProc > (long long) std::numeric_limits<int>::max())
    throw ReportError("Epetra_BlockMap::ConstructAutoUniform: Error. Not enough space for elements on each processor", -99);

  BlockMapData_->NumMyElements_ = (int) (BlockMapData_->NumGlobalElements_ / NumProc);
  int remainder = (int) (BlockMapData_->NumGlobalElements_ % NumProc); // remainder will fit in an int
  int start_index = MyPID * (BlockMapData_->NumMyElements_ + 1);

  if (MyPID < remainder)
    BlockMapData_->NumMyElements_++;
  else
    start_index -= (MyPID - remainder);

  BlockMapData_->NumGlobalPoints_ = BlockMapData_->NumGlobalElements_ * BlockMapData_->ElementSize_;
  BlockMapData_->NumMyPoints_ = BlockMapData_->NumMyElements_ * BlockMapData_->ElementSize_;

  BlockMapData_->MinMyElementSize_ = BlockMapData_->ElementSize_;
  BlockMapData_->MaxMyElementSize_ = BlockMapData_->ElementSize_;
  BlockMapData_->MinElementSize_ = BlockMapData_->ElementSize_;
  BlockMapData_->MaxElementSize_ = BlockMapData_->ElementSize_;

  BlockMapData_->MinAllGID_ = BlockMapData_->IndexBase_;
  BlockMapData_->MaxAllGID_ = BlockMapData_->MinAllGID_ + BlockMapData_->NumGlobalElements_ - 1;
  BlockMapData_->MinMyGID_ = start_index + BlockMapData_->IndexBase_;
  BlockMapData_->MaxMyGID_ = BlockMapData_->MinMyGID_ + BlockMapData_->NumMyElements_ - 1;
  BlockMapData_->DistributedGlobal_ = IsDistributedGlobal(BlockMapData_->NumGlobalElements_, BlockMapData_->NumMyElements_);

  EndOfConstructorOps();
}
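The partition arithmetic above is easy to check in isolation. This standalone sketch reproduces the same NumMyElements / start_index computation; for 10 elements on 3 processes it prints shares of 4, 3, 3 starting at GIDs 0, 4, 7.

#include <cstdio>

int main() {
  long long NumGlobalElements = 10;
  int NumProc = 3;
  for (int MyPID = 0; MyPID < NumProc; ++MyPID) {
    int NumMyElements = (int)(NumGlobalElements / NumProc);  // base share
    int remainder = (int)(NumGlobalElements % NumProc);
    int start_index = MyPID * (NumMyElements + 1);
    if (MyPID < remainder)
      NumMyElements++;                  // first 'remainder' ranks get one extra
    else
      start_index -= (MyPID - remainder);
    std::printf("rank %d: %d elements starting at GID %d\n",
                MyPID, NumMyElements, start_index);
  }
  return 0;
}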
Example 10: build_maps
void build_maps(
  itype nrows,               // Number of global matrix rows
  bool testEpetra64,         // Flag indicating whether to adjust global row/column
                             // indices to exercise Epetra64 capability.
  Epetra_Comm &comm,         // Epetra communicator to be used in maps
  Epetra_Map **vectorMap,    // OUTPUT: Map to be used for the vector
  Epetra_Map **rowMap,       // OUTPUT: Map to be used for the matrix rows
  Epetra_Map **colMap,       // OUTPUT: Map to be used for the matrix cols
  long long &offsetEpetra64, // OUTPUT for testing Epetra64: add offsetEpetra64
                             // to all row/column indices.
  bool verbose               // print out generated maps
)
{
  // Function to build the maps for 1D or 2D matrix distribution.
  // Output for 1D includes rowMap and NULL colMap and vectorMap.
  // Output for 2D includes rowMap, colMap and vectorMap.

  int me = comm.MyPID();
  int np = comm.NumProc();

  *rowMap = NULL;
  *colMap = NULL;
  *vectorMap = NULL;

  // offsetEpetra64 = (testEpetra64 ? (long long) INT_MAX - (long long) 5 : 0);
  offsetEpetra64 = (testEpetra64 ? (long long) 2 * INT_MAX : 0);

  // Generate 1D row-based decomposition.
  if ((me == 0) && verbose)
    cout << endl
         << "1D Distribution: " << endl
         << " np = " << np << endl;

  // Linear map similar to Trilinos default.
  itype nMyRows = nrows / np + (nrows % np > me);
  itype myFirstRow = me * (nrows / np) + MIN(nrows % np, me);
  itype *myGlobalRows = new itype[nMyRows];
  for (itype i = 0; i < nMyRows; i++)
    myGlobalRows[i] = i + myFirstRow + offsetEpetra64;
  *rowMap = new Epetra_Map(nrows, nMyRows, &myGlobalRows[0], 0, comm);
  delete [] myGlobalRows;
}
Example 11: rectangular
int rectangular(const Epetra_Comm& Comm, bool verbose)
{
  int mypid = Comm.MyPID();
  int numlocalrows = 3;
  Epetra_Map rowmap((long long) -1, numlocalrows, 0, Comm);

  long long numglobalrows = numlocalrows * Comm.NumProc();
  long long numcols = 2 * numglobalrows;

  Epetra_FECrsGraph fegraph(Copy, rowmap, numcols);

  long long* cols = new long long[numcols];
  for(int j = 0; j < numcols; ++j) cols[j] = j;

  Epetra_Map domainmap((long long) -1, numcols, 0, Comm);

  long long firstlocalrow = numlocalrows * mypid;
  long long lastlocalrow = numlocalrows * (mypid+1) - 1;

  for(long long i = 0; i < numglobalrows; ++i) {
    // If i is a local row, then skip it. We want each processor to only
    // load rows that belong on other processors.
    if (i >= firstlocalrow && i <= lastlocalrow) continue;

    EPETRA_CHK_ERR( fegraph.InsertGlobalIndices(1, &i, numcols, &(cols[0])) );
  }

  EPETRA_CHK_ERR( fegraph.GlobalAssemble(domainmap, rowmap) );

  if (verbose) {
    std::cout << "********************** fegraph **********************" << std::endl;
    std::cout << fegraph << std::endl;
  }

  delete [] cols;

  return(0);
}
Example 12: allGatherCompact
void MPIWrapper::allGatherCompact(const Epetra_Comm &Comm, FieldContainer<Scalar> &gatheredValues,
                                  FieldContainer<Scalar> &myValues, FieldContainer<int> &offsets)
{
  int mySize = myValues.size();
  int totalSize;
  Comm.SumAll(&mySize, &totalSize, 1);

  int myOffset = 0;
  Comm.ScanSum(&mySize, &myOffset, 1);
  myOffset -= mySize;

  gatheredValues.resize(totalSize);
  for (int i = 0; i < mySize; i++)
  {
    gatheredValues[myOffset+i] = myValues[i];
  }
  MPIWrapper::entryWiseSum(Comm, gatheredValues);

  offsets.resize(Comm.NumProc());
  offsets[Comm.MyPID()] = myOffset;
  MPIWrapper::entryWiseSum(Comm, offsets);
}
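The core of the routine is the SumAll/ScanSum pair: SumAll yields the gathered length, and the inclusive prefix sum minus the local size gives each rank's write offset into the concatenation. A minimal sketch of just that idiom against the Epetra_Comm interface (the per-rank sizes are illustrative):

#include <mpi.h>
#include <iostream>
#include "Epetra_MpiComm.h"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  int mySize = 3 + comm.MyPID();         // illustrative: a different count per rank
  int totalSize = 0, myOffset = 0;
  comm.SumAll(&mySize, &totalSize, 1);   // total length of the gathered array
  comm.ScanSum(&mySize, &myOffset, 1);   // inclusive prefix sum ...
  myOffset -= mySize;                    // ... made exclusive: my write offset

  std::cout << "rank " << comm.MyPID() << ": offset " << myOffset
            << " of " << totalSize << std::endl;

  MPI_Finalize();
  return 0;
}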
Example 13: Ifpack_BreakForDebugger
//============================================================================
void Ifpack_BreakForDebugger(Epetra_Comm& Comm)
{
  char hostname[80];
  char buf[80];
  if (Comm.MyPID() == 0) cout << "Host and Process Ids for tasks" << endl;
  for (int i = 0; i < Comm.NumProc(); i++) {
    if (i == Comm.MyPID()) {
#if defined(TFLOP) || defined(JANUS_STLPORT)
      sprintf(buf, "Host: %s PID: %d", "janus", getpid());
#elif defined(_WIN32)
      sprintf(buf, "Windows compiler, unknown hostname and PID!");
#else
      gethostname(hostname, sizeof(hostname));
      sprintf(buf, "Host: %s\tComm.MyPID(): %d\tPID: %d",
              hostname, Comm.MyPID(), getpid());
#endif
      printf("%s\n", buf);
      fflush(stdout);
#if !( defined(_WIN32) )
      sleep(1);
#endif
    }
  }

  if (Comm.MyPID() == 0) {
    printf("\n");
    printf("** Pausing to attach debugger...\n");
    printf("** You may now attach debugger to the processes listed above.\n");
    printf("**\n");
    printf("** Enter a character to continue > "); fflush(stdout);
    char go;
    scanf("%c", &go);
  }

  Comm.Barrier();
}
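Usage sketch (illustrative; Ifpack_Utils.h is where Ifpack declares this helper): call it right after startup so a debugger can be attached to each listed PID before the interesting code runs.

#include <mpi.h>
#include "Epetra_MpiComm.h"
#include "Ifpack_Utils.h"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  // Prints host/PID per rank, then rank 0 waits for a keypress
  // (e.g. attach with: gdb -p <PID>).
  Ifpack_BreakForDebugger(comm);

  // ... rest of the application ...
  MPI_Finalize();
  return 0;
}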
Example 14: checkmap
int checkmap(Epetra_Map & Map, int NumGlobalElements, int NumMyElements,
             int *MyGlobalElements, int IndexBase, Epetra_Comm& Comm,
             bool DistributedGlobal)
{
  int i, ierr = 0, forierr = 0;

  EPETRA_TEST_ERR(!Map.ConstantElementSize(), ierr);
  EPETRA_TEST_ERR(DistributedGlobal != Map.DistributedGlobal(), ierr);
  EPETRA_TEST_ERR(Map.ElementSize() != 1, ierr);

  int *MyElementSizeList = new int[NumMyElements];
  EPETRA_TEST_ERR(Map.ElementSizeList(MyElementSizeList) != 0, ierr);
  forierr = 0;
  for (i = 0; i < NumMyElements; i++) forierr += MyElementSizeList[i] != 1;
  EPETRA_TEST_ERR(forierr, ierr);
  delete [] MyElementSizeList;

  const Epetra_Comm & Comm1 = Map.Comm();
  EPETRA_TEST_ERR(Comm1.NumProc() != Comm.NumProc(), ierr);
  EPETRA_TEST_ERR(Comm1.MyPID() != Comm.MyPID(), ierr);

  EPETRA_TEST_ERR(Map.IndexBase() != IndexBase, ierr);
  EPETRA_TEST_ERR(!Map.LinearMap() && MyGlobalElements == 0, ierr);
  EPETRA_TEST_ERR(Map.LinearMap() && MyGlobalElements != 0, ierr);
  EPETRA_TEST_ERR(Map.MaxAllGID() != NumGlobalElements - 1 + IndexBase, ierr);
  EPETRA_TEST_ERR(Map.MaxElementSize() != 1, ierr);

  int MaxLID = Map.MaxLID();
  EPETRA_TEST_ERR(MaxLID != NumMyElements - 1, ierr);

  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements - 1 + IndexBase;
  if (Comm.MyPID() > 2) MaxMyGID += 3;
  if (!DistributedGlobal) MaxMyGID = NumMyElements - 1 + IndexBase;
  EPETRA_TEST_ERR(Map.MaxMyGID() != MaxMyGID, ierr);

  EPETRA_TEST_ERR(Map.MinAllGID() != IndexBase, ierr);
  EPETRA_TEST_ERR(Map.MinElementSize() != 1, ierr);
  EPETRA_TEST_ERR(Map.MinLID() != 0, ierr);

  int MinMyGID = Comm.MyPID()*NumMyElements + IndexBase;
  if (Comm.MyPID() > 2) MinMyGID += 3;
  if (!DistributedGlobal) MinMyGID = 0;
  EPETRA_TEST_ERR(Map.MinMyGID() != MinMyGID, ierr);

  int * MyGlobalElements1 = new int[NumMyElements];
  EPETRA_TEST_ERR(Map.MyGlobalElements(MyGlobalElements1) != 0, ierr);
  forierr = 0;
  if (MyGlobalElements == 0) {
    for (i = 0; i < NumMyElements; i++)
      forierr += MyGlobalElements1[i] != MinMyGID + i;
    EPETRA_TEST_ERR(forierr, ierr);
  }
  else {
    for (i = 0; i < NumMyElements; i++)
      forierr += MyGlobalElements[i] != MyGlobalElements1[i];
    EPETRA_TEST_ERR(forierr, ierr);
  }

  EPETRA_TEST_ERR(Map.NumGlobalElements() != NumGlobalElements, ierr);
  EPETRA_TEST_ERR(Map.NumGlobalPoints() != NumGlobalElements, ierr);
  EPETRA_TEST_ERR(Map.NumMyElements() != NumMyElements, ierr);
  EPETRA_TEST_ERR(Map.NumMyPoints() != NumMyElements, ierr);

  int MaxMyGID2 = Map.GID(Map.LID(MaxMyGID));
  EPETRA_TEST_ERR(MaxMyGID2 != MaxMyGID, ierr);
  int MaxLID2 = Map.LID(Map.GID(MaxLID));
  EPETRA_TEST_ERR(MaxLID2 != MaxLID, ierr);

  EPETRA_TEST_ERR(Map.GID(MaxLID+1) != IndexBase - 1, ierr);  // MaxLID+1 doesn't exist
  EPETRA_TEST_ERR(Map.LID(MaxMyGID+1) != -1, ierr);           // MaxMyGID+1 doesn't exist or is on a different processor

  EPETRA_TEST_ERR(!Map.MyGID(MaxMyGID), ierr);
  EPETRA_TEST_ERR(Map.MyGID(MaxMyGID+1), ierr);
  EPETRA_TEST_ERR(!Map.MyLID(MaxLID), ierr);
  EPETRA_TEST_ERR(Map.MyLID(MaxLID+1), ierr);
  EPETRA_TEST_ERR(!Map.MyGID(Map.GID(MaxLID)), ierr);
  EPETRA_TEST_ERR(Map.MyGID(Map.GID(MaxLID+1)), ierr);
  EPETRA_TEST_ERR(!Map.MyLID(Map.LID(MaxMyGID)), ierr);
  EPETRA_TEST_ERR(Map.MyLID(Map.LID(MaxMyGID+1)), ierr);
  //......... remaining code omitted .........
Example 15: getSquare
// ===========================================================================
void Galeri::grid::Generator::
getSquare(Epetra_Comm& comm,
          const int numGlobalElementsX, const int numGlobalElementsY,
          const int numDomainsX, const int numDomainsY,
          Galeri::grid::Loadable& domain, Galeri::grid::Loadable& boundary,
          const string what)
{
  TEUCHOS_TEST_FOR_EXCEPTION(numDomainsX * numDomainsY != comm.NumProc(), std::logic_error,
                             "the number of processors should equal numDomainsX * numDomainsY"
                             << ", now numProcs = " << comm.NumProc()
                             << " and numDomainsX * numDomainsY = " << numDomainsX * numDomainsY);

  TEUCHOS_TEST_FOR_EXCEPTION(numGlobalElementsX % numDomainsX != 0, std::logic_error,
                             "numGlobalElementsX must be a multiple of numDomainsX");

  TEUCHOS_TEST_FOR_EXCEPTION(numGlobalElementsY % numDomainsY != 0, std::logic_error,
                             "numGlobalElementsY must be a multiple of numDomainsY");

  double lx = 1.0;
  double ly = 1.0;

  // these are the global number of elements and vertices
  int numGlobalElements = numGlobalElementsX * numGlobalElementsY;
  if (what == "Triangle") numGlobalElements *= 2;
  int numGlobalVertices = (numGlobalElementsX + 1) * (numGlobalElementsY + 1);

  int numGlobalVerticesX = numGlobalElementsX + 1;
  int numGlobalVerticesY = numGlobalElementsY + 1;

  // these are the mesh sizes, hx and hy
  double deltax = lx / numGlobalElementsX;
  double deltay = ly / numGlobalElementsY;

  // (px, py) are the coordinates of this processor.
  int px = comm.MyPID() % numDomainsX;
  int py = comm.MyPID() / numDomainsX;

  // (numMyElementsX, numMyElementsY) are the number of elements
  // in the square assigned to this processor, and
  // (numMyVerticesX, numMyVerticesY) the number of vertices.
  int numMyElementsX = numGlobalElementsX / numDomainsX;
  int numMyElementsY = numGlobalElementsY / numDomainsY;

  int numMyVerticesX = numMyElementsX + 1;
  int numMyVerticesY = numMyElementsY + 1;

  // (sx, sy) are the coordinates of the first element of this processor.
  int sx = px * numMyElementsX;
  int sy = py * numMyElementsY;

  // and these are the number of vertices and elements assigned
  // to this processor.
  int numMyElements = numMyElementsX * numMyElementsY;
  if (what == "Triangle") numMyElements *= 2;
  int numMyVertices = (numMyElementsX + 1) * (numMyElementsY + 1);

  Triangle triangle;
  domain.initialize(comm, numGlobalElements, numMyElements, triangle);

  int elementOffset = numMyElements * comm.MyPID();
  int vertexOffset = px * numMyElementsX + py * numMyElementsY * numGlobalVerticesX;

  int count = 0;
  if (what == "Triangle")
  {
    for (int iy = 0; iy < numMyElementsY; ++iy)
    {
      for (int ix = 0; ix < numMyElementsX; ++ix)
      {
        int GEID = elementOffset + count++;
        int GVID = vertexOffset + ix + iy * numGlobalVerticesX;

        domain.setGlobalConnectivity(GEID, 0, GVID);
        domain.setGlobalConnectivity(GEID, 1, GVID + 1);
        domain.setGlobalConnectivity(GEID, 2, GVID + 2 + numGlobalElementsX);

        GEID = elementOffset + count++;
        domain.setGlobalConnectivity(GEID, 0, GVID + 2 + numGlobalElementsX);
        domain.setGlobalConnectivity(GEID, 1, GVID + 1 + numGlobalElementsX);
        domain.setGlobalConnectivity(GEID, 2, GVID);
      }
    }
  }
  else
  {
    for (int iy = 0; iy < numMyElementsY; ++iy)
    {
      for (int ix = 0; ix < numMyElementsX; ++ix)
      {
        int GEID = elementOffset + count++;
        int GVID = vertexOffset + ix + iy * numGlobalVerticesX;

        domain.setGlobalConnectivity(GEID, 0, GVID);
        domain.setGlobalConnectivity(GEID, 1, GVID + 1);
        domain.setGlobalConnectivity(GEID, 2, GVID + 2 + numGlobalElementsX);
        domain.setGlobalConnectivity(GEID, 3, GVID + 1 + numGlobalElementsX);
      }
    }
  }
  //......... remaining code omitted .........