本文整理汇总了C++中Epetra_MultiVector::Comm方法的典型用法代码示例。如果您正苦于以下问题:C++ Epetra_MultiVector::Comm方法的具体用法?C++ Epetra_MultiVector::Comm怎么用?C++ Epetra_MultiVector::Comm使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Epetra_MultiVector
的用法示例。
在下文中一共展示了Epetra_MultiVector::Comm方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: Ifpack_PrintResidual
//============================================================================
// Prints a labeled residual report: rank 0 of the multivector's communicator
// announces the label, then the shared residual printer is invoked with an
// iteration number of 0. Always returns 0.
int Ifpack_PrintResidual(char* Label, const Epetra_RowMatrix& A,
const Epetra_MultiVector& X, const Epetra_MultiVector&Y)
{
  const bool amRoot = (X.Comm().MyPID() == 0);
  if (amRoot)
    cout << "***** " << Label << endl;

  // Delegate the actual residual computation/printing to the overload
  // that takes an iteration counter.
  Ifpack_PrintResidual(0, A, X, Y);
  return (0);
}
示例2: logic_error
// Writes the given multivector to the HDF5 file at path() under group
// groupName_, truncating any existing file. Throws std::runtime_error if the
// file cannot be created, and std::logic_error when EpetraExt was built
// without HDF5 support.
void Hdf5MVOutputFile::write(const Epetra_MultiVector &mv)
{
#ifdef HAVE_EPETRAEXT_HDF5
  // Open the writer on the multivector's own communicator so every rank
  // that owns part of mv takes part in the collective write.
  EpetraExt::HDF5 hdf5Output(mv.Comm());
  hdf5Output.Create(path()); // Truncate existing file if necessary
  TEUCHOS_TEST_FOR_EXCEPTION(!hdf5Output.IsOpen(),
                             std::runtime_error,
                             "Cannot create output file: " + path());
  hdf5Output.Write(groupName_, mv);
  hdf5Output.Close();
#else /* HAVE_EPETRAEXT_HDF5 */
  throw std::logic_error("HDF5 support disabled");
#endif /* HAVE_EPETRAEXT_HDF5 */
}
示例3: iReq
// Constructor: assembles a block-diagonal "multi-point" system in which each
// block row holds a copy of the single-point (split) Jacobian, with block
// rows distributed across the time domains of the EpetraExt::MultiComm.
// Each column of splitMultiVec_ supplies the initial guess for one step
// owned by this time domain.
// NOTE(review): jacobian/solution/solutionOverlap/overlapImporter/rowStencil/
// rowIndex are raw owning pointers allocated here — presumably released in
// the destructor (not visible in this chunk); verify.
LOCA::Epetra::Interface::MultiPoint::
MultiPoint(
const Teuchos::RCP<LOCA::Epetra::Interface::Required> &iReq_,
const Teuchos::RCP< NOX::Epetra::Interface::Jacobian> &iJac_,
const Epetra_MultiVector &splitMultiVec_,
const Teuchos::RCP<Epetra_RowMatrix> &splitJac_,
const Teuchos::RCP<EpetraExt::MultiComm> &globalComm_) :
iReq(iReq_),
iJac(iJac_),
splitJac(splitJac_),
globalComm(globalComm_),
splitVec(*(splitMultiVec_(0))),  // copy of column 0, used as a template/work vector
splitRes(*(splitMultiVec_(0))),
jacobian(0),
solution(0),
solutionOverlap(0),
overlapImporter(0),
timeStepsOnTimeDomain(splitMultiVec_.NumVectors()),  // one column per local step
numTimeDomains(globalComm_->NumSubDomains()),
timeDomain(globalComm_->SubDomainRank()),
conStep(0),
rowStencil(0),
rowIndex(0)
{
// Print a one-time summary of the space/time partitioning from global rank 0.
if (globalComm->MyPID()==0) {
// TODO: pass in globalData and use output stream
cout << "----------MultiPoint Partition Info------------"
<< "\n\tNumProcs = " << globalComm->NumProc()
<< "\n\tSpatial Decomposition = " << splitMultiVec_.Comm().NumProc()
<< "\n\tNumber of Domains = " << numTimeDomains
<< "\n\tSteps on Domain 0 = " << timeStepsOnTimeDomain
<< "\n\tTotal Number of Steps = " << globalComm->NumTimeSteps();
cout << "\n-----------------------------------------------" << endl;
}
// Construct global block matrix graph from split jacobian and stencil,
// which is just diagonal in this case: each block row references only
// itself (offset 0), and its global index is this domain's first step
// plus the local step number.
rowStencil = new std::vector< std::vector<int> >(timeStepsOnTimeDomain);
rowIndex = new std::vector<int>;
for (int i=0; i < timeStepsOnTimeDomain; i++) {
(*rowStencil)[i].push_back(0);
(*rowIndex).push_back(i + globalComm->FirstTimeStepOnDomain());
}
jacobian = new EpetraExt::BlockCrsMatrix(*splitJac, *rowStencil,
*rowIndex, *globalComm);
// Construct global solution vector, the overlap vector,
//and importer between them (importer maps owned rows -> ghosted columns)
solution = new EpetraExt::BlockVector(splitJac->RowMatrixRowMap(),
jacobian->RowMap());
solutionOverlap = new EpetraExt::BlockVector(splitJac->RowMatrixRowMap(),
jacobian->ColMap());
overlapImporter = new Epetra_Import(solutionOverlap->Map(), solution->Map());
// Load initial guess into block solution vector: column i of the input
// multivector becomes block (*rowIndex)[i] of the global solution.
for (int i=0; i < timeStepsOnTimeDomain; i++)
solution->LoadBlockValues(*(splitMultiVec_(i)), (*rowIndex)[i]);
}
示例4: if
LOCA::Epetra::Interface::xyzt::
xyzt( const Teuchos::RCP<LOCA::Epetra::Interface::TimeDependent> &interface_,
const Epetra_MultiVector &splitMultiVec_,
const Teuchos::RCP<Epetra_RowMatrix> &splitJac_,
const Teuchos::RCP<EpetraExt::MultiComm> &globalComm_,
const Epetra_Vector &initialCondVec_,
double dt_,
Teuchos::ParameterList *precPrintParams_,
Teuchos::ParameterList *precLSParams_) :
interface(interface_),
splitJac(splitJac_),
globalComm(globalComm_),
splitVec(*(splitMultiVec_(0))),
splitRes(*(splitMultiVec_(0))),
splitVecOld(*(splitMultiVec_(0))),
initialCondVec(initialCondVec_),
jacobian(0),
solution(0),
solutionOverlap(0),
overlapImporter(0),
timeStepsOnTimeDomain(splitMultiVec_.NumVectors()),
numTimeDomains(globalComm_->NumSubDomains()),
timeDomain(globalComm_->SubDomainRank()),
conStep(0),
rowStencil(0),
rowIndex(0),
precPrintParams(precPrintParams_),
precLSParams(precLSParams_),
splitJacCrs(NULL),
savedSplitMassForFloquet(0),
isCrsMatrix(true),
floquetFillFlag(false),
dt(dt_)
{
if (precLSParams)
isPeriodic = precLSParams_->get("Periodic",false);
else
isPeriodic = false;
if (globalComm->MyPID()==0) {
// TODO: pass in globalData and use output stream
std::cout << "--------------XYZT Partition Info---------------"
<< "\n\tNumProcs = " << globalComm->NumProc()
<< "\n\tSpatial Decomposition = " << splitMultiVec_.Comm().NumProc()
<< "\n\tNumber of Time Domains = " << numTimeDomains
<< "\n\tTime Steps on Domain 0 = " << timeStepsOnTimeDomain
<< "\n\tNumber of Time Steps = " << globalComm->NumTimeSteps();
if (isPeriodic) std::cout << "\n\t-->Solving for a Periodic Orbit!" ;
std::cout << "\n-----------------------------------------------" << std::endl;
}
// Construct global block matrix graph from split jacobian and stencil
// Each block has identical sparsity, and assumes mass matrix's sparsity
// is a subset of the Jacobian's
rowStencil = new std::vector< std::vector<int> >(timeStepsOnTimeDomain);
rowIndex = new std::vector<int>;
for (int i=0; i < timeStepsOnTimeDomain; i++) {
if (timeDomain!=0 || i!=0)
(*rowStencil)[i].push_back(-1);
else if (isPeriodic)
(*rowStencil)[i].push_back(globalComm->NumTimeSteps()-1);
(*rowStencil)[i].push_back(0);
(*rowIndex).push_back(i + globalComm->FirstTimeStepOnDomain());
}
jacobian = new EpetraExt::BlockCrsMatrix(*splitJac, *rowStencil,
*rowIndex, *globalComm);
// Construct global solution vector, the overlap vector,
//and importer between them
solution = new EpetraExt::BlockVector(splitJac->RowMatrixRowMap(),
jacobian->RowMap());
solutionOverlap = new EpetraExt::BlockVector(splitJac->RowMatrixRowMap(),
jacobian->ColMap());
overlapImporter = new Epetra_Import(solutionOverlap->Map(), solution->Map());
// Load initial guess into block solution vector
for (int i=0; i < timeStepsOnTimeDomain; i++)
solution->LoadBlockValues(*(splitMultiVec_(i)), (*rowIndex)[i]);
// Create preconditioner
if (precLSParams != 0) {
//Preconditioner needs CrsMatrix, must convert VBR or others
splitJacCrs = dynamic_cast<Epetra_CrsMatrix *>(splitJac.get());
if (splitJacCrs == NULL) {
isCrsMatrix = false;
std::cout << "CAST OF splitJacCrs failed!, constructing CRS matrix " << std::endl;
std::vector< std::vector<int> > row(1); row[0].push_back(0);
std::vector<int> col; col.push_back(0);
splitJacCrs = (Epetra_CrsMatrix *)
new EpetraExt::BlockCrsMatrix(*splitJac, row, col,
splitJac->Comm());
}
preconditioner =
//.........这里部分代码省略.........
示例5: BsMap
int shylu_dist_solve<Epetra_CrsMatrix,Epetra_MultiVector>(
shylu_symbolic<Epetra_CrsMatrix,Epetra_MultiVector> *ssym,
shylu_data<Epetra_CrsMatrix,Epetra_MultiVector> *data,
shylu_config<Epetra_CrsMatrix,Epetra_MultiVector> *config,
const Epetra_MultiVector& X,
Epetra_MultiVector& Y
)
{
int err;
AztecOO *solver = 0;
assert(X.Map().SameAs(Y.Map()));
//assert(X.Map().SameAs(A_->RowMap()));
const Epetra_MultiVector *newX;
newX = &X;
//rd_->redistribute(X, newX);
int nvectors = newX->NumVectors();
// May have to use importer/exporter
Epetra_Map BsMap(-1, data->Snr, data->SRowElems, 0, X.Comm());
Epetra_Map BdMap(-1, data->Dnr, data->DRowElems, 0, X.Comm());
Epetra_MultiVector Bs(BsMap, nvectors);
Epetra_Import BsImporter(BsMap, newX->Map());
assert(BsImporter.SourceMap().SameAs(newX->Map()));
assert((newX->Map()).SameAs(BsImporter.SourceMap()));
Bs.Import(*newX, BsImporter, Insert);
Epetra_MultiVector Xs(BsMap, nvectors);
Epetra_SerialComm LComm; // Use Serial Comm for the local vectors.
Epetra_Map LocalBdMap(-1, data->Dnr, data->DRowElems, 0, LComm);
Epetra_MultiVector localrhs(LocalBdMap, nvectors);
Epetra_MultiVector locallhs(LocalBdMap, nvectors);
Epetra_MultiVector Z(BdMap, nvectors);
Epetra_MultiVector Bd(BdMap, nvectors);
Epetra_Import BdImporter(BdMap, newX->Map());
assert(BdImporter.SourceMap().SameAs(newX->Map()));
assert((newX->Map()).SameAs(BdImporter.SourceMap()));
Bd.Import(*newX, BdImporter, Insert);
int lda;
double *values;
err = Bd.ExtractView(&values, &lda);
assert (err == 0);
int nrows = ssym->C->RowMap().NumMyElements();
// copy to local vector //TODO: OMP ?
assert(lda == nrows);
for (int v = 0; v < nvectors; v++)
{
for (int i = 0; i < nrows; i++)
{
err = localrhs.ReplaceMyValue(i, v, values[i+v*lda]);
assert (err == 0);
}
}
// TODO : Do we need to reset the lhs and rhs here ?
if (config->amesosForDiagonal)
{
ssym->LP->SetRHS(&localrhs);
ssym->LP->SetLHS(&locallhs);
ssym->Solver->Solve();
}
else
{
ssym->ifSolver->ApplyInverse(localrhs, locallhs);
}
err = locallhs.ExtractView(&values, &lda);
assert (err == 0);
// copy to distributed vector //TODO: OMP ?
assert(lda == nrows);
for (int v = 0; v < nvectors; v++)
{
for (int i = 0; i < nrows; i++)
{
err = Z.ReplaceMyValue(i, v, values[i+v*lda]);
assert (err == 0);
}
}
Epetra_MultiVector temp1(BsMap, nvectors);
ssym->R->Multiply(false, Z, temp1);
Bs.Update(-1.0, temp1, 1.0);
Xs.PutScalar(0.0);
Epetra_LinearProblem Problem(data->Sbar.get(), &Xs, &Bs);
if (config->schurSolver == "Amesos")
{
Amesos_BaseSolver *solver2 = data->dsolver;
data->LP2->SetLHS(&Xs);
data->LP2->SetRHS(&Bs);
//cout << "Calling solve *****************************" << endl;
//.........这里部分代码省略.........