This article collects typical usage examples of the C++ method teuchos::RCP::GlobalAssemble. If you are wondering what RCP::GlobalAssemble does, or how to use it in your own code, the curated examples below may help. You can also read more about the enclosing class, teuchos::RCP.
The following shows 2 code examples of the RCP::GlobalAssemble method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
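Before the examples, here is a minimal sketch (not taken from either example below) of the pattern both examples rely on: coefficients are inserted into an Epetra_FECrsMatrix held through a Teuchos::RCP, and GlobalAssemble() is then called to migrate any off-processor contributions and call FillComplete(). The serial communicator, map size, and coefficient values here are illustrative assumptions, not part of the examples.

```cpp
#include <vector>
#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Epetra_FECrsMatrix.h"
#include "Teuchos_RCP.hpp"

int main()
{
  Epetra_SerialComm comm;          // serial for brevity; an MPI run would use Epetra_MpiComm
  Epetra_Map map(4, 0, comm);      // 4 global rows, index base 0 (illustrative size)

  // The matrix is held through a Teuchos::RCP, as in the examples below.
  Teuchos::RCP<Epetra_FECrsMatrix> matrix =
    Teuchos::rcp(new Epetra_FECrsMatrix(Copy, map, 0));

  // Insert a small dense block of coefficients (placeholder values).
  std::vector<int> indices = {0, 1, 2, 3};
  std::vector<double> coefs(indices.size() * indices.size(), 1.0);
  matrix->InsertGlobalValues((int)indices.size(), &indices[0], &coefs[0]);

  // GlobalAssemble() gathers any contributions made to rows owned by other
  // processors and calls FillComplete(), after which the matrix is ready for use.
  int err = matrix->GlobalAssemble();
  return err;
}
```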
Example 1: X
Teuchos::RCP<Epetra_CrsMatrix> Epetra_Operator_to_Epetra_Matrix::constructInverseMatrix(const Epetra_Operator &op, const Epetra_Map &map)
{
  int numEntriesPerRow = 0;
  Teuchos::RCP<Epetra_FECrsMatrix> matrix = Teuchos::rcp(new Epetra_FECrsMatrix(::Copy, map, numEntriesPerRow));
  int numRows = map.NumGlobalElements();
  Epetra_Vector X(map);
  Epetra_Vector Y(map);
  double tol = 1e-15; // values below this will be considered 0
  for (int rowIndex = 0; rowIndex < numRows; rowIndex++)
  {
    int lid = map.LID(rowIndex);
    if (lid != -1)
    {
      X[lid] = 1.0; // unit vector in the rowIndex direction, set on the owning processor
    }
    op.ApplyInverse(X, Y); // Y holds the action of the inverse on that unit vector
    if (lid != -1)
    {
      X[lid] = 0.0; // reset for the next iteration
    }
    std::vector<double> values;
    std::vector<int> indices;
    for (int i = 0; i < map.NumMyElements(); i++)
    {
      if (std::abs(Y[i]) > tol) // std::abs from <cmath>; plain abs() would truncate to int
      {
        values.push_back(Y[i]);
        indices.push_back(map.GID(i));
      }
    }
    if (!values.empty())
    {
      matrix->InsertGlobalValues(rowIndex, (int)values.size(), &values[0], &indices[0]);
    }
  }
  matrix->GlobalAssemble(); // migrate off-processor contributions and call FillComplete()
  return matrix;
}
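For context, a call site for this helper might look like the sketch below. The operator `invOp` and the map `rowMap` are assumptions introduced for illustration, and the sketch assumes constructInverseMatrix can be called without an instance (as the definition above suggests, since it uses no member state).

```cpp
// Hypothetical call site (not part of the example above): invOp is an
// Epetra_Operator whose ApplyInverse() is well defined, and rowMap is the
// map over which the explicit inverse should be assembled.
Teuchos::RCP<Epetra_CrsMatrix> invMatrix =
  Epetra_Operator_to_Epetra_Matrix::constructInverseMatrix(*invOp, rowMap);
std::cout << "explicit inverse has " << invMatrix->NumGlobalNonzeros()
          << " global nonzeros" << std::endl;
```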
Example 2: main
//......... some of the code is omitted here .........
  Epetra_MpiComm comm(MPI_COMM_WORLD);
  Epetra_Map origmap(global_n, 3, &mynodes[0], 0, comm);

  Teuchos::RCP<Epetra_FECrsMatrix> matrix =
    Teuchos::rcp(new Epetra_FECrsMatrix(Copy, origmap, 0));

  //We'll assemble elements E0 and E1 on proc 0,
  //        element E2 on proc 1,
  //        element E3 on proc 2.

  std::vector<int> indices(nodesPerElem);
  std::vector<double> coefs(nodesPerElem*nodesPerElem, 2.0);

  if (localProc == 0) {
    //element E0:
    indices[0] = 0; indices[1] = 1; indices[2] = 2; indices[3] = 3;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);

    //element E1:
    indices[0] = 1; indices[1] = 4; indices[2] = 5; indices[3] = 2;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
  else if (localProc == 1) {
    //element E2:
    indices[0] = 3; indices[1] = 2; indices[2] = 7; indices[3] = 8;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }
  else { //localProc==2
    //element E3:
    indices[0] = 2; indices[1] = 5; indices[2] = 6; indices[3] = 7;
    matrix->InsertGlobalValues(nodesPerElem, &indices[0], &coefs[0]);
  }

  //GlobalAssemble() migrates the shared contributions to their owning
  //processors and calls FillComplete() on the matrix.
  int err = matrix->GlobalAssemble();
  if (err != 0) {
    std::cout << "err="<<err<<" returned from matrix->GlobalAssemble()"
              << std::endl;
  }
// std::cout << "matrix: " << std::endl;
// std::cout << *matrix << std::endl;
//We'll need a Teuchos::ParameterList object to pass to the
//Isorropia::Epetra::Partitioner class.
Teuchos::ParameterList paramlist;
#ifdef HAVE_ISORROPIA_ZOLTAN
// If Zoltan is available, we'll specify that the Zoltan package be
// used for the partitioning operation, by creating a parameter
// sublist named "Zoltan".
// In the sublist, we'll set parameters that we want sent to Zoltan.
paramlist.set("PARTITIONING METHOD", "GRAPH");
paramlist.set("PRINT ZOLTAN METRICS", "2");
Teuchos::ParameterList& sublist = paramlist.sublist("Zoltan");
sublist.set("GRAPH_PACKAGE", "PHG");
//sublist.set("DEBUG_LEVEL", "1"); // Zoltan will print out parameters
//sublist.set("DEBUG_LEVEL", "5"); // proc 0 will trace Zoltan calls
//sublist.set("DEBUG_MEMORY", "2"); // Zoltan will trace alloc & free
#else
// If Zoltan is not available, a simple linear partitioner will be
// used to partition such that the number of nonzeros is equal (or
// close to equal) on each processor. No parameter is necessary to
// specify this.