This page collects typical usage examples of the C++ method teuchos::RCP::InsertGlobalIndices. If you are wondering how RCP::InsertGlobalIndices is used in practice, or are looking for concrete examples of calling it, the selected code samples below should help. You can also look further into usage examples of Teuchos::RCP, the smart-pointer class through which the method is invoked in these examples.
A total of 9 code examples of the RCP::InsertGlobalIndices method are shown below, sorted by popularity by default.
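Strictly speaking, InsertGlobalIndices is a member function of Epetra_CrsGraph (from Trilinos' Epetra package); in the examples below it is reached through a Teuchos::RCP smart pointer's operator->. As orientation before the examples, here is a minimal, self-contained sketch of the usual insert-then-FillComplete pattern. It is not taken from any of the examples below: it assumes a serial build and an illustrative 10-row tridiagonal graph.

#include <vector>
#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Epetra_CrsGraph.h"
#include "Epetra_DataAccess.h"
#include "Teuchos_RCP.hpp"

int main()
{
  Epetra_SerialComm comm;
  const int num_rows = 10;
  Epetra_Map map(num_rows, 0, comm);              // 10 global rows, index base 0

  // Hold the graph in an RCP; graph->InsertGlobalIndices(...) forwards to
  // Epetra_CrsGraph through the RCP's operator->.
  Teuchos::RCP<Epetra_CrsGraph> graph =
    Teuchos::rcp(new Epetra_CrsGraph(Copy, map, 3));

  for (int lid = 0; lid < map.NumMyElements(); ++lid) {
    int gid = map.GID(lid);
    std::vector<int> cols;
    if (gid > 0)            cols.push_back(gid - 1);
    cols.push_back(gid);
    if (gid < num_rows - 1) cols.push_back(gid + 1);
    graph->InsertGlobalIndices(gid, (int)cols.size(), &cols[0]);
  }

  graph->FillComplete();                          // sort, remove duplicates, build local indexing
  return 0;
}

The same pattern carries over to the distributed and replicated maps used below: each process inserts global column indices for the rows it owns, and FillComplete() finishes the graph.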
Example 1: index
template <typename ordinal_type, typename value_type>
Teuchos::RCP<Epetra_CrsGraph>
sparse3Tensor2CrsGraph(
  const Stokhos::Sparse3Tensor<ordinal_type,value_type>& Cijk,
  const Epetra_BlockMap& map)
{
  typedef Stokhos::Sparse3Tensor<ordinal_type,value_type> Cijk_type;

  // Graph to be created
  Teuchos::RCP<Epetra_CrsGraph> graph =
    Teuchos::rcp(new Epetra_CrsGraph(Copy, map, 0));

  // Loop over Cijk entries, including a non-zero in the graph at
  // indices (i,j) if there is any k for which Cijk is non-zero
  for (typename Cijk_type::k_iterator k_it=Cijk.k_begin();
       k_it!=Cijk.k_end(); ++k_it) {
    for (typename Cijk_type::kj_iterator j_it = Cijk.j_begin(k_it);
         j_it != Cijk.j_end(k_it); ++j_it) {
      ordinal_type j = index(j_it);
      for (typename Cijk_type::kji_iterator i_it = Cijk.i_begin(j_it);
           i_it != Cijk.i_end(j_it); ++i_it) {
        ordinal_type i = index(i_it);
        graph->InsertGlobalIndices(i, 1, &j);
      }
    }
  }

  // Sort, remove redundancies, transform to local, ...
  graph->FillComplete();

  return graph;
}
Example 2: map
template <typename ordinal_type, typename value_type>
Teuchos::RCP<Epetra_CrsGraph>
sparse3Tensor2CrsGraph(
  const Stokhos::OrthogPolyBasis<ordinal_type,value_type>& basis,
  const Stokhos::Sparse3Tensor<ordinal_type,value_type>& Cijk,
  const Epetra_Comm& comm)
{
  // Number of stochastic rows
  ordinal_type num_rows = basis.size();

  // Replicated local map
  Epetra_LocalMap map(num_rows, 0, comm);

  // Graph to be created
  Teuchos::RCP<Epetra_CrsGraph> graph =
    Teuchos::rcp(new Epetra_CrsGraph(Copy, map, 0));

  // Loop over Cijk entries, including a non-zero in the graph at
  // indices (i,j) if there is any k for which Cijk is non-zero
  ordinal_type Cijk_size = Cijk.size();
  for (ordinal_type k=0; k<Cijk_size; k++) {
    ordinal_type nj = Cijk.num_j(k);
    const Teuchos::Array<int>& j_indices = Cijk.Jindices(k);
    for (ordinal_type jj=0; jj<nj; jj++) {
      ordinal_type j = j_indices[jj];
      const Teuchos::Array<int>& i_indices = Cijk.Iindices(k,jj);
      ordinal_type ni = i_indices.size();
      for (ordinal_type ii=0; ii<ni; ii++) {
        ordinal_type i = i_indices[ii];
        graph->InsertGlobalIndices(i, 1, &j);
      }
    }
  }

  // Sort, remove redundancies, transform to local, ...
  graph->FillComplete();

  return graph;
}
Example 3: PerformSymbolicFactorization
//=============================================================================
int Amesos_Dscpack::PerformSymbolicFactorization()
{
ResetTimer(0);
ResetTimer(1);
MyPID_ = Comm().MyPID();
NumProcs_ = Comm().NumProc();
Epetra_RowMatrix *RowMatrixA = Problem_->GetMatrix();
if (RowMatrixA == 0)
AMESOS_CHK_ERR(-1);
const Epetra_Map& OriginalMap = RowMatrixA->RowMatrixRowMap() ;
const Epetra_MpiComm& comm1 = dynamic_cast<const Epetra_MpiComm &> (Comm());
int numrows = RowMatrixA->NumGlobalRows();
int numentries = RowMatrixA->NumGlobalNonzeros();
Teuchos::RCP<Epetra_CrsGraph> Graph;
Epetra_CrsMatrix* CastCrsMatrixA =
dynamic_cast<Epetra_CrsMatrix*>(RowMatrixA);
if (CastCrsMatrixA)
{
Graph = Teuchos::rcp(const_cast<Epetra_CrsGraph*>(&(CastCrsMatrixA->Graph())), false);
}
else
{
int MaxNumEntries = RowMatrixA->MaxNumEntries();
Graph = Teuchos::rcp(new Epetra_CrsGraph(Copy, OriginalMap, MaxNumEntries));
std::vector<int> Indices(MaxNumEntries);
std::vector<double> Values(MaxNumEntries);
for (int i = 0 ; i < RowMatrixA->NumMyRows() ; ++i)
{
int NumEntries;
RowMatrixA->ExtractMyRowCopy(i, MaxNumEntries, NumEntries,
&Values[0], &Indices[0]);
for (int j = 0 ; j < NumEntries ; ++j)
Indices[j] = RowMatrixA->RowMatrixColMap().GID(Indices[j]);
int GlobalRow = RowMatrixA->RowMatrixRowMap().GID(i);
Graph->InsertGlobalIndices(GlobalRow, NumEntries, &Indices[0]);
}
Graph->FillComplete();
}
//
// Create a replicated map and graph
//
std::vector<int> AllIDs( numrows ) ;
for ( int i = 0; i < numrows ; i++ ) AllIDs[i] = i ;
Epetra_Map ReplicatedMap( -1, numrows, &AllIDs[0], 0, Comm());
Epetra_Import ReplicatedImporter(ReplicatedMap, OriginalMap);
Epetra_CrsGraph ReplicatedGraph( Copy, ReplicatedMap, 0 );
AMESOS_CHK_ERR(ReplicatedGraph.Import(*Graph, ReplicatedImporter, Insert));
AMESOS_CHK_ERR(ReplicatedGraph.FillComplete());
//
// Convert the matrix to Ap, Ai
//
std::vector <int> Replicates(numrows);
std::vector <int> Ap(numrows + 1);
std::vector <int> Ai(EPETRA_MAX(numrows, numentries));
for( int i = 0 ; i < numrows; i++ ) Replicates[i] = 1;
int NumEntriesPerRow ;
int *ColIndices = 0 ;
int Ai_index = 0 ;
for ( int MyRow = 0; MyRow <numrows; MyRow++ ) {
AMESOS_CHK_ERR( ReplicatedGraph.ExtractMyRowView( MyRow, NumEntriesPerRow, ColIndices ) );
Ap[MyRow] = Ai_index ;
for ( int j = 0; j < NumEntriesPerRow; j++ ) {
Ai[Ai_index] = ColIndices[j] ;
Ai_index++;
}
}
assert( Ai_index == numentries ) ;
Ap[ numrows ] = Ai_index ;
MtxConvTime_ = AddTime("Total matrix conversion time", MtxConvTime_, 0);
ResetTimer(0);
//
// Call Dscpack Symbolic Factorization
//
int OrderCode = 2;
std::vector<double> MyANonZ;
NumLocalNonz = 0 ;
GlobalStructNewColNum = 0 ;
GlobalStructNewNum = 0 ;
//......... part of the code omitted here .........
Example 4: C_localRMap
//......... part of the code omitted here .........
// TODO: Can do better than this, just need to go to the column map
// of C, there might be null columns in C
// Not much of use for Shasta 2x2 .. Later.
probevec.ReplaceGlobalValue(g_rows[cindex], k, 1.0);
//if (mypid == 0)
//cout << "Changing row to 1.0 " << g_rows[cindex] << endl;
}
#ifdef TIMING_OUTPUT
app_time.start();
#endif
probeop.Apply(probevec, Scol);
#ifdef TIMING_OUTPUT
app_time.stop();
#endif
Scol.MaxValue(maxvalue);
for (int k = 0; k < nvectors; k++) //TODO:Need to switch these loops
{
cindex = k+i;
vecvalues = Scol[k];
//cout << "MAX" << maxvalue << endl;
for (int j = 0 ; j < g_localElems ; j++)
{
nentries = 0; // inserting one entry in each row for now
if (g_rows[cindex] == g_rows[j]) // diagonal entry
{
values[nentries] = vecvalues[j];
indices[nentries] = g_rows[cindex];
nentries++;
err = Sbar->InsertGlobalValues(g_rows[j], nentries, values,
indices);
assert(err >= 0);
err = lSGraph->InsertGlobalIndices(g_rows[j], nentries,
indices);
assert(err >= 0);
}
else if (abs(vecvalues[j]/maxvalue[k]) > relative_thres)
{
values[nentries] = vecvalues[j];
indices[nentries] = g_rows[cindex];
nentries++;
err = Sbar->InsertGlobalValues(g_rows[j], nentries, values,
indices);
assert(err >= 0);
err = lSGraph->InsertGlobalIndices(g_rows[j], nentries,
indices);
assert(err >= 0);
}
else
{
if (vecvalues[j] != 0.0)
{
dropped++;
//cout << "vecvalues[j]" << vecvalues[j] <<
// " max" << maxvalue[k] << endl;
}
}
}
}
}
probeop.ResetTempVectors(1);
for ( ; i < g_localElems ; i++)
{
Example 5: abort
//......... part of the code omitted here .........
// Next Mj is allocated. Then, the second time, the
// arrays are actually populated.
nzM = 0; q = -1; l = 0;
int * indices;
int numEntries;
for( i = 0; i <= nrr; i++ ){
if( i >= r[l+1] ){
if( q > 0 ) std::qsort(colstack,q+1,sizeof(int),compare_ints); /* sort stack */
if( q >= 0 ) ns = 1; /* l, colstack[0] M */
for( j=1; j<=q ; j++ ){ /* delete copies */
if( colstack[j] > colstack[j-1] ) ++ns;
}
nzM += ns; /*M->p[l+1] = M->p[l] + ns;*/
++l;
q = -1;
}
if( i < nrr ){
B.ExtractMyRowView( i, numEntries, indices );
for( k = 0; k < numEntries; k++){
j = indices[k]; ns = 0; p = 0;
while( (r[bstree[p]] > j) || (j >= r[bstree[p]+1]) ){
if( r[bstree[p]] > j){
p = 2*p+1;
}else{
if( r[bstree[p]+1] <= j) p = 2*p+2;
}
++ns;
if( p > nbrr || ns > tree_height ) {
error = j;
std::printf("error: p %d nbrr %d ns %d %d\n",p,nbrr,ns,j); break;
}
}
colstack[++q] = bstree[p];
}
//if( error >-1 ){ std::printf("%d\n",error); break; }
// p > nbrr is a fatal error that is ignored
}
}
if ( matProc == myPID && verbose )
std::printf("nzM = %d \n", nzM );
Mi.resize( nzM );
Mj.resize( nzM );
q = -1; l = 0; pm = -1;
for( i = 0; i <= nrr; i++ ){
if( i >= r[l+1] ){
if( q > 0 ) std::qsort(colstack,q+1,sizeof(colstack[0]),compare_ints); /* sort stack */
if( q >= 0 ){
Mi[++pm] = l;
Mj[pm] = colstack[0];
}
for( j=1; j<=q ; j++ ){ /* delete copies */
if( colstack[j] > colstack[j-1] ){ /* l, colstack[j] */
Mi[++pm] = l;
Mj[pm] = colstack[j];
}
}
++l;
Mnum[l] = pm + 1;
/* sparse row format: M->p[l+1] = M->p[l] + ns; */
q = -1;
}
if( i < nrr ){
B.ExtractMyRowView( i, numEntries, indices );
for( k = 0; k < numEntries; k++){
j = indices[k]; ns = 0; p = 0;
while( (r[bstree[p]] > j) || (j >= r[bstree[p]+1]) ){
if( r[bstree[p]] > j){
p = 2*p+1;
}else{
if( r[bstree[p]+1] <= j) p = 2*p+2;
}
++ns;
}
colstack[++q] = bstree[p];
}
}
}
if ( bstree ) free ( bstree );
if ( colstack ) free( colstack );
// Compute weights as number of rows in each block.
weights.resize( nbrr );
for( l=0; l<nbrr; l++) weights[l] = r[l+1] - r[l];
// Compute Epetra_CrsGraph and return
Teuchos::RCP<Epetra_Map> newMap;
if ( matProc == myPID )
newMap = Teuchos::rcp( new Epetra_Map(nbrr, nbrr, 0, B.Comm() ) );
else
newMap = Teuchos::rcp( new Epetra_Map( nbrr, 0, 0, B.Comm() ) );
Teuchos::RCP<Epetra_CrsGraph> newGraph = Teuchos::rcp( new Epetra_CrsGraph( Copy, *newMap, 0 ) );
for( l=0; l<newGraph->NumMyRows(); l++) {
newGraph->InsertGlobalIndices( l, Mnum[l+1]-Mnum[l], &Mj[Mnum[l]] );
}
newGraph->FillComplete();
return (newGraph);
}
Example 6: ttime
//......... part of the code omitted here .........
}
if (gvals[gid] == 1)
{ // D or C row
if (insertValues)
{
err = D->InsertGlobalValues(gid, lcnt, LeftValues, LeftIndex);
assert(err == 0);
err = C->InsertGlobalValues(gid, rcnt, RightValues, RightIndex);
assert(err == 0);
}
else
{
err = D->ReplaceMyValues(D->LRID(gid), lcnt, LeftValues,
LeftIndex);
assert(err == 0);
err = C->ReplaceMyValues(C->LRID(gid), rcnt, RightValues,
RightIndex);
assert(err == 0);
}
}
else
{ // R or S row
//assert(lcnt > 0); // TODO: Enable this once using narrow sep.
if (insertValues)
{
assert(rcnt > 0);
err = R->InsertGlobalValues(gid, lcnt, LeftValues, LeftIndex);
assert(err == 0);
err = G->InsertGlobalValues(gid, rcnt, RightValues, RightIndex);
assert(err == 0);
if (config->schurApproxMethod == 1)
{
err = Sg->InsertGlobalIndices(gid, rcnt, RightIndex);
assert(err == 0);
}
}
else
{
assert(rcnt > 0);
err = R->ReplaceMyValues(R->LRID(gid), lcnt, LeftValues,
LeftIndex);
assert(err == 0);
err = G->ReplaceMyValues(G->LRID(gid), rcnt, RightValues,
RightIndex);
assert(err == 0);
}
}
}
if (insertValues)
{
/* ------------- Create the maps for the DBBD form ------------------ */
Epetra_Map *DRowMap, *SRowMap, *DColMap;
Epetra_SerialComm LComm;
if (config->sep_type != 1)
{
DRowMap = new Epetra_Map(-1, data->Dnr, data->DRowElems, 0,
A->Comm());
SRowMap = new Epetra_Map(-1, data->Snr, data->SRowElems, 0,
A->Comm());
DColMap = new Epetra_Map(-1, data->Dnc, DColElems, 0,
A->Comm());
}
else
{
Example 7: op
TEUCHOS_UNIT_TEST(interlaced_op, test)
{
#ifdef HAVE_MPI
Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
#else
Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_SerialComm);
#endif
//int rank = comm->MyPID();
int numProc = comm->NumProc();
int num_KL = 1;
int porder = 5;
bool full_expansion = false;
Teuchos::RCP<const Stokhos::CompletePolynomialBasis<int,double> > basis = buildBasis(num_KL,porder);
Teuchos::RCP<Stokhos::Sparse3Tensor<int,double> > Cijk;
Teuchos::RCP<Stokhos::ParallelData> sg_parallel_data;
Teuchos::RCP<Stokhos::OrthogPolyExpansion<int,double> > expansion;
{
if(full_expansion)
Cijk = basis->computeTripleProductTensor();
else
Cijk = basis->computeLinearTripleProductTensor();
Teuchos::ParameterList parallelParams;
parallelParams.set("Number of Spatial Processors", numProc);
sg_parallel_data = Teuchos::rcp(new Stokhos::ParallelData(basis, Cijk, comm,
parallelParams));
expansion = Teuchos::rcp(new Stokhos::AlgebraicOrthogPolyExpansion<int,double>(basis,
Cijk));
}
Teuchos::RCP<const EpetraExt::MultiComm> sg_comm = sg_parallel_data->getMultiComm();
// deterministic PDE graph
Teuchos::RCP<Epetra_Map> determRowMap = Teuchos::rcp(new Epetra_Map(-1,10,0,*comm));
Teuchos::RCP<Epetra_CrsGraph> determGraph = Teuchos::rcp(new Epetra_CrsGraph(Copy,*determRowMap,1));
for(int row=0;row<determRowMap->NumMyElements();row++) {
int gid = determRowMap->GID(row);
determGraph->InsertGlobalIndices(gid,1,&gid);
}
for(int row=1;row<determRowMap->NumMyElements()-1;row++) {
int gid = determRowMap->GID(row);
int indices[2] = {gid-1,gid+1};
determGraph->InsertGlobalIndices(gid,2,indices);
}
determGraph->FillComplete();
Teuchos::RCP<Teuchos::ParameterList> params = Teuchos::rcp(new Teuchos::ParameterList);
params->set("Scale Operator by Inverse Basis Norms", false);
params->set("Include Mean", true);
params->set("Only Use Linear Terms", false);
Teuchos::RCP<Stokhos::EpetraSparse3Tensor> epetraCijk =
Teuchos::rcp(new Stokhos::EpetraSparse3Tensor(basis,Cijk,sg_comm));
Teuchos::RCP<Stokhos::EpetraOperatorOrthogPoly> W_sg_blocks =
Teuchos::rcp(new Stokhos::EpetraOperatorOrthogPoly(basis, epetraCijk->getStochasticRowMap(), determRowMap, determRowMap, sg_comm));
for(int i=0; i<W_sg_blocks->size(); i++) {
Teuchos::RCP<Epetra_CrsMatrix> crsMat = Teuchos::rcp(new Epetra_CrsMatrix(Copy,*determGraph));
crsMat->PutScalar(1.0 + i);
W_sg_blocks->setCoeffPtr(i,crsMat); // allocate a bunch of matrices
}
Teuchos::RCP<const Epetra_Map> sg_map =
Teuchos::rcp(EpetraExt::BlockUtility::GenerateBlockMap(
*determRowMap, *(epetraCijk->getStochasticRowMap()),
*(epetraCijk->getMultiComm())));
// build an interlaced operator (object under test) and a benchmark
// fully assembled operator
///////////////////////////////////////////////////////////////////////
Stokhos::InterlacedOperator op(sg_comm,basis,epetraCijk,determGraph,params);
op.PutScalar(0.0);
op.setupOperator(W_sg_blocks);
Stokhos::FullyAssembledOperator full_op(sg_comm,basis,epetraCijk,determGraph,sg_map,sg_map,params);
full_op.PutScalar(0.0);
full_op.setupOperator(W_sg_blocks);
// here we test interlaced operator against the fully assembled operator
///////////////////////////////////////////////////////////////////////
bool result = true;
for(int i=0;i<100;i++) {
// build vector for fully assembled operator (blockwise)
Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> x_vec_blocks =
Teuchos::rcp(new Stokhos::EpetraVectorOrthogPoly(basis,epetraCijk->getStochasticRowMap(),determRowMap,epetraCijk->getMultiComm()));
Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> f_vec_blocks =
Teuchos::rcp(new Stokhos::EpetraVectorOrthogPoly(basis,epetraCijk->getStochasticRowMap(),determRowMap,epetraCijk->getMultiComm()));
Teuchos::RCP<Epetra_Vector> x_vec_blocked = x_vec_blocks->getBlockVector();
Teuchos::RCP<Epetra_Vector> f_vec_blocked = f_vec_blocks->getBlockVector();
x_vec_blocked->Random(); // build an initial vector
f_vec_blocked->PutScalar(0.0);
// build interlaced vectors
Teuchos::RCP<Epetra_Vector> x_vec_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorDomainMap()));
Teuchos::RCP<Epetra_Vector> f_vec_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorRangeMap()));
Teuchos::RCP<Epetra_Vector> f_vec_blk_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorRangeMap()));
Stokhos::SGModelEvaluator_Interlaced::copyToInterlacedVector(*x_vec_blocks,*x_vec_inter); // copy random x to
//......... part of the code omitted here .........
Example 8: setup
// Can't be a constructor because MPI will not be initialized
void setup() {
Epetra_Object::SetTracebackMode(2);
// Test tolerance
tol = 1.0e-12;
// Basis of dimension 2, order 3
const int d = 2;
const int p = 3;
Teuchos::Array< Teuchos::RCP<const Stokhos::OneDOrthogPolyBasis<int,double> > > bases(d);
for (int i=0; i<d; i++) {
bases[i] = Teuchos::rcp(new Stokhos::LegendreBasis<int,double>(p));
}
Teuchos::RCP<const Stokhos::CompletePolynomialBasis<int,double> > basis =
Teuchos::rcp(new Stokhos::CompletePolynomialBasis<int,double>(bases));
// Triple product tensor
Teuchos::RCP<Stokhos::Sparse3Tensor<int,double> > Cijk =
basis->computeTripleProductTensor(basis->size());
// Create a communicator for Epetra objects
Teuchos::RCP<const Epetra_Comm> globalComm;
#ifdef HAVE_MPI
globalComm = Teuchos::rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
#else
globalComm = Teuchos::rcp(new Epetra_SerialComm);
#endif
// Create stochastic parallel distribution
int num_spatial_procs = -1;
int num_procs = globalComm->NumProc();
if (num_procs > 1)
num_spatial_procs = num_procs / 2;
Teuchos::ParameterList parallelParams;
parallelParams.set("Number of Spatial Processors", num_spatial_procs);
Teuchos::RCP<Stokhos::ParallelData> sg_parallel_data =
Teuchos::rcp(new Stokhos::ParallelData(basis, Cijk, globalComm,
parallelParams));
Teuchos::RCP<const EpetraExt::MultiComm> sg_comm =
sg_parallel_data->getMultiComm();
Teuchos::RCP<const Epetra_Comm> app_comm =
sg_parallel_data->getSpatialComm();
Teuchos::RCP<const Stokhos::EpetraSparse3Tensor> epetraCijk =
sg_parallel_data->getEpetraCijk();
// Deterministic domain map
const int num_x = 5;
Teuchos::RCP<Epetra_Map> x_map =
Teuchos::rcp(new Epetra_Map(num_x, 0, *app_comm));
// Deterministic column map
Teuchos::RCP<Epetra_Map> x_overlap_map =
Teuchos::rcp(new Epetra_LocalMap(num_x, 0, *app_comm));
// Deterministic range map
const int num_f = 3;
Teuchos::RCP<Epetra_Map> f_map =
Teuchos::rcp(new Epetra_Map(num_f, 0, *app_comm));
// Product domain & range maps
Teuchos::RCP<const Epetra_BlockMap> stoch_row_map =
epetraCijk->getStochasticRowMap();
sg_x_map = Teuchos::rcp(EpetraExt::BlockUtility::GenerateBlockMap(
*x_map, *stoch_row_map, *sg_comm));
sg_f_map = Teuchos::rcp(EpetraExt::BlockUtility::GenerateBlockMap(
*f_map, *stoch_row_map, *sg_comm));
// Deterministic matrix graph
const int num_indices = num_x;
Teuchos::RCP<Epetra_CrsGraph> graph =
Teuchos::rcp(new Epetra_CrsGraph(Copy, *f_map, num_indices));
int indices[num_indices];
for (int j=0; j<num_indices; j++)
indices[j] = x_overlap_map->GID(j);
for (int i=0; i<f_map->NumMyElements(); i++)
graph->InsertGlobalIndices(f_map->GID(i), num_indices, indices);
graph->FillComplete(*x_map, *f_map);
// Create matrix expansion
Teuchos::RCP<Epetra_BlockMap> sg_overlap_map =
Teuchos::rcp(new Epetra_LocalMap(
basis->size(), 0,
*(sg_parallel_data->getStochasticComm())));
Teuchos::RCP< Stokhos::EpetraOperatorOrthogPoly > mat_sg =
Teuchos::rcp(new Stokhos::EpetraOperatorOrthogPoly(
basis, sg_overlap_map, x_map, f_map, sg_f_map, sg_comm));
for (int block=0; block<basis->size(); block++) {
Teuchos::RCP<Epetra_CrsMatrix> mat =
Teuchos::rcp(new Epetra_CrsMatrix(Copy, *graph));
TEUCHOS_TEST_FOR_EXCEPTION(!mat->IndicesAreLocal(), std::logic_error,
"Indices are not local!");
double values[num_indices];
for (int i=0; i<f_map->NumMyElements(); i++) {
for (int j=0; j<num_indices; j++) {
indices[j] = x_overlap_map->GID(j);
values[j] = 0.1*(i+1)*(j+1)*(block+1);
}
mat->ReplaceMyValues(i, num_indices, values, indices);
//......... part of the code omitted here .........
Example 9: comm
Teuchos::RCP<Epetra_CrsGraph>
create_epetra_graph(int numProcs, int localProc)
{
  if (localProc == 0) {
    std::cout << " creating Epetra_CrsGraph with un-even distribution..."
              << std::endl;
  }

  // Create an Epetra_CrsGraph with rows spread un-evenly over processors.
  Epetra_MpiComm comm(MPI_COMM_WORLD);
  int local_num_rows = 800;
  int nnz_per_row = local_num_rows/4+1;
  int global_num_rows = numProcs*local_num_rows;

  int mid_proc = numProcs/2;
  bool num_procs_even = numProcs%2==0 ? true : false;
  int adjustment = local_num_rows/2;

  // Adjust local_num_rows so that it's not equal on all procs.
  if (localProc < mid_proc) {
    local_num_rows -= adjustment;
  }
  else {
    local_num_rows += adjustment;
  }

  // If numProcs is not an even number, undo the local_num_rows adjustment
  // on one proc so that the total will still be correct.
  if (localProc == numProcs-1) {
    if (num_procs_even == false) {
      local_num_rows -= adjustment;
    }
  }

  // Now we're ready to create a row-map.
  Epetra_Map rowmap(global_num_rows, local_num_rows, 0, comm);

  // Create a graph
  Teuchos::RCP<Epetra_CrsGraph> graph =
    Teuchos::rcp(new Epetra_CrsGraph(Copy, rowmap, nnz_per_row));

  std::vector<int> indices(nnz_per_row);
  std::vector<double> coefs(nnz_per_row);

  int err = 0;

  for(int i=0; i<local_num_rows; ++i) {
    int global_row = rowmap.GID(i);
    int first_col = global_row - nnz_per_row/2;

    if (first_col < 0) {
      first_col = 0;
    }
    else if (first_col > (global_num_rows - nnz_per_row)) {
      first_col = global_num_rows - nnz_per_row;
    }

    for(int j=0; j<nnz_per_row; ++j) {
      indices[j] = first_col + j;
      coefs[j] = 1.0;
    }

    err = graph->InsertGlobalIndices(global_row, nnz_per_row,
                                     &indices[0]);
    if (err < 0) {
      throw Isorropia::Exception("create_epetra_graph: error inserting indices in graph");
    }
  }

  err = graph->FillComplete();
  if (err != 0) {
    throw Isorropia::Exception("create_epetra_graph: error in graph.FillComplete()");
  }

  return(graph);
}