This article collects typical usage examples of the C++ function MatMPIAIJSetPreallocation. If you are unsure what MatMPIAIJSetPreallocation does, how to call it, or what working code looks like, the examples curated here should help.
Fifteen code examples of MatMPIAIJSetPreallocation are shown below, ordered by popularity.
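Before the examples, here is a minimal, self-contained sketch of the canonical call sequence. It is not taken from the examples below; the helper name BuildTridiagonal and the 3/2 per-row counts are illustrative assumptions for a 1-D tridiagonal stencil.
#include <petscmat.h>
/* Sketch: preallocate and fill a parallel AIJ matrix for a 1-D tridiagonal
   stencil, assuming at most 3 nonzeros per row in the diagonal block and
   2 per row in the off-diagonal block. */
static PetscErrorCode BuildTridiagonal(MPI_Comm comm, PetscInt N, Mat *A)
{
  PetscErrorCode ierr;
  PetscInt       rstart, rend, i;
  ierr = MatCreate(comm, A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A, PETSC_DECIDE, PETSC_DECIDE, N, N);CHKERRQ(ierr);
  ierr = MatSetType(*A, MATAIJ);CHKERRQ(ierr);
  /* Call both preallocation routines: each is a no-op unless the matrix
     type matches, so the same code runs on one process or many. */
  ierr = MatSeqAIJSetPreallocation(*A, 3, NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*A, 3, NULL, 2, NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(*A, &rstart, &rend);CHKERRQ(ierr);
  for (i = rstart; i < rend; i++) {
    ierr = MatSetValue(*A, i, i, 2.0, INSERT_VALUES);CHKERRQ(ierr);
    if (i > 0)   {ierr = MatSetValue(*A, i, i-1, -1.0, INSERT_VALUES);CHKERRQ(ierr);}
    if (i < N-1) {ierr = MatSetValue(*A, i, i+1, -1.0, INSERT_VALUES);CHKERRQ(ierr);}
  }
  ierr = MatAssemblyBegin(*A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}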
Example 1: PetscStrcmp
/***************** Array Access ********************/
int DA::createMatrix(Mat &M, MatType mtype, unsigned int dof) {
// first determine the size ...
unsigned int sz = 0;
if(m_bIamActive) {
sz = dof*(m_uiNodeSize + m_uiBoundaryNodeSize);
}//end if active
// now create the PETSc Mat
// The "parallel direct solver" matrix types like MATAIJSPOOLES are ALL gone in petsc-3.0.0
// Thus, I (Ilya Lashuk) "delete" all such checks for matrix type. I hope this is a reasonable thing to do.
PetscBool isAij, isAijSeq, isAijPrl, isSuperLU, isSuperLU_Dist;
PetscStrcmp(mtype,MATAIJ,&isAij);
PetscStrcmp(mtype,MATSEQAIJ,&isAijSeq);
PetscStrcmp(mtype,MATMPIAIJ,&isAijPrl);
isSuperLU = PETSC_FALSE; // PetscStrcmp(mtype,MATSUPERLU,&isSuperLU);
isSuperLU_Dist = PETSC_FALSE; // PetscStrcmp(mtype,MATSUPERLU_DIST,&isSuperLU_Dist);
MatCreate(m_mpiCommAll, &M);
MatSetSizes(M, sz,sz, PETSC_DECIDE, PETSC_DECIDE);
MatSetType(M,mtype);
if(isAij || isAijSeq || isAijPrl || isSuperLU || isSuperLU_Dist) {
if(m_iNpesAll > 1) {
MatMPIAIJSetPreallocation(M, 53*dof , PETSC_NULL, 53*dof , PETSC_NULL);
}else {
MatSeqAIJSetPreallocation(M, 53*dof , PETSC_NULL);
}
}
return 0;
}//end function
Example 2: StokesSetupMatBlock00
PetscErrorCode StokesSetupMatBlock00(Stokes *s)
{
PetscInt row, start, end, size, i, j;
PetscInt cols[5];
PetscScalar vals[5];
PetscErrorCode ierr;
PetscFunctionBeginUser;
/* A[0] is 2N-by-2N */
ierr = MatCreate(PETSC_COMM_WORLD,&s->subA[0]);CHKERRQ(ierr);
ierr = MatSetOptionsPrefix(s->subA[0],"a00_");CHKERRQ(ierr);
ierr = MatSetSizes(s->subA[0],PETSC_DECIDE,PETSC_DECIDE,2*s->nx*s->ny,2*s->nx*s->ny);CHKERRQ(ierr);
ierr = MatSetType(s->subA[0],MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(s->subA[0],5,NULL,5,NULL);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(s->subA[0], &start, &end);CHKERRQ(ierr);
for (row = start; row < end; row++) {
ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
/* first part: rows 0 to (nx*ny-1) */
ierr = StokesStencilLaplacian(s, i, j, &size, cols, vals);CHKERRQ(ierr);
/* second part: rows (nx*ny) to (2*nx*ny-1) */
if (row >= s->nx*s->ny) {
for (i = 0; i < 5; i++) cols[i] = cols[i] + s->nx*s->ny;
}
for (i = 0; i < 5; i++) vals[i] = -1.0*vals[i]; /* dynamic viscosity coef mu=-1 */
ierr = MatSetValues(s->subA[0], 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
}
ierr = MatAssemblyBegin(s->subA[0], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(s->subA[0], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 3: DMCreateInterpolation_Redundant
static PetscErrorCode DMCreateInterpolation_Redundant(DM dmc,DM dmf,Mat *P,Vec *scale)
{
PetscErrorCode ierr;
DM_Redundant *redc = (DM_Redundant*)dmc->data;
DM_Redundant *redf = (DM_Redundant*)dmf->data;
PetscMPIInt flag;
PetscInt i,rstart,rend;
PetscFunctionBegin;
ierr = MPI_Comm_compare(PetscObjectComm((PetscObject)dmc),PetscObjectComm((PetscObject)dmf),&flag);CHKERRQ(ierr);
if (flag != MPI_CONGRUENT && flag != MPI_IDENT) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_SUP,"cannot change communicators");
if (redc->rank != redf->rank) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Owning rank does not match");
if (redc->N != redf->N) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Global size does not match");
ierr = MatCreate(PetscObjectComm((PetscObject)dmc),P);CHKERRQ(ierr);
ierr = MatSetSizes(*P,redc->n,redc->n,redc->N,redc->N);CHKERRQ(ierr);
ierr = MatSetType(*P,MATAIJ);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(*P,1,0);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*P,1,0,0,0);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(*P,&rstart,&rend);CHKERRQ(ierr);
for (i=rstart; i<rend; i++) {ierr = MatSetValue(*P,i,i,1.0,INSERT_VALUES);CHKERRQ(ierr);}
ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
if (scale) {ierr = DMCreateInterpolationScale(dmc,dmf,*P,scale);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
Example 4: MatCreate
PetscErrorCode cHamiltonianMatrix::hamiltonianConstruction(){
ierr = MatCreate(PETSC_COMM_WORLD,&Hpolaron);CHKERRQ(ierr);
ierr = MatSetType(Hpolaron,MATMPIAIJ);CHKERRQ(ierr);
ierr = MatSetSizes(Hpolaron,PETSC_DECIDE,PETSC_DECIDE,DIM,DIM);CHKERRQ(ierr);
// TODO: should be able to set the symmetric/hermitian option and
// only do upper-right triangle part of matrix construction .
// and perform corresponding operations thereon.
// ierr = MatSetOption(Hpolaron,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
// ierr = MatSetOption(Hpolaron,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr);
// TODO: what is a good estimate for the preallocation?
// d_nz -- number of nonzeros per row in the DIAGONAL portion of the local
// submatrix (the same value is used for all local rows); DIM is a worst-case
// placeholder here.
// o_nz -- number of nonzeros per row in the OFF-DIAGONAL portion of the local
// submatrix (again the same value for all local rows); DIM is a placeholder too.
// More details at http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatMPIAIJSetPreallocation.html
// A condensed sketch of computing exact per-row counts follows this function.
ierr = MatMPIAIJSetPreallocation(Hpolaron,DIM,NULL,DIM,NULL);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(Hpolaron,DIM,NULL);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(Hpolaron,&rstart,&rend);CHKERRQ(ierr);
ierr = MatGetLocalSize(Hpolaron,&nlocal, NULL);CHKERRQ(ierr);
ierr = assemblance();CHKERRQ(ierr);
ierr = MatAssemblyBegin(Hpolaron,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(Hpolaron,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_ASCII_DENSE );CHKERRQ(ierr);
// ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_ASCII_MATLAB );CHKERRQ(ierr);
// ierr = MatView(Hpolaron, PETSC_VIEWER_STDOUT_WORLD );CHKERRQ(ierr);
return ierr;
}
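The TODO above asks for a better preallocation estimate. Examples 11 and 12 below compute exact per-row counts from application data; the following condensed, hypothetical sketch shows the same pattern on the periodic tridiagonal (ring) graph of Example 9 (the helper name RingPreallocate is illustrative, and N >= 3 is assumed). Since MatGetOwnershipRange cannot be called before preallocation, PetscSplitOwnership is used to predict the row split first.
/* Hypothetical sketch: exact d_nnz/o_nnz for a ring graph, computed before
   the Mat has a layout. */
static PetscErrorCode RingPreallocate(MPI_Comm comm, PetscInt N, Mat *A)
{
  PetscErrorCode ierr;
  PetscInt       m = PETSC_DECIDE, rstart, rend, r, k, *d_nnz, *o_nnz;
  ierr = PetscSplitOwnership(comm, &m, &N);CHKERRQ(ierr);       /* same split PETSc will use */
  ierr = MPI_Scan(&m, &rend, 1, MPIU_INT, MPI_SUM, comm);CHKERRQ(ierr);
  rstart = rend - m;
  ierr = PetscMalloc2(m, &d_nnz, m, &o_nnz);CHKERRQ(ierr);
  for (r = rstart; r < rend; r++) {
    const PetscInt cols[3] = {(r+N-1)%N, r, (r+1)%N};
    d_nnz[r-rstart] = o_nnz[r-rstart] = 0;
    for (k = 0; k < 3; k++) {
      if (cols[k] >= rstart && cols[k] < rend) d_nnz[r-rstart]++; /* diagonal block */
      else                                     o_nnz[r-rstart]++; /* off-diagonal block */
    }
  }
  ierr = MatCreate(comm, A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A, m, m, N, N);CHKERRQ(ierr);
  ierr = MatSetType(*A, MATAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*A, 0, d_nnz);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*A, 0, d_nnz, 0, o_nnz);CHKERRQ(ierr);
  ierr = PetscFree2(d_nnz, o_nnz);CHKERRQ(ierr);
  return 0;
}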
Example 5: StokesSetupMatFp
PetscErrorCode StokesSetupMatFp(Stokes *s)
{
PetscInt row, start, end, size, i, j;
PetscInt cols[5];
PetscScalar vals[5];
PetscErrorCode ierr;
PetscFunctionBeginUser;
// Fp is N-by-N
ierr = MatCreate(PETSC_COMM_WORLD,&s->Fp);CHKERRQ(ierr);
ierr = MatSetOptionsPrefix(s->Fp,"Fp_");CHKERRQ(ierr);
ierr = MatSetSizes(s->Fp,PETSC_DECIDE,PETSC_DECIDE,s->nx*s->ny,s->nx*s->ny);CHKERRQ(ierr);
ierr = MatSetType(s->Fp,MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(s->Fp,5,NULL,5,NULL);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(s->Fp, &start, &end);CHKERRQ(ierr);
for (row = start; row < end; row++) {
ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
ierr = StokesStencilLaplacian(s, i, j, &size, cols, vals);CHKERRQ(ierr);
for (i = 0; i < 5; i++) vals[i] = -1.0*vals[i]; /* dynamic viscosity coef mu=-1 */
ierr = MatSetValues(s->Fp, 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
}
ierr = MatAssemblyBegin(s->Fp, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(s->Fp, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 6: StokesSetupMatBlock01
PetscErrorCode StokesSetupMatBlock01(Stokes *s)
{
PetscInt row, start, end, size, i, j;
PetscInt cols[5];
PetscScalar vals[5];
PetscErrorCode ierr;
PetscFunctionBeginUser;
/* A[1] is 2N-by-N */
ierr = MatCreate(PETSC_COMM_WORLD, &s->subA[1]);CHKERRQ(ierr);
ierr = MatSetOptionsPrefix(s->subA[1],"a01_");CHKERRQ(ierr);
ierr = MatSetSizes(s->subA[1],PETSC_DECIDE,PETSC_DECIDE,2*s->nx*s->ny,s->nx*s->ny);CHKERRQ(ierr);
ierr = MatSetType(s->subA[1],MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(s->subA[1],5,NULL,5,NULL);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(s->subA[1],&start,&end);CHKERRQ(ierr);
ierr = MatSetOption(s->subA[1],MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
for (row = start; row < end; row++) {
ierr = StokesGetPosition(s, row, &i, &j);CHKERRQ(ierr);
/* first part: rows 0 to (nx*ny-1) */
if (row < s->nx*s->ny) {
ierr = StokesStencilGradientX(s, i, j, &size, cols, vals);CHKERRQ(ierr);
}
/* second part: rows (nx*ny) to (2*nx*ny-1) */
else {
ierr = StokesStencilGradientY(s, i, j, &size, cols, vals);CHKERRQ(ierr);
}
ierr = MatSetValues(s->subA[1], 1, &row, size, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
}
ierr = MatAssemblyBegin(s->subA[1], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(s->subA[1], MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 7: KSPComputeEigenvaluesExplicitly
/*@
KSPComputeExplicitOperator - Computes the explicit preconditioned operator.
Collective on KSP
Input Parameter:
. ksp - the Krylov subspace context
Output Parameter:
. mat - the explicit preconditioned operator
Notes:
This computation is done by applying the operators to columns of the
identity matrix.
Currently, this routine uses a dense matrix format when 1 processor
is used and a sparse format otherwise. This routine is costly in general,
and is recommended for use only with relatively small systems.
Level: advanced
.keywords: KSP, compute, explicit, operator
.seealso: KSPComputeEigenvaluesExplicitly(), PCComputeExplicitOperator()
@*/
PetscErrorCode KSPComputeExplicitOperator(KSP ksp,Mat *mat)
{
Vec in,out;
PetscErrorCode ierr;
PetscMPIInt size;
PetscInt i,M,m,*rows,start,end;
Mat A;
MPI_Comm comm;
PetscScalar *array,one = 1.0;
PetscFunctionBegin;
PetscValidHeaderSpecific(ksp,KSP_CLASSID,1);
PetscValidPointer(mat,2);
ierr = PetscObjectGetComm((PetscObject)ksp,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
ierr = VecDuplicate(ksp->vec_sol,&in);CHKERRQ(ierr);
ierr = VecDuplicate(ksp->vec_sol,&out);CHKERRQ(ierr);
ierr = VecGetSize(in,&M);CHKERRQ(ierr);
ierr = VecGetLocalSize(in,&m);CHKERRQ(ierr);
ierr = VecGetOwnershipRange(in,&start,&end);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&rows);CHKERRQ(ierr);
for (i=0; i<m; i++) rows[i] = start + i;
ierr = MatCreate(comm,mat);CHKERRQ(ierr);
ierr = MatSetSizes(*mat,m,m,M,M);CHKERRQ(ierr);
if (size == 1) {
ierr = MatSetType(*mat,MATSEQDENSE);CHKERRQ(ierr);
ierr = MatSeqDenseSetPreallocation(*mat,NULL);CHKERRQ(ierr);
} else {
ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*mat,0,NULL,0,NULL);CHKERRQ(ierr);
}
ierr = MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
if (!ksp->pc) {ierr = KSPGetPC(ksp,&ksp->pc);CHKERRQ(ierr);}
ierr = PCGetOperators(ksp->pc,&A,NULL);CHKERRQ(ierr);
for (i=0; i<M; i++) {
ierr = VecSet(in,0.0);CHKERRQ(ierr);
ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
ierr = VecAssemblyEnd(in);CHKERRQ(ierr);
ierr = KSP_MatMult(ksp,A,in,out);CHKERRQ(ierr);
ierr = KSP_PCApply(ksp,out,in);CHKERRQ(ierr);
ierr = VecGetArray(in,&array);CHKERRQ(ierr);
ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecRestoreArray(in,&array);CHKERRQ(ierr);
}
ierr = PetscFree(rows);CHKERRQ(ierr);
ierr = VecDestroy(&in);CHKERRQ(ierr);
ierr = VecDestroy(&out);CHKERRQ(ierr);
ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
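A hypothetical call site, assuming ksp has already been set up with KSPSetOperators; viewing the result is only sensible for small systems:
Mat expop;
ierr = KSPComputeExplicitOperator(ksp, &expop);CHKERRQ(ierr);   /* builds the preconditioned operator column by column */
ierr = MatView(expop, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = MatDestroy(&expop);CHKERRQ(ierr);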
Example 8: main
int main(int argc, char **args)
{
Mat A;
MatPartitioning part;
IS is;
PetscInt r,N = 10, start, end;
PetscErrorCode ierr;
ierr = PetscInitialize(&argc, &args, (char *) 0, help);CHKERRQ(ierr);
ierr = PetscOptionsGetInt(PETSC_NULL, "-N", &N, PETSC_NULL);CHKERRQ(ierr);
ierr = MatCreate(PETSC_COMM_WORLD, &A);CHKERRQ(ierr);
ierr = MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, N, N);CHKERRQ(ierr);
ierr = MatSetFromOptions(A);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(A, 3, PETSC_NULL);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(A, 3, PETSC_NULL, 2, PETSC_NULL);CHKERRQ(ierr);
// Create a linear mesh
ierr = MatGetOwnershipRange(A, &start, &end);CHKERRQ(ierr);
for(r = start; r < end; ++r) {
if (r == 0) {
PetscInt cols[2];
PetscScalar vals[2];
cols[0] = r; cols[1] = r+1;
vals[0] = 1.0; vals[1] = 1.0;
ierr = MatSetValues(A, 1, &r, 2, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
} else if (r == N-1) {
PetscInt cols[2];
PetscScalar vals[2];
cols[0] = r-1; cols[1] = r;
vals[0] = 1.0; vals[1] = 1.0;
ierr = MatSetValues(A, 1, &r, 2, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
} else {
PetscInt cols[3];
PetscScalar vals[3];
cols[0] = r-1; cols[1] = r; cols[2] = r+1;
vals[0] = 1.0; vals[1] = 1.0; vals[2] = 1.0;
ierr = MatSetValues(A, 1, &r, 3, cols, vals, INSERT_VALUES);CHKERRQ(ierr);
}
}
ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatPartitioningCreate(PETSC_COMM_WORLD, &part);CHKERRQ(ierr);
ierr = MatPartitioningSetAdjacency(part, A);CHKERRQ(ierr);
ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
//ierr = MatPartitioningSetVertexWeights(part, const PetscInt weights[]);CHKERRQ(ierr);
//ierr = MatPartitioningSetPartitionWeights(part,const PetscReal weights[]);CHKERRQ(ierr);
ierr = MatPartitioningApply(part, &is);CHKERRQ(ierr);
ierr = ISView(is, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = ISDestroy(&is);CHKERRQ(ierr);
ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
ierr = MatDestroy(&A);CHKERRQ(ierr);
ierr = PetscFinalize();
return 0;
}
Example 9: main
int main(int argc, char **args)
{
Mat A;
MatPartitioning part;
IS is;
PetscInt i,m,N,rstart,rend,nemptyranks,*emptyranks,nbigranks,*bigranks;
PetscMPIInt rank,size;
PetscErrorCode ierr;
ierr = PetscInitialize(&argc,&args,(char*)0,help);CHKERRQ(ierr);
ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
nemptyranks = 10;
nbigranks = 10;
ierr = PetscMalloc2(nemptyranks,PetscInt,&emptyranks,nbigranks,PetscInt,&bigranks);CHKERRQ(ierr);
ierr = PetscOptionsBegin(PETSC_COMM_WORLD,PETSC_NULL,"Partitioning example options",PETSC_NULL);CHKERRQ(ierr);
ierr = PetscOptionsIntArray("-emptyranks","Ranks to be skipped by partition","",emptyranks,&nemptyranks,PETSC_NULL);CHKERRQ(ierr);
ierr = PetscOptionsIntArray("-bigranks","Ranks to be overloaded","",bigranks,&nbigranks,PETSC_NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
m = 1;
for (i=0; i<nemptyranks; i++) if (rank == emptyranks[i]) m = 0;
for (i=0; i<nbigranks; i++) if (rank == bigranks[i]) m = 5;
ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
ierr = MatSetSizes(A,m,m,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
ierr = MatSetFromOptions(A);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(A,3,PETSC_NULL);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(A,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
ierr = MatSeqBAIJSetPreallocation(A,1,3,PETSC_NULL);CHKERRQ(ierr);
ierr = MatMPIBAIJSetPreallocation(A,1,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
ierr = MatSeqSBAIJSetPreallocation(A,1,2,PETSC_NULL);CHKERRQ(ierr);
ierr = MatMPISBAIJSetPreallocation(A,1,2,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
ierr = MatGetSize(A,PETSC_NULL,&N);CHKERRQ(ierr);
ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
for (i=rstart; i<rend; i++) {
const PetscInt cols[] = {(i+N-1)%N,i,(i+1)%N};
const PetscScalar vals[] = {1,1,1};
ierr = MatSetValues(A,1,&i,3,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
}
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = MatPartitioningCreate(PETSC_COMM_WORLD,&part);CHKERRQ(ierr);
ierr = MatPartitioningSetAdjacency(part,A);CHKERRQ(ierr);
ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
ierr = ISView(is,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = ISDestroy(&is);CHKERRQ(ierr);
ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
ierr = MatDestroy(&A);CHKERRQ(ierr);
ierr = PetscFree2(emptyranks,bigranks);CHKERRQ(ierr);
ierr = PetscFinalize();
return 0;
}
Example 10: myinterp
PetscErrorCode myinterp(MPI_Comm comm, Mat *Aout, int Nr, int Nz, int Mr, int Mz, int mr, int mz, int Mzslab)
{
//(mr,mz) specifies the origin of the DOF region.
//DOF region is a rectangle spanning diagonally from (mr,mz) to (mr+Mr-1,mz+Mz-1).
//We keep the z invariant.
Mat A;
int nz = 1; /* max # nonzero elements in each row */
PetscErrorCode ierr;
int ns, ne;
int i;
int DegFree= (Mzslab==0)*Mr*Mz + (Mzslab==1)*Mr + (Mzslab==2)*Mz;
MatCreate(comm, &A);
MatSetType(A,MATMPIAIJ);
MatSetSizes(A,PETSC_DECIDE, PETSC_DECIDE, 6*Nr*Nz, DegFree);
MatMPIAIJSetPreallocation(A, nz, PETSC_NULL, nz, PETSC_NULL);
ierr = MatGetOwnershipRange(A, &ns, &ne); CHKERRQ(ierr);
double shift=0.5;
for (i = ns; i < ne; ++i) {
int ir, iz, ic;
double rd, zd;
int ird, izd;
int j, id;
iz = (j = i) % Nz;
ir = (j /= Nz) % Nr;
ic = (j /= Nr) % 3;
rd = (ir-mr) + (ic!= 0)*shift;
ird = ceil(rd-0.5);
if (ird < 0 || ird >= Mr) continue;
zd = (iz-mz) + (ic!= 2)*shift;
izd = ceil(zd-0.5);
if (izd < 0 || izd >= Mz) continue;
if (Mzslab==1) {
id = ird;
}else if (Mzslab==2){
id = izd;
}else{
id = izd + Mz * ird;
}
ierr = MatSetValue(A, i, id, 1.0, INSERT_VALUES); CHKERRQ(ierr);
}
ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) A, "InterpMatrix"); CHKERRQ(ierr);
*Aout = A;
PetscFunctionReturn(0);
}
Example 11: ConvertMatrixToMat
PetscErrorCode ConvertMatrixToMat(MPI_Comm comm,matrix *B,Mat *PB)
{
PetscMPIInt size,rank;
PetscErrorCode ierr;
PetscInt m,n,M,N;
PetscInt d_nz,o_nz;
PetscInt *d_nnz,*o_nnz;
PetscInt i,k,global_row,global_col,first_diag_col,last_diag_col;
PetscScalar val;
PetscFunctionBegin;
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
m = n = B->mnls[rank];
d_nz = o_nz = 0;
/* Determine per-row counts for MatMPIAIJSetPreallocation */
ierr = PetscMalloc(m*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
ierr = PetscMalloc(m*sizeof(PetscInt),&o_nnz);CHKERRQ(ierr);
for (i=0; i<m; i++) d_nnz[i] = o_nnz[i] = 0;
first_diag_col = B->start_indices[rank];
last_diag_col = first_diag_col + B->mnls[rank];
for (i=0; i<B->mnls[rank]; i++) {
for (k=0; k<B->lines->len[i]; k++) {
global_col = B->lines->ptrs[i][k];
if ((global_col >= first_diag_col) && (global_col < last_diag_col))
d_nnz[i]++;
else
o_nnz[i]++;
}
}
M = N = B->n;
/* Here we only know how to create AIJ format */
ierr = MatCreate(comm,PB);CHKERRQ(ierr);
ierr = MatSetSizes(*PB,m,n,M,N);CHKERRQ(ierr);
ierr = MatSetType(*PB,MATAIJ);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(*PB,d_nz,d_nnz);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*PB,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
for (i=0; i<B->mnls[rank]; i++) {
global_row = B->start_indices[rank]+i;
for (k=0; k<B->lines->len[i]; k++) {
global_col = B->lines->ptrs[i][k];
val = B->lines->A[i][k];
ierr = MatSetValues(*PB,1,&global_row,1,&global_col,&val,ADD_VALUES);CHKERRQ(ierr);
}
}
ierr = PetscFree(d_nnz);CHKERRQ(ierr);
ierr = PetscFree(o_nnz);CHKERRQ(ierr);
ierr = MatAssemblyBegin(*PB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(*PB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 12: fill_petsc
/*! \brief Fill the petsc Matrix
*
*
*/
void fill_petsc()
{
d_nnz.resize(l_row);
o_nnz.resize(l_row);
d_nnz.fill(0);
o_nnz.fill(0);
// Here we explore every row to count how many non zero we have in the diagonal matrix part,
// and the non diagonal matrix part, needed by MatMPIAIJSetPreallocation
size_t i = 0;
// Set the Matrix from triplet
while (i < trpl.size())
{
PetscInt row = trpl.get(i).row();
while (i < trpl.size() && row == trpl.get(i).row()) // bounds check must come first
{
if ((size_t)trpl.get(i).col() >= start_row && (size_t)trpl.get(i).col() < start_row + l_row)
d_nnz.get(row - start_row)++;
else
o_nnz.get(row - start_row)++;
i++;
}
}
PETSC_SAFE_CALL(MatMPIAIJSetPreallocation(mat,0,static_cast<const PetscInt*>(d_nnz.getPointer()),0,
static_cast<const PetscInt*>(o_nnz.getPointer())));
// Reset the counter for a second pass that sets the values
i = 0;
// Set the Matrix from triplet
while (i < trpl.size())
{
vals.clear();
cols.clear();
PetscInt row = trpl.get(i).row();
while (i < trpl.size() && row == trpl.get(i).row()) // bounds check must come first
{
vals.add(trpl.get(i).value());
cols.add(trpl.get(i).col());
i++;
}
PETSC_SAFE_CALL(MatSetValues(mat,1,&row,cols.size(),static_cast<const PetscInt*>(cols.getPointer()),
static_cast<const PetscScalar *>(vals.getPointer()),
INSERT_VALUES));
}
PETSC_SAFE_CALL(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
PETSC_SAFE_CALL(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
}
Example 13: MatComputeExplicitOperator
/*@
MatComputeExplicitOperator - Computes the explicit matrix
Collective on Mat
Input Parameter:
. inmat - the matrix
Output Parameter:
. mat - the explicit operator
Notes:
This computation is done by applying the operators to columns of the
identity matrix.
Currently, this routine uses a dense matrix format when 1 processor
is used and a sparse format otherwise. This routine is costly in general,
and is recommended for use only with relatively small systems.
Level: advanced
.keywords: Mat, compute, explicit, operator
@*/
PetscErrorCode MatComputeExplicitOperator(Mat inmat,Mat *mat)
{
Vec in,out;
PetscErrorCode ierr;
PetscInt i,m,n,M,N,*rows,start,end;
MPI_Comm comm;
PetscScalar *array,zero = 0.0,one = 1.0;
PetscMPIInt size;
PetscFunctionBegin;
PetscValidHeaderSpecific(inmat,MAT_CLASSID,1);
PetscValidPointer(mat,2);
ierr = PetscObjectGetComm((PetscObject)inmat,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
ierr = MatGetLocalSize(inmat,&m,&n);CHKERRQ(ierr);
ierr = MatGetSize(inmat,&M,&N);CHKERRQ(ierr);
ierr = MatGetVecs(inmat,&in,&out);CHKERRQ(ierr);
ierr = VecSetOption(in,VEC_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
ierr = VecGetOwnershipRange(out,&start,&end);CHKERRQ(ierr);
ierr = PetscMalloc(m*sizeof(PetscInt),&rows);CHKERRQ(ierr);
for (i=0; i<m; i++) rows[i] = start + i;
ierr = MatCreate(comm,mat);CHKERRQ(ierr);
ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
if (size == 1) {
ierr = MatSetType(*mat,MATSEQDENSE);CHKERRQ(ierr);
ierr = MatSeqDenseSetPreallocation(*mat,NULL);CHKERRQ(ierr);
} else {
ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*mat,n,NULL,N-n,NULL);CHKERRQ(ierr);
}
for (i=0; i<N; i++) {
ierr = VecSet(in,zero);CHKERRQ(ierr);
ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
ierr = VecAssemblyEnd(in);CHKERRQ(ierr);
ierr = MatMult(inmat,in,out);CHKERRQ(ierr);
ierr = VecGetArray(out,&array);CHKERRQ(ierr);
ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecRestoreArray(out,&array);CHKERRQ(ierr);
}
ierr = PetscFree(rows);CHKERRQ(ierr);
ierr = VecDestroy(&out);CHKERRQ(ierr);
ierr = VecDestroy(&in);CHKERRQ(ierr);
ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
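A common use is verifying a matrix-free (MATSHELL) operator against an assembled reference matrix. A hypothetical snippet, where shellmat and Aref are assumed to exist and have the same sizes:
Mat       expmat;
PetscReal err;
ierr = MatComputeExplicitOperator(shellmat, &expmat);CHKERRQ(ierr);
ierr = MatAXPY(expmat, -1.0, Aref, DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); /* expmat -= Aref */
ierr = MatNorm(expmat, NORM_FROBENIUS, &err);CHKERRQ(ierr);                  /* ~0 if the shell is correct */
ierr = MatDestroy(&expmat);CHKERRQ(ierr);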
Example 14: MatSeqAIJSetPreallocation
/*@
MatXAIJSetPreallocation - set preallocation for serial and parallel AIJ, BAIJ, and SBAIJ matrices
Collective on Mat
Input Arguments:
+ A - matrix being preallocated
. bs - block size
. dnnz - number of nonzero blocks per block row of diagonal part of parallel matrix
. onnz - number of nonzero blocks per block row of off-diagonal part of parallel matrix
. dnnzu - number of nonzero blocks per block row of upper-triangular part of diagonal part of parallel matrix
- onnzu - number of nonzero blocks per block row of upper-triangular part of off-diagonal part of parallel matrix
Level: beginner
.seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatSeqBAIJSetPreallocation(), MatMPIBAIJSetPreallocation(), MatSeqSBAIJSetPreallocation(), MatMPISBAIJSetPreallocation(),
PetscSplitOwnership()
@*/
PetscErrorCode MatXAIJSetPreallocation(Mat A,PetscInt bs,const PetscInt dnnz[],const PetscInt onnz[],const PetscInt dnnzu[],const PetscInt onnzu[])
{
PetscErrorCode ierr;
void (*aij)(void);
PetscFunctionBegin;
ierr = MatSetBlockSize(A,bs);CHKERRQ(ierr);
ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
ierr = MatSeqBAIJSetPreallocation(A,bs,0,dnnz);CHKERRQ(ierr);
ierr = MatMPIBAIJSetPreallocation(A,bs,0,dnnz,0,onnz);CHKERRQ(ierr);
ierr = MatSeqSBAIJSetPreallocation(A,bs,0,dnnzu);CHKERRQ(ierr);
ierr = MatMPISBAIJSetPreallocation(A,bs,0,dnnzu,0,onnzu);CHKERRQ(ierr);
/*
In general, we have to do extra work to preallocate for scalar (AIJ) matrices so we check whether it will do any
good before going on with it.
*/
ierr = PetscObjectQueryFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
if (!aij) {
ierr = PetscObjectQueryFunction((PetscObject)A,"MatSeqAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
}
if (aij) {
if (bs == 1) {
ierr = MatSeqAIJSetPreallocation(A,0,dnnz);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(A,0,dnnz,0,onnz);CHKERRQ(ierr);
} else { /* Convert block-row preallocation to scalar-row */
PetscInt i,m,*sdnnz,*sonnz;
ierr = MatGetLocalSize(A,&m,NULL);CHKERRQ(ierr);
ierr = PetscMalloc2((!!dnnz)*m,&sdnnz,(!!onnz)*m,&sonnz);CHKERRQ(ierr);
for (i=0; i<m; i++) {
if (dnnz) sdnnz[i] = dnnz[i/bs] * bs;
if (onnz) sonnz[i] = onnz[i/bs] * bs;
}
ierr = MatSeqAIJSetPreallocation(A,0,dnnz ? sdnnz : NULL);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(A,0,dnnz ? sdnnz : NULL,0,onnz ? sonnz : NULL);CHKERRQ(ierr);
ierr = PetscFree2(sdnnz,sonnz);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
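A hypothetical call site: per-block-row counts for a matrix with block size 2 and three local block rows. The array values are illustrative, and dnnzu/onnzu matter only for the SBAIJ types:
const PetscInt dnnz[3]  = {3, 3, 2};  /* nonzero blocks per block row, diagonal part */
const PetscInt onnz[3]  = {1, 0, 1};  /* nonzero blocks per block row, off-diagonal part */
const PetscInt dnnzu[3] = {2, 2, 1};  /* upper-triangular counts, used by SBAIJ */
const PetscInt onnzu[3] = {1, 0, 1};
ierr = MatSetSizes(A, 6, 6, PETSC_DECIDE, PETSC_DECIDE);CHKERRQ(ierr); /* 3 block rows x block size 2 */
ierr = MatXAIJSetPreallocation(A, 2, dnnz, onnz, dnnzu, onnzu);CHKERRQ(ierr);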
Example 15: STComputeExplicitOperator
/*@
STComputeExplicitOperator - Computes the explicit operator associated
to the eigenvalue problem with the specified spectral transformation.
Collective on ST
Input Parameter:
. st - the spectral transform context
Output Parameter:
. mat - the explicit operator
Notes:
This routine builds a matrix containing the explicit operator. For
example, in generalized problems with shift-and-invert spectral
transformation the result would be matrix (A - s B)^-1 B.
This computation is done by applying the operator to columns of the
identity matrix. This is analogous to MatComputeExplicitOperator().
Level: advanced
.seealso: STApply()
@*/
PetscErrorCode STComputeExplicitOperator(ST st,Mat *mat)
{
PetscErrorCode ierr;
Vec in,out;
PetscInt i,M,m,*rows,start,end;
const PetscScalar *array;
PetscScalar one = 1.0;
PetscMPIInt size;
PetscFunctionBegin;
PetscValidHeaderSpecific(st,ST_CLASSID,1);
PetscValidPointer(mat,2);
STCheckMatrices(st,1);
if (st->nmat>2) SETERRQ(PetscObjectComm((PetscObject)st),PETSC_ERR_ARG_WRONGSTATE,"Can only be used with 1 or 2 matrices");
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)st),&size);CHKERRQ(ierr);
ierr = MatGetVecs(st->A[0],&in,&out);CHKERRQ(ierr);
ierr = VecGetSize(out,&M);CHKERRQ(ierr);
ierr = VecGetLocalSize(out,&m);CHKERRQ(ierr);
ierr = VecSetOption(in,VEC_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
ierr = VecGetOwnershipRange(out,&start,&end);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&rows);CHKERRQ(ierr);
for (i=0;i<m;i++) rows[i] = start + i;
ierr = MatCreate(PetscObjectComm((PetscObject)st),mat);CHKERRQ(ierr);
ierr = MatSetSizes(*mat,m,m,M,M);CHKERRQ(ierr);
if (size == 1) {
ierr = MatSetType(*mat,MATSEQDENSE);CHKERRQ(ierr);
ierr = MatSeqDenseSetPreallocation(*mat,NULL);CHKERRQ(ierr);
} else {
ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*mat,m,NULL,M-m,NULL);CHKERRQ(ierr);
}
for (i=0;i<M;i++) {
ierr = VecSet(in,0.0);CHKERRQ(ierr);
ierr = VecSetValues(in,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecAssemblyBegin(in);CHKERRQ(ierr);
ierr = VecAssemblyEnd(in);CHKERRQ(ierr);
ierr = STApply(st,in,out);CHKERRQ(ierr);
ierr = VecGetArrayRead(out,&array);CHKERRQ(ierr);
ierr = MatSetValues(*mat,m,rows,1,&i,array,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecRestoreArrayRead(out,&array);CHKERRQ(ierr);
}
ierr = PetscFree(rows);CHKERRQ(ierr);
ierr = VecDestroy(&in);CHKERRQ(ierr);
ierr = VecDestroy(&out);CHKERRQ(ierr);
ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}