This page collects typical usage examples of the C++ PetscFree function: what PetscFree does, how it is called, and what real code that uses it looks like. The curated examples below, all excerpted from PETSc source code, may help.
The following 15 code examples of the PetscFree function are shown, ordered by popularity.
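Before the examples, a minimal sketch of the contract every example below follows: memory obtained from PetscMalloc1() (or PetscCalloc1()) is released with PetscFree(), and PetscFree() returns a PetscErrorCode that should be checked like any other PETSc call. The routine name buildWorkArray is our own illustration, not PETSc source:
#include <petscsys.h>
static PetscErrorCode buildWorkArray(PetscInt n)
{
  PetscErrorCode ierr;
  PetscScalar    *work;
  PetscFunctionBegin;
  ierr = PetscMalloc1(n,&work);CHKERRQ(ierr);   /* allocate n entries, sized from the pointer type */
  /* ... fill and use work[] ... */
  ierr = PetscFree(work);CHKERRQ(ierr);         /* free the memory and reset work to NULL */
  PetscFunctionReturn(0);
}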
Example 1: VecLoad_Binary
PetscErrorCode VecLoad_Binary(Vec vec, PetscViewer viewer)
{
PetscMPIInt size,rank,tag;
int fd;
PetscInt i,rows = 0,n,*range,N,bs;
PetscErrorCode ierr;
PetscBool flag;
PetscScalar *avec,*avecwork;
MPI_Comm comm;
MPI_Request request;
MPI_Status status;
#if defined(PETSC_HAVE_MPIIO)
PetscBool useMPIIO;
#endif
PetscFunctionBegin;
ierr = PetscObjectGetComm((PetscObject)viewer,&comm);CHKERRQ(ierr);
ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
ierr = PetscViewerBinaryReadVecHeader_Private(viewer,&rows);CHKERRQ(ierr);
/* Set Vec sizes, blocksize, and type if not already set. Block size first so that local sizes will be compatible. */
ierr = PetscOptionsGetInt(((PetscObject)vec)->prefix, "-vecload_block_size", &bs, &flag);CHKERRQ(ierr);
if (flag) {
ierr = VecSetBlockSize(vec, bs);CHKERRQ(ierr);
}
if (vec->map->n < 0 && vec->map->N < 0) {
ierr = VecSetSizes(vec,PETSC_DECIDE,rows);CHKERRQ(ierr);
}
/* If sizes and type already set, check if the vector global size is correct */
ierr = VecGetSize(vec, &N);CHKERRQ(ierr);
if (N != rows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Vector in file different length (%d) than input vector (%d)", rows, N);
#if defined(PETSC_HAVE_MPIIO)
ierr = PetscViewerBinaryGetMPIIO(viewer,&useMPIIO);CHKERRQ(ierr);
if (useMPIIO) {
ierr = VecLoad_Binary_MPIIO(vec, viewer);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#endif
ierr = VecGetLocalSize(vec,&n);CHKERRQ(ierr);
ierr = PetscObjectGetNewTag((PetscObject)viewer,&tag);CHKERRQ(ierr);
ierr = VecGetArray(vec,&avec);CHKERRQ(ierr);
if (!rank) {
ierr = PetscBinaryRead(fd,avec,n,PETSC_SCALAR);CHKERRQ(ierr);
if (size > 1) {
/* read in other chunks and send to other processors */
/* determine the maximum chunk owned by any other process */
range = vec->map->range;
n = 1;
for (i=1; i<size; i++) n = PetscMax(n,range[i+1] - range[i]);
ierr = PetscMalloc1(n,&avecwork);CHKERRQ(ierr);
for (i=1; i<size; i++) {
n = range[i+1] - range[i];
ierr = PetscBinaryRead(fd,avecwork,n,PETSC_SCALAR);CHKERRQ(ierr);
ierr = MPI_Isend(avecwork,n,MPIU_SCALAR,i,tag,comm,&request);CHKERRQ(ierr);
ierr = MPI_Wait(&request,&status);CHKERRQ(ierr);
}
ierr = PetscFree(avecwork);CHKERRQ(ierr);
}
} else {
ierr = MPI_Recv(avec,n,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
}
ierr = VecRestoreArray(vec,&avec);CHKERRQ(ierr);
ierr = VecAssemblyBegin(vec);CHKERRQ(ierr);
ierr = VecAssemblyEnd(vec);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 2: MatGetSubMatrices_MPIAdj_Private
static PetscErrorCode MatGetSubMatrices_MPIAdj_Private(Mat mat,PetscInt n,const IS irow[],const IS icol[],PetscBool subcomm,MatReuse scall,Mat *submat[])
{
PetscInt i,irow_n,icol_n,*sxadj,*sadjncy,*svalues;
PetscInt *indices,nindx,j,k,loc;
PetscMPIInt issame;
const PetscInt *irow_indices,*icol_indices;
MPI_Comm scomm_row,scomm_col,scomm_mat;
PetscErrorCode ierr;
PetscFunctionBegin;
nindx = 0;
/*
* Estimate a maximum number for allocating memory
*/
for(i=0; i<n; i++){
ierr = ISGetLocalSize(irow[i],&irow_n);CHKERRQ(ierr);
ierr = ISGetLocalSize(icol[i],&icol_n);CHKERRQ(ierr);
nindx = nindx>(irow_n+icol_n)? nindx:(irow_n+icol_n);
}
ierr = PetscCalloc1(nindx,&indices);CHKERRQ(ierr);
/* construct a submat */
for(i=0; i<n; i++){
/*comms */
if(subcomm){
ierr = PetscObjectGetComm((PetscObject)irow[i],&scomm_row);CHKERRQ(ierr);
ierr = PetscObjectGetComm((PetscObject)icol[i],&scomm_col);CHKERRQ(ierr);
ierr = MPI_Comm_compare(scomm_row,scomm_col,&issame);CHKERRQ(ierr);
if(issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"row index set must have the same comm as the col index set\n");
ierr = MPI_Comm_compare(scomm_row,PETSC_COMM_SELF,&issame);CHKERRQ(ierr);
if(issame == MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"cannot use PETSC_COMM_SELF as comm when extracting a parallel submatrix\n");
}else{
scomm_row = PETSC_COMM_SELF;
}
/*get sub-matrix data*/
sxadj=0; sadjncy=0; svalues=0;
ierr = MatGetSubMatrix_MPIAdj_data(mat,irow[i],icol[i],&sxadj,&sadjncy,&svalues);CHKERRQ(ierr);
ierr = ISGetLocalSize(irow[i],&irow_n);CHKERRQ(ierr);
ierr = ISGetLocalSize(icol[i],&icol_n);CHKERRQ(ierr);
ierr = ISGetIndices(irow[i],&irow_indices);CHKERRQ(ierr);
ierr = PetscMemcpy(indices,irow_indices,sizeof(PetscInt)*irow_n);CHKERRQ(ierr);
ierr = ISRestoreIndices(irow[i],&irow_indices);CHKERRQ(ierr);
ierr = ISGetIndices(icol[i],&icol_indices);CHKERRQ(ierr);
ierr = PetscMemcpy(indices+irow_n,icol_indices,sizeof(PetscInt)*icol_n);CHKERRQ(ierr);
ierr = ISRestoreIndices(icol[i],&icol_indices);CHKERRQ(ierr);
nindx = irow_n+icol_n;
ierr = PetscSortRemoveDupsInt(&nindx,indices);CHKERRQ(ierr);
/* renumber columns */
for(j=0; j<irow_n; j++){
for(k=sxadj[j]; k<sxadj[j+1]; k++){
ierr = PetscFindInt(sadjncy[k],nindx,indices,&loc);CHKERRQ(ierr);
#if PETSC_USE_DEBUG
if(loc<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"cannot find col %d\n",sadjncy[k]);
#endif
sadjncy[k] = loc;
}
}
if(scall==MAT_INITIAL_MATRIX){
ierr = MatCreateMPIAdj(scomm_row,irow_n,icol_n,sxadj,sadjncy,svalues,submat[i]);CHKERRQ(ierr);
}else{
Mat sadj = *(submat[i]);
Mat_MPIAdj *sa = (Mat_MPIAdj*)((sadj)->data);
ierr = PetscObjectGetComm((PetscObject)sadj,&scomm_mat);CHKERRQ(ierr);
ierr = MPI_Comm_compare(scomm_row,scomm_mat,&issame);CHKERRQ(ierr);
if(issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"submatrix must have the same comm as the col index set\n");
ierr = PetscMemcpy(sa->i,sxadj,sizeof(PetscInt)*(irow_n+1));CHKERRQ(ierr);
ierr = PetscMemcpy(sa->j,sadjncy,sizeof(PetscInt)*sxadj[irow_n]);CHKERRQ(ierr);
if(svalues){ierr = PetscMemcpy(sa->values,svalues,sizeof(PetscInt)*sxadj[irow_n]);CHKERRQ(ierr);}
ierr = PetscFree(sxadj);CHKERRQ(ierr);
ierr = PetscFree(sadjncy);CHKERRQ(ierr);
if(svalues) {ierr = PetscFree(svalues);CHKERRQ(ierr);}
}
}
ierr = PetscFree(indices);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 3: main
//......... some of the code is omitted here .........
ierr = PetscPrintf(PETSC_COMM_SELF,"MatMult not equal to MatMultAdd Norm1=%e Norm2=%e bs = %D\n",s1norm,s2norm,bs);CHKERRQ(ierr);
}
}
/* Test MatMult() */
ierr = MatMultEqual(A,B,10,&flg);CHKERRQ(ierr);
if (!flg){
ierr = PetscPrintf(PETSC_COMM_SELF,"Error: MatMult()\n");CHKERRQ(ierr);
}
/* Test MatMultAdd() */
ierr = MatMultAddEqual(A,B,10,&flg);CHKERRQ(ierr);
if (!flg){
ierr = PetscPrintf(PETSC_COMM_SELF,"Error: MatMultAdd()\n");CHKERRQ(ierr);
}
/* Test MatMultTranspose() */
ierr = MatMultTransposeEqual(A,B,10,&flg);CHKERRQ(ierr);
if (!flg){
ierr = PetscPrintf(PETSC_COMM_SELF,"Error: MatMultTranspose()\n");CHKERRQ(ierr);
}
/* Test MatMultTransposeAdd() */
ierr = MatMultTransposeAddEqual(A,B,10,&flg);CHKERRQ(ierr);
if (!flg){
ierr = PetscPrintf(PETSC_COMM_SELF,"Error: MatMultTransposeAdd()\n");CHKERRQ(ierr);
}
/* Do LUFactor() on both the matrices */
ierr = PetscMalloc(M*sizeof(PetscInt),&idx);CHKERRQ(ierr);
for (i=0; i<M; i++) idx[i] = i;
ierr = ISCreateGeneral(PETSC_COMM_SELF,M,idx,PETSC_COPY_VALUES,&is1);CHKERRQ(ierr);
ierr = ISCreateGeneral(PETSC_COMM_SELF,M,idx,PETSC_COPY_VALUES,&is2);CHKERRQ(ierr);
ierr = PetscFree(idx);CHKERRQ(ierr);
ierr = ISSetPermutation(is1);CHKERRQ(ierr);
ierr = ISSetPermutation(is2);CHKERRQ(ierr);
ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
info.fill = 2.0;
info.dtcol = 0.0;
info.zeropivot = 1.e-14;
info.pivotinblocks = 1.0;
ierr = MatLUFactor(B,is1,is2,&info);CHKERRQ(ierr);
ierr = MatLUFactor(A,is1,is2,&info);CHKERRQ(ierr);
/* Test MatSolveAdd() */
for (i=0; i<10; i++) {
ierr = VecSetRandom(xx,rdm);CHKERRQ(ierr);
ierr = VecSetRandom(yy,rdm);CHKERRQ(ierr);
ierr = MatSolveAdd(B,xx,yy,s2);CHKERRQ(ierr);
ierr = MatSolveAdd(A,xx,yy,s1);CHKERRQ(ierr);
ierr = VecNorm(s1,NORM_2,&s1norm);CHKERRQ(ierr);
ierr = VecNorm(s2,NORM_2,&s2norm);CHKERRQ(ierr);
rnorm = s2norm-s1norm;
if (rnorm<-tol || rnorm>tol) {
ierr = PetscPrintf(PETSC_COMM_SELF,"Error:MatSolveAdd - Norm1=%16.14e Norm2=%16.14e bs = %D\n",s1norm,s2norm,bs);CHKERRQ(ierr);
}
}
/* Test MatSolveAdd() when x = A'b +x */
for (i=0; i<10; i++) {
ierr = VecSetRandom(xx,rdm);CHKERRQ(ierr);
ierr = VecSetRandom(s1,rdm);CHKERRQ(ierr);
ierr = VecCopy(s1,s2);CHKERRQ(ierr);
ierr = MatSolveAdd(B,xx,s2,s2);CHKERRQ(ierr);
ierr = MatSolveAdd(A,xx,s1,s1);CHKERRQ(ierr);
Example 4: MatLUFactorNumeric_SuperLU_DIST
PetscErrorCode MatLUFactorNumeric_SuperLU_DIST(Mat F,Mat A,const MatFactorInfo *info)
{
Mat *tseq,A_seq = NULL;
Mat_SeqAIJ *aa,*bb;
Mat_SuperLU_DIST *lu = (Mat_SuperLU_DIST*)(F)->spptr;
PetscErrorCode ierr;
PetscInt M=A->rmap->N,N=A->cmap->N,i,*ai,*aj,*bi,*bj,nz,rstart,*garray,
m=A->rmap->n, colA_start,j,jcol,jB,countA,countB,*bjj,*ajj;
int sinfo; /* SuperLU_Dist info flag is always an int even with long long indices */
PetscMPIInt size;
SuperLUStat_t stat;
double *berr=0;
IS isrow;
Mat F_diag=NULL;
#if defined(PETSC_USE_COMPLEX)
doublecomplex *av, *bv;
#else
double *av, *bv;
#endif
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
if (lu->MatInputMode == GLOBAL) { /* global mat input */
if (size > 1) { /* convert mpi A to seq mat A */
ierr = ISCreateStride(PETSC_COMM_SELF,M,0,1,&isrow);CHKERRQ(ierr);
ierr = MatGetSubMatrices(A,1,&isrow,&isrow,MAT_INITIAL_MATRIX,&tseq);CHKERRQ(ierr);
ierr = ISDestroy(&isrow);CHKERRQ(ierr);
A_seq = *tseq;
ierr = PetscFree(tseq);CHKERRQ(ierr);
aa = (Mat_SeqAIJ*)A_seq->data;
} else {
PetscBool flg;
ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr);
if (flg) {
Mat_MPIAIJ *At = (Mat_MPIAIJ*)A->data;
A = At->A;
}
aa = (Mat_SeqAIJ*)A->data;
}
/* Convert the PETSc NR (compressed row) matrix to SuperLU_DIST NC (compressed column) format.
Note: memory for lu->val, lu->col and lu->row is allocated by CompRow_to_CompCol_dist()! */
if (lu->options.Fact != DOFACT) {/* successive numeric factorization, sparsity pattern is reused. */
PetscStackCall("SuperLU_DIST:Destroy_CompCol_Matrix_dist",Destroy_CompCol_Matrix_dist(&lu->A_sup));
if (lu->FactPattern == SamePattern_SameRowPerm) {
lu->options.Fact = SamePattern_SameRowPerm; /* matrix has similar numerical values */
} else { /* lu->FactPattern == SamePattern */
PetscStackCall("SuperLU_DIST:Destroy_LU",Destroy_LU(N, &lu->grid, &lu->LUstruct));
lu->options.Fact = SamePattern;
}
}
#if defined(PETSC_USE_COMPLEX)
PetscStackCall("SuperLU_DIST:zCompRow_to_CompCol_dist",zCompRow_to_CompCol_dist(M,N,aa->nz,(doublecomplex*)aa->a,(int_t*)aa->j,(int_t*)aa->i,&lu->val,&lu->col, &lu->row));
#else
PetscStackCall("SuperLU_DIST:dCompRow_to_CompCol_dist",dCompRow_to_CompCol_dist(M,N,aa->nz,aa->a,(int_t*)aa->j,(int_t*)aa->i,&lu->val, &lu->col, &lu->row));
#endif
/* Create compressed column matrix A_sup. */
#if defined(PETSC_USE_COMPLEX)
PetscStackCall("SuperLU_DIST:zCreate_CompCol_Matrix_dist",zCreate_CompCol_Matrix_dist(&lu->A_sup, M, N, aa->nz, lu->val, lu->col, lu->row, SLU_NC, SLU_Z, SLU_GE));
#else
PetscStackCall("SuperLU_DIST:dCreate_CompCol_Matrix_dist",dCreate_CompCol_Matrix_dist(&lu->A_sup, M, N, aa->nz, lu->val, lu->col, lu->row, SLU_NC, SLU_D, SLU_GE));
#endif
} else { /* distributed mat input */
Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
aa=(Mat_SeqAIJ*)(mat->A)->data;
bb=(Mat_SeqAIJ*)(mat->B)->data;
ai=aa->i; aj=aa->j;
bi=bb->i; bj=bb->j;
#if defined(PETSC_USE_COMPLEX)
av=(doublecomplex*)aa->a;
bv=(doublecomplex*)bb->a;
#else
av=aa->a;
bv=bb->a;
#endif
rstart = A->rmap->rstart;
nz = aa->nz + bb->nz;
garray = mat->garray;
if (lu->options.Fact == DOFACT) { /* first numeric factorization */
#if defined(PETSC_USE_COMPLEX)
PetscStackCall("SuperLU_DIST:zallocateA_dist",zallocateA_dist(m, nz, &lu->val, &lu->col, &lu->row));
#else
PetscStackCall("SuperLU_DIST:dallocateA_dist",dallocateA_dist(m, nz, &lu->val, &lu->col, &lu->row));
#endif
} else { /* successive numeric factorization, sparsity pattern and perm_c are reused. */
/* Destroy_CompRowLoc_Matrix_dist(&lu->A_sup); */ /* this leads to crash! However, see SuperLU_DIST_2.5/EXAMPLE/pzdrive2.c */
if (lu->FactPattern == SamePattern_SameRowPerm) {
lu->options.Fact = SamePattern_SameRowPerm; /* matrix has similar numerical values */
} else {
PetscStackCall("SuperLU_DIST:Destroy_LU",Destroy_LU(N, &lu->grid, &lu->LUstruct)); /* Deallocate storage associated with the L and U matrices. */
lu->options.Fact = SamePattern;
}
}
nz = 0;
for (i=0; i<m; i++) {
lu->row[i] = nz;
//......... some of the code is omitted here .........
Example 5: KSPDestroy_AGMRES
PetscErrorCode KSPDestroy_AGMRES(KSP ksp)
{
PetscErrorCode ierr;
KSP_AGMRES *agmres = (KSP_AGMRES*)ksp->data;
PetscFunctionBegin;
ierr = PetscFree(agmres->hh_origin);CHKERRQ(ierr);
ierr = PetscFree(agmres->nrs);CHKERRQ(ierr);
ierr = PetscFree(agmres->Qloc);CHKERRQ(ierr);
ierr = PetscFree(agmres->Rloc);CHKERRQ(ierr);
ierr = PetscFree(agmres->sgn);CHKERRQ(ierr);
ierr = PetscFree(agmres->tloc);CHKERRQ(ierr);
ierr = PetscFree(agmres->Rshift);CHKERRQ(ierr);
ierr = PetscFree(agmres->Ishift);CHKERRQ(ierr);
ierr = PetscFree(agmres->Scale);CHKERRQ(ierr);
ierr = PetscFree(agmres->wbufptr);CHKERRQ(ierr);
ierr = PetscFree(agmres->tau);CHKERRQ(ierr);
ierr = PetscFree(agmres->work);CHKERRQ(ierr);
ierr = PetscFree(agmres->temp);CHKERRQ(ierr);
ierr = PetscFree(agmres->select);CHKERRQ(ierr);
ierr = PetscFree(agmres->wr);CHKERRQ(ierr);
ierr = PetscFree(agmres->wi);CHKERRQ(ierr);
if (agmres->neig) {
ierr = VecDestroyVecs(MAXKSPSIZE,&agmres->TmpU);CHKERRQ(ierr);
ierr = PetscFree(agmres->perm);CHKERRQ(ierr);
ierr = PetscFree(agmres->MatEigL);CHKERRQ(ierr);
ierr = PetscFree(agmres->MatEigR);CHKERRQ(ierr);
ierr = PetscFree(agmres->Q);CHKERRQ(ierr);
ierr = PetscFree(agmres->Z);CHKERRQ(ierr);
ierr = PetscFree(agmres->beta);CHKERRQ(ierr);
}
ierr = KSPDestroy_DGMRES(ksp);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
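Example 5 above calls PetscFree() on every member of the AGMRES context without checking for NULL first. That is safe by design: PetscFree() is a no-op when handed a NULL pointer, and after freeing it resets the pointer to NULL. A minimal sketch of that behavior (our own illustration, assuming a standard PETSc build):
PetscErrorCode ierr;
PetscReal      *buf = NULL;
ierr = PetscFree(buf);CHKERRQ(ierr);        /* freeing NULL is a harmless no-op */
ierr = PetscMalloc1(16,&buf);CHKERRQ(ierr);
ierr = PetscFree(buf);CHKERRQ(ierr);        /* frees the memory and sets buf back to NULL */
ierr = PetscFree(buf);CHKERRQ(ierr);        /* so a repeated free is again a safe no-op */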
Example 6: PCSetUp_MG
PetscErrorCode PCSetUp_MG(PC pc)
{
PC_MG *mg = (PC_MG*)pc->data;
PC_MG_Levels **mglevels = mg->levels;
PetscErrorCode ierr;
PetscInt i,n = mglevels[0]->levels;
PC cpc;
PetscBool preonly,lu,redundant,cholesky,svd,dump = PETSC_FALSE,opsset,use_amat;
Mat dA,dB;
Vec tvec;
DM *dms;
PetscViewer viewer = 0;
PetscFunctionBegin;
/* FIX: Move this to PCSetFromOptions_MG? */
if (mg->usedmfornumberoflevels) {
PetscInt levels;
ierr = DMGetRefineLevel(pc->dm,&levels);CHKERRQ(ierr);
levels++;
if (levels > n) { /* the problem is now being solved on a finer grid */
ierr = PCMGSetLevels(pc,levels,NULL);CHKERRQ(ierr);
n = levels;
ierr = PCSetFromOptions(pc);CHKERRQ(ierr); /* it is bad to call this here, but otherwise will never be called for the new hierarchy */
mglevels = mg->levels;
}
}
ierr = KSPGetPC(mglevels[0]->smoothd,&cpc);CHKERRQ(ierr);
/* If user did not provide fine grid operators OR operator was not updated since last global KSPSetOperators() */
/* so use those from global PC */
/* Is this what we always want? What if user wants to keep old one? */
ierr = KSPGetOperatorsSet(mglevels[n-1]->smoothd,NULL,&opsset);CHKERRQ(ierr);
if (opsset) {
Mat mmat;
ierr = KSPGetOperators(mglevels[n-1]->smoothd,NULL,&mmat);CHKERRQ(ierr);
if (mmat == pc->pmat) opsset = PETSC_FALSE;
}
if (!opsset) {
ierr = PCGetUseAmat(pc,&use_amat);CHKERRQ(ierr);
if(use_amat){
ierr = PetscInfo(pc,"Using outer operators to define finest grid operator \n because PCMGGetSmoother(pc,nlevels-1,&ksp);KSPSetOperators(ksp,...); was not called.\n");CHKERRQ(ierr);
ierr = KSPSetOperators(mglevels[n-1]->smoothd,pc->mat,pc->pmat);CHKERRQ(ierr);
}
else {
ierr = PetscInfo(pc,"Using matrix (pmat) operators to define finest grid operator \n because PCMGGetSmoother(pc,nlevels-1,&ksp);KSPSetOperators(ksp,...); was not called.\n");CHKERRQ(ierr);
ierr = KSPSetOperators(mglevels[n-1]->smoothd,pc->pmat,pc->pmat);CHKERRQ(ierr);
}
}
/* Skipping this for galerkin==2 (externally managed hierarchy such as ML and GAMG). Cleaner logic here would be great. Wrap ML/GAMG as DMs? */
if (pc->dm && mg->galerkin != 2 && !pc->setupcalled) {
/* construct the interpolation from the DMs */
Mat p;
Vec rscale;
ierr = PetscMalloc1(n,&dms);CHKERRQ(ierr);
dms[n-1] = pc->dm;
/* Separately create them so we do not get DMKSP interference between levels */
for (i=n-2; i>-1; i--) {ierr = DMCoarsen(dms[i+1],MPI_COMM_NULL,&dms[i]);CHKERRQ(ierr);}
for (i=n-2; i>-1; i--) {
DMKSP kdm;
ierr = KSPSetDM(mglevels[i]->smoothd,dms[i]);CHKERRQ(ierr);
if (mg->galerkin) {ierr = KSPSetDMActive(mglevels[i]->smoothd,PETSC_FALSE);CHKERRQ(ierr);}
ierr = DMGetDMKSPWrite(dms[i],&kdm);CHKERRQ(ierr);
/* Ugly hack so that the next KSPSetUp() will use the RHS that we set. A better fix is to change dmActive to take
* a bitwise OR of computing the matrix, RHS, and initial iterate. */
kdm->ops->computerhs = NULL;
kdm->rhsctx = NULL;
if (!mglevels[i+1]->interpolate) {
ierr = DMCreateInterpolation(dms[i],dms[i+1],&p,&rscale);CHKERRQ(ierr);
ierr = PCMGSetInterpolation(pc,i+1,p);CHKERRQ(ierr);
if (rscale) {ierr = PCMGSetRScale(pc,i+1,rscale);CHKERRQ(ierr);}
ierr = VecDestroy(&rscale);CHKERRQ(ierr);
ierr = MatDestroy(&p);CHKERRQ(ierr);
}
}
for (i=n-2; i>-1; i--) {ierr = DMDestroy(&dms[i]);CHKERRQ(ierr);}
ierr = PetscFree(dms);CHKERRQ(ierr);
}
if (pc->dm && !pc->setupcalled) {
/* finest smoother also gets DM but it is not active, independent of whether galerkin==2 */
ierr = KSPSetDM(mglevels[n-1]->smoothd,pc->dm);CHKERRQ(ierr);
ierr = KSPSetDMActive(mglevels[n-1]->smoothd,PETSC_FALSE);CHKERRQ(ierr);
}
if (mg->galerkin == 1) {
Mat B;
/* currently only handle case where mat and pmat are the same on coarser levels */
ierr = KSPGetOperators(mglevels[n-1]->smoothd,&dA,&dB);CHKERRQ(ierr);
if (!pc->setupcalled) {
for (i=n-2; i>-1; i--) {
if (!mglevels[i+1]->restrct && !mglevels[i+1]->interpolate) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONGSTATE,"Must provide interpolation or restriction for each MG level except level 0");
if (!mglevels[i+1]->interpolate) {
ierr = PCMGSetInterpolation(pc,i+1,mglevels[i+1]->restrct);CHKERRQ(ierr);
}
if (!mglevels[i+1]->restrct) {
ierr = PCMGSetRestriction(pc,i+1,mglevels[i+1]->interpolate);CHKERRQ(ierr);
//......... some of the code is omitted here .........
Example 7: PCBDDCNullSpaceAssembleCoarse
PetscErrorCode PCBDDCNullSpaceAssembleCoarse(PC pc, MatNullSpace* CoarseNullSpace)
{
PC_BDDC *pcbddc = (PC_BDDC*)pc->data;
Mat_IS *matis = (Mat_IS*)pc->pmat->data;
MatNullSpace tempCoarseNullSpace;
const Vec *nsp_vecs;
Vec *coarse_nsp_vecs,local_vec,local_primal_vec;
PetscInt nsp_size,coarse_nsp_size,i;
PetscBool nsp_has_cnst;
PetscReal test_null;
PetscErrorCode ierr;
PetscFunctionBegin;
tempCoarseNullSpace = 0;
coarse_nsp_size = 0;
coarse_nsp_vecs = 0;
ierr = MatNullSpaceGetVecs(pcbddc->NullSpace,&nsp_has_cnst,&nsp_size,&nsp_vecs);CHKERRQ(ierr);
if (pcbddc->coarse_mat) {
ierr = PetscMalloc((nsp_size+1)*sizeof(Vec),&coarse_nsp_vecs);CHKERRQ(ierr);
for (i=0;i<nsp_size+1;i++) {
ierr = VecDuplicate(pcbddc->coarse_vec,&coarse_nsp_vecs[i]);CHKERRQ(ierr);
}
}
ierr = MatGetVecs(pcbddc->ConstraintMatrix,&local_vec,&local_primal_vec);CHKERRQ(ierr);
if (nsp_has_cnst) {
ierr = VecSet(local_vec,1.0);CHKERRQ(ierr);
ierr = MatMult(pcbddc->ConstraintMatrix,local_vec,local_primal_vec);CHKERRQ(ierr);
ierr = PCBDDCScatterCoarseDataBegin(pc,local_primal_vec,pcbddc->coarse_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = PCBDDCScatterCoarseDataEnd(pc,local_primal_vec,pcbddc->coarse_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
if (pcbddc->coarse_mat) {
if (pcbddc->dbg_flag) {
ierr = MatMult(pcbddc->coarse_mat,pcbddc->coarse_vec,pcbddc->coarse_rhs);CHKERRQ(ierr);
ierr = VecNorm(pcbddc->coarse_rhs,NORM_INFINITY,&test_null);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(pcbddc->dbg_viewer,"Constant coarse null space error % 1.14e\n",test_null);CHKERRQ(ierr);
}
ierr = VecCopy(pcbddc->coarse_vec,coarse_nsp_vecs[coarse_nsp_size]);CHKERRQ(ierr);
coarse_nsp_size++;
}
}
for (i=0;i<nsp_size;i++) {
ierr = VecScatterBegin(matis->ctx,nsp_vecs[i],local_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(matis->ctx,nsp_vecs[i],local_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = MatMult(pcbddc->ConstraintMatrix,local_vec,local_primal_vec);CHKERRQ(ierr);
ierr = PCBDDCScatterCoarseDataBegin(pc,local_primal_vec,pcbddc->coarse_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = PCBDDCScatterCoarseDataEnd(pc,local_primal_vec,pcbddc->coarse_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
if (pcbddc->coarse_mat) {
if (pcbddc->dbg_flag) {
ierr = MatMult(pcbddc->coarse_mat,pcbddc->coarse_vec,pcbddc->coarse_rhs);CHKERRQ(ierr);
ierr = VecNorm(pcbddc->coarse_rhs,NORM_2,&test_null);CHKERRQ(ierr);
ierr = PetscViewerASCIIPrintf(pcbddc->dbg_viewer,"Vec %d coarse null space error % 1.14e\n",i,test_null);CHKERRQ(ierr);
}
ierr = VecCopy(pcbddc->coarse_vec,coarse_nsp_vecs[coarse_nsp_size]);CHKERRQ(ierr);
coarse_nsp_size++;
}
}
if (coarse_nsp_size > 0) {
ierr = PCBDDCOrthonormalizeVecs(coarse_nsp_size,coarse_nsp_vecs);CHKERRQ(ierr);
ierr = MatNullSpaceCreate(PetscObjectComm((PetscObject)(pcbddc->coarse_mat)),PETSC_FALSE,coarse_nsp_size,coarse_nsp_vecs,&tempCoarseNullSpace);CHKERRQ(ierr);
for (i=0;i<nsp_size+1;i++) {
ierr = VecDestroy(&coarse_nsp_vecs[i]);CHKERRQ(ierr);
}
}
ierr = PetscFree(coarse_nsp_vecs);CHKERRQ(ierr);
ierr = VecDestroy(&local_vec);CHKERRQ(ierr);
ierr = VecDestroy(&local_primal_vec);CHKERRQ(ierr);
*CoarseNullSpace = tempCoarseNullSpace;
PetscFunctionReturn(0);
}
Example 8: VecAssemblyBegin_MPI_BTS
static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
{
Vec_MPI *x = (Vec_MPI*)X->data;
PetscErrorCode ierr;
MPI_Comm comm;
PetscInt i,j,jb,bs;
PetscFunctionBegin;
if (X->stash.donotstash) PetscFunctionReturn(0);
ierr = PetscObjectGetComm((PetscObject)X,&comm);CHKERRQ(ierr);
ierr = VecGetBlockSize(X,&bs);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
{
InsertMode addv;
ierr = MPIU_Allreduce((PetscEnum*)&X->stash.insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);CHKERRQ(ierr);
if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(comm,PETSC_ERR_ARG_NOTSAMETYPE,"Some processors inserted values while others added");
}
#endif
X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */
ierr = VecStashSortCompress_Private(&X->stash);CHKERRQ(ierr);
ierr = VecStashSortCompress_Private(&X->bstash);CHKERRQ(ierr);
if (!x->sendranks) {
PetscMPIInt nowners,bnowners,*owners,*bowners;
PetscInt ntmp;
ierr = VecStashGetOwnerList_Private(&X->stash,X->map,&nowners,&owners);CHKERRQ(ierr);
ierr = VecStashGetOwnerList_Private(&X->bstash,X->map,&bnowners,&bowners);CHKERRQ(ierr);
ierr = PetscMergeMPIIntArray(nowners,owners,bnowners,bowners,&ntmp,&x->sendranks);CHKERRQ(ierr);
x->nsendranks = ntmp;
ierr = PetscFree(owners);CHKERRQ(ierr);
ierr = PetscFree(bowners);CHKERRQ(ierr);
ierr = PetscMalloc1(x->nsendranks,&x->sendhdr);CHKERRQ(ierr);
ierr = PetscCalloc1(x->nsendranks,&x->sendptrs);CHKERRQ(ierr);
}
for (i=0,j=0,jb=0; i<x->nsendranks; i++) {
PetscMPIInt rank = x->sendranks[i];
x->sendhdr[i].insertmode = X->stash.insertmode;
/* Initialize pointers for non-empty stashes the first time around. Subsequent assemblies with
* VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
* there is nothing new to send, so that size-zero messages get sent instead. */
x->sendhdr[i].count = 0;
if (X->stash.n) {
x->sendptrs[i].ints = &X->stash.idx[j];
x->sendptrs[i].scalars = &X->stash.array[j];
for ( ; j<X->stash.n && X->stash.idx[j] < X->map->range[rank+1]; j++) x->sendhdr[i].count++;
}
x->sendhdr[i].bcount = 0;
if (X->bstash.n) {
x->sendptrs[i].intb = &X->bstash.idx[jb];
x->sendptrs[i].scalarb = &X->bstash.array[jb*bs];
for ( ; jb<X->bstash.n && X->bstash.idx[jb]*bs < X->map->range[rank+1]; jb++) x->sendhdr[i].bcount++;
}
}
if (!x->segrecvint) {ierr = PetscSegBufferCreate(sizeof(PetscInt),1000,&x->segrecvint);CHKERRQ(ierr);}
if (!x->segrecvscalar) {ierr = PetscSegBufferCreate(sizeof(PetscScalar),1000,&x->segrecvscalar);CHKERRQ(ierr);}
if (!x->segrecvframe) {ierr = PetscSegBufferCreate(sizeof(VecAssemblyFrame),50,&x->segrecvframe);CHKERRQ(ierr);}
if (x->recvhdr) { /* VEC_SUBSET_OFF_PROC_ENTRIES and this is not the first assembly */
PetscMPIInt tag[4];
if (!x->assembly_subset) SETERRQ(comm,PETSC_ERR_PLIB,"Attempt to reuse rendezvous when not VEC_SUBSET_OFF_PROC_ENTRIES");
for (i=0; i<4; i++) {ierr = PetscCommGetNewTag(comm,&tag[i]);CHKERRQ(ierr);}
for (i=0; i<x->nsendranks; i++) {
ierr = VecAssemblySend_MPI_Private(comm,tag,i,x->sendranks[i],x->sendhdr+i,x->sendreqs+4*i,X);CHKERRQ(ierr);
}
for (i=0; i<x->nrecvranks; i++) {
ierr = VecAssemblyRecv_MPI_Private(comm,tag,x->recvranks[i],x->recvhdr+i,x->recvreqs+4*i,X);CHKERRQ(ierr);
}
x->use_status = PETSC_TRUE;
} else { /* First time */
ierr = PetscCommBuildTwoSidedFReq(comm,3,MPIU_INT,x->nsendranks,x->sendranks,(PetscInt*)x->sendhdr,&x->nrecvranks,&x->recvranks,&x->recvhdr,4,&x->sendreqs,&x->recvreqs,VecAssemblySend_MPI_Private,VecAssemblyRecv_MPI_Private,X);CHKERRQ(ierr);
x->use_status = PETSC_FALSE;
}
{
PetscInt nstash,reallocs;
ierr = VecStashGetInfo_Private(&X->stash,&nstash,&reallocs);CHKERRQ(ierr);
ierr = PetscInfo2(X,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
ierr = VecStashGetInfo_Private(&X->bstash,&nstash,&reallocs);CHKERRQ(ierr);
ierr = PetscInfo2(X,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
Example 9: PCBDDCNullSpaceAssembleCorrection
//......... some of the code is omitted here .........
ierr = VecSet(work1,one);CHKERRQ(ierr);
ierr = VecPlaceArray(work2,(const PetscScalar*)&Kbasis_mat[k*basis_dofs]);CHKERRQ(ierr);
ierr = MatMult(local_mat,work1,work2);CHKERRQ(ierr);
ierr = VecResetArray(work1);CHKERRQ(ierr);
ierr = VecResetArray(work2);CHKERRQ(ierr);
}
ierr = VecDestroy(&work1);CHKERRQ(ierr);
ierr = VecDestroy(&work2);CHKERRQ(ierr);
ierr = VecScatterDestroy(&scatter_ctx);CHKERRQ(ierr);
ierr = MatDenseRestoreArray(shell_ctx->basis_mat,&basis_mat);CHKERRQ(ierr);
ierr = MatDenseRestoreArray(shell_ctx->Kbasis_mat,&Kbasis_mat);CHKERRQ(ierr);
/* Assemble another Mat object in shell context */
ierr = MatTransposeMatMult(shell_ctx->basis_mat,shell_ctx->Kbasis_mat,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&small_mat);CHKERRQ(ierr);
ierr = MatFactorInfoInitialize(&matinfo);CHKERRQ(ierr);
ierr = ISCreateStride(PETSC_COMM_SELF,basis_size,0,1,&is_aux);CHKERRQ(ierr);
ierr = MatLUFactor(small_mat,is_aux,is_aux,&matinfo);CHKERRQ(ierr);
ierr = ISDestroy(&is_aux);CHKERRQ(ierr);
ierr = PetscMalloc(basis_size*basis_size*sizeof(PetscScalar),&array_mat);CHKERRQ(ierr);
for (k=0;k<basis_size;k++) {
ierr = VecSet(shell_ctx->work_small_1,zero);CHKERRQ(ierr);
ierr = VecSetValue(shell_ctx->work_small_1,k,one,INSERT_VALUES);CHKERRQ(ierr);
ierr = VecAssemblyBegin(shell_ctx->work_small_1);CHKERRQ(ierr);
ierr = VecAssemblyEnd(shell_ctx->work_small_1);CHKERRQ(ierr);
ierr = MatSolve(small_mat,shell_ctx->work_small_1,shell_ctx->work_small_2);CHKERRQ(ierr);
ierr = VecGetArrayRead(shell_ctx->work_small_2,(const PetscScalar**)&array);CHKERRQ(ierr);
for (i=0;i<basis_size;i++) {
array_mat[i*basis_size+k]=array[i];
}
ierr = VecRestoreArrayRead(shell_ctx->work_small_2,(const PetscScalar**)&array);CHKERRQ(ierr);
}
ierr = MatCreateSeqDense(PETSC_COMM_SELF,basis_size,basis_size,array_mat,&inv_small_mat);CHKERRQ(ierr);
ierr = MatMatMult(shell_ctx->basis_mat,inv_small_mat,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&shell_ctx->Lbasis_mat);CHKERRQ(ierr);
ierr = PetscFree(array_mat);CHKERRQ(ierr);
ierr = MatDestroy(&inv_small_mat);CHKERRQ(ierr);
ierr = MatDestroy(&small_mat);CHKERRQ(ierr);
ierr = MatScale(shell_ctx->Kbasis_mat,m_one);CHKERRQ(ierr);
/* Rebuild local PC */
ierr = KSPGetPC(*local_ksp,&shell_ctx->local_pc);CHKERRQ(ierr);
ierr = PetscObjectReference((PetscObject)shell_ctx->local_pc);CHKERRQ(ierr);
ierr = PCCreate(PETSC_COMM_SELF,&newpc);CHKERRQ(ierr);
ierr = PCSetOperators(newpc,local_mat,local_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
ierr = PCSetType(newpc,PCSHELL);CHKERRQ(ierr);
ierr = PCShellSetContext(newpc,shell_ctx);CHKERRQ(ierr);
ierr = PCShellSetApply(newpc,PCBDDCApplyNullSpaceCorrectionPC);CHKERRQ(ierr);
ierr = PCShellSetDestroy(newpc,PCBDDCDestroyNullSpaceCorrectionPC);CHKERRQ(ierr);
ierr = PCSetUp(newpc);CHKERRQ(ierr);
ierr = KSPSetPC(*local_ksp,newpc);CHKERRQ(ierr);
ierr = PCDestroy(&newpc);CHKERRQ(ierr);
ierr = KSPSetUp(*local_ksp);CHKERRQ(ierr);
/* test */
/* TODO: this cause a deadlock when doing multilevel */
#if 0
if (pcbddc->dbg_flag) {
KSP check_ksp;
PC check_pc;
Mat test_mat;
Vec work3;
PetscViewer viewer=pcbddc->dbg_viewer;
PetscReal test_err,lambda_min,lambda_max;
PetscBool setsym,issym=PETSC_FALSE;
ierr = KSPGetPC(*local_ksp,&check_pc);CHKERRQ(ierr);
ierr = VecDuplicate(shell_ctx->work_full_1,&work1);CHKERRQ(ierr);
ierr = VecDuplicate(shell_ctx->work_full_1,&work2);CHKERRQ(ierr);
Example 10: PCBDDCNullSpaceAdaptGlobal
PetscErrorCode PCBDDCNullSpaceAdaptGlobal(PC pc)
{
PC_IS* pcis = (PC_IS*) (pc->data);
PC_BDDC* pcbddc = (PC_BDDC*)(pc->data);
KSP inv_change;
PC pc_change;
const Vec *nsp_vecs;
Vec *new_nsp_vecs;
PetscInt i,nsp_size,new_nsp_size,start_new;
PetscBool nsp_has_cnst;
MatNullSpace new_nsp;
PetscErrorCode ierr;
MPI_Comm comm;
PetscFunctionBegin;
/* create KSP for change of basis */
ierr = KSPCreate(PETSC_COMM_SELF,&inv_change);CHKERRQ(ierr);
ierr = KSPSetOperators(inv_change,pcbddc->ChangeOfBasisMatrix,pcbddc->ChangeOfBasisMatrix,SAME_PRECONDITIONER);CHKERRQ(ierr);
ierr = KSPSetType(inv_change,KSPPREONLY);CHKERRQ(ierr);
ierr = KSPGetPC(inv_change,&pc_change);CHKERRQ(ierr);
ierr = PCSetType(pc_change,PCLU);CHKERRQ(ierr);
ierr = KSPSetUp(inv_change);CHKERRQ(ierr);
/* get nullspace and transform it */
ierr = MatNullSpaceGetVecs(pcbddc->NullSpace,&nsp_has_cnst,&nsp_size,&nsp_vecs);CHKERRQ(ierr);
new_nsp_size = nsp_size;
if (nsp_has_cnst) {
new_nsp_size++;
}
ierr = PetscMalloc(new_nsp_size*sizeof(Vec),&new_nsp_vecs);CHKERRQ(ierr);
for (i=0;i<new_nsp_size;i++) {
ierr = VecDuplicate(pcis->vec1_global,&new_nsp_vecs[i]);CHKERRQ(ierr);
}
start_new = 0;
if (nsp_has_cnst) {
start_new = 1;
ierr = VecSet(new_nsp_vecs[0],1.0);CHKERRQ(ierr);
ierr = VecSet(pcis->vec1_B,1.0);CHKERRQ(ierr);
ierr = KSPSolve(inv_change,pcis->vec1_B,pcis->vec1_B);CHKERRQ(ierr);
ierr = VecScatterBegin(pcis->global_to_B,pcis->vec1_B,new_nsp_vecs[0],INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
ierr = VecScatterEnd (pcis->global_to_B,pcis->vec1_B,new_nsp_vecs[0],INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
}
for (i=0;i<nsp_size;i++) {
ierr = VecCopy(nsp_vecs[i],new_nsp_vecs[i+start_new]);CHKERRQ(ierr);
ierr = VecScatterBegin(pcis->global_to_B,nsp_vecs[i],pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd (pcis->global_to_B,nsp_vecs[i],pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = KSPSolve(inv_change,pcis->vec1_B,pcis->vec1_B);CHKERRQ(ierr);
ierr = VecScatterBegin(pcis->global_to_B,pcis->vec1_B,new_nsp_vecs[i+start_new],INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
ierr = VecScatterEnd (pcis->global_to_B,pcis->vec1_B,new_nsp_vecs[i+start_new],INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
}
ierr = PCBDDCOrthonormalizeVecs(new_nsp_size,new_nsp_vecs);CHKERRQ(ierr);
#if 0
PetscBool nsp_t=PETSC_FALSE;
ierr = MatNullSpaceTest(pcbddc->NullSpace,pc->pmat,&nsp_t);CHKERRQ(ierr);
printf("Original Null Space test: %d\n",nsp_t);
Mat temp_mat;
Mat_IS* matis = (Mat_IS*)pc->pmat->data;
temp_mat = matis->A;
matis->A = pcbddc->local_mat;
pcbddc->local_mat = temp_mat;
ierr = MatNullSpaceTest(pcbddc->NullSpace,pc->pmat,&nsp_t);CHKERRQ(ierr);
printf("Original Null Space, mat changed test: %d\n",nsp_t);
{
PetscReal test_norm;
for (i=0;i<new_nsp_size;i++) {
ierr = MatMult(pc->pmat,new_nsp_vecs[i],pcis->vec1_global);CHKERRQ(ierr);
ierr = VecNorm(pcis->vec1_global,NORM_2,&test_norm);CHKERRQ(ierr);
if (test_norm > 1.e-12) {
printf("------------ERROR VEC %d------------------\n",i);
ierr = VecView(pcis->vec1_global,PETSC_VIEWER_STDOUT_WORLD);
printf("------------------------------------------\n");
}
}
}
#endif
ierr = KSPDestroy(&inv_change);CHKERRQ(ierr);
ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
ierr = MatNullSpaceCreate(comm,PETSC_FALSE,new_nsp_size,new_nsp_vecs,&new_nsp);CHKERRQ(ierr);
ierr = PCBDDCSetNullSpace(pc,new_nsp);CHKERRQ(ierr);
ierr = MatNullSpaceDestroy(&new_nsp);CHKERRQ(ierr);
#if 0
ierr = MatNullSpaceTest(pcbddc->NullSpace,pc->pmat,&nsp_t);CHKERRQ(ierr);
printf("New Null Space, mat changed: %d\n",nsp_t);
temp_mat = matis->A;
matis->A = pcbddc->local_mat;
pcbddc->local_mat = temp_mat;
ierr = MatNullSpaceTest(pcbddc->NullSpace,pc->pmat,&nsp_t);CHKERRQ(ierr);
printf("New Null Space, mat original: %d\n",nsp_t);
#endif
for (i=0;i<new_nsp_size;i++) {
ierr = VecDestroy(&new_nsp_vecs[i]);CHKERRQ(ierr);
}
ierr = PetscFree(new_nsp_vecs);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 11: DMCreate
//......... some of the code is omitted here .........
for (cs = 0, c = 0; cs < num_cs; cs++) {
ierr = ex_get_elem_block(exoid, cs_id[cs], buffer, &num_cell_in_set, &num_vertex_per_cell, &num_attr);CHKERRQ(ierr);
for (c_loc = 0; c_loc < num_cell_in_set; ++c_loc, ++c) {
ierr = DMPlexSetConeSize(*dm, c, num_vertex_per_cell);CHKERRQ(ierr);
}
}
ierr = DMSetUp(*dm);CHKERRQ(ierr);
for (cs = 0, c = 0; cs < num_cs; cs++) {
ierr = ex_get_elem_block(exoid, cs_id[cs], buffer, &num_cell_in_set, &num_vertex_per_cell, &num_attr);CHKERRQ(ierr);
ierr = PetscMalloc2(num_vertex_per_cell*num_cell_in_set,&cs_connect,num_vertex_per_cell,&cone);CHKERRQ(ierr);
ierr = ex_get_elem_conn(exoid, cs_id[cs], cs_connect);CHKERRQ(ierr);
/* EXO uses Fortran-based indexing; Sieve uses C-style indexing and numbers cells first, then vertices. */
for (c_loc = 0, v = 0; c_loc < num_cell_in_set; ++c_loc, ++c) {
for (v_loc = 0; v_loc < num_vertex_per_cell; ++v_loc, ++v) {
cone[v_loc] = cs_connect[v]+numCells-1;
}
if (dim == 3) {
/* Tetrahedra are inverted */
if (num_vertex_per_cell == 4) {
PetscInt tmp = cone[0];
cone[0] = cone[1];
cone[1] = tmp;
}
/* Hexahedra are inverted */
if (num_vertex_per_cell == 8) {
PetscInt tmp = cone[1];
cone[1] = cone[3];
cone[3] = tmp;
}
}
ierr = DMPlexSetCone(*dm, c, cone);CHKERRQ(ierr);
ierr = DMSetLabelValue(*dm, "Cell Sets", c, cs_id[cs]);CHKERRQ(ierr);
}
ierr = PetscFree2(cs_connect,cone);CHKERRQ(ierr);
}
ierr = PetscFree(cs_id);CHKERRQ(ierr);
}
ierr = DMPlexSymmetrize(*dm);CHKERRQ(ierr);
ierr = DMPlexStratify(*dm);CHKERRQ(ierr);
if (interpolate) {
DM idm = NULL;
ierr = DMPlexInterpolate(*dm, &idm);CHKERRQ(ierr);
/* Maintain Cell Sets label */
{
DMLabel label;
ierr = DMRemoveLabel(*dm, "Cell Sets", &label);CHKERRQ(ierr);
if (label) {ierr = DMAddLabel(idm, label);CHKERRQ(ierr);}
}
ierr = DMDestroy(dm);CHKERRQ(ierr);
*dm = idm;
}
/* Create vertex set label */
if (!rank && (num_vs > 0)) {
int vs, v;
/* Read from ex_get_node_set_ids() */
int *vs_id;
/* Read from ex_get_node_set_param() */
int num_vertex_in_set, num_attr;
/* Read from ex_get_node_set() */
int *vs_vertex_list;
/* Get vertex set ids */
ierr = PetscMalloc1(num_vs, &vs_id);CHKERRQ(ierr);
Example 12: MatCholeskyFactorNumeric_SeqSBAIJ_4_NaturalOrdering
//......... some of the code is omitted here .........
dk[6] += uik[8]*u[4] + uik[9]*u[5] + uik[10]*u[6]+ uik[11]*u[7];
dk[7] += uik[12]*u[4]+ uik[13]*u[5]+ uik[14]*u[6]+ uik[15]*u[7];
dk[8] += uik[0]*u[8] + uik[1]*u[9] + uik[2]*u[10] + uik[3]*u[11];
dk[9] += uik[4]*u[8] + uik[5]*u[9] + uik[6]*u[10] + uik[7]*u[11];
dk[10]+= uik[8]*u[8] + uik[9]*u[9] + uik[10]*u[10]+ uik[11]*u[11];
dk[11]+= uik[12]*u[8]+ uik[13]*u[9]+ uik[14]*u[10]+ uik[15]*u[11];
dk[12]+= uik[0]*u[12] + uik[1]*u[13] + uik[2]*u[14] + uik[3]*u[15];
dk[13]+= uik[4]*u[12] + uik[5]*u[13] + uik[6]*u[14] + uik[7]*u[15];
dk[14]+= uik[8]*u[12] + uik[9]*u[13] + uik[10]*u[14]+ uik[11]*u[15];
dk[15]+= uik[12]*u[12]+ uik[13]*u[13]+ uik[14]*u[14]+ uik[15]*u[15];
ierr = PetscLogFlops(64.0*4.0);CHKERRQ(ierr);
/* update -U(i,k) */
ierr = PetscMemcpy(ba+ili*16,uik,16*sizeof(MatScalar));CHKERRQ(ierr);
/* add multiple of row i to k-th row ... */
jmin = ili + 1; jmax = bi[i+1];
if (jmin < jmax) {
for (j=jmin; j<jmax; j++) {
/* rtmp += -U(i,k)^T * U_bar(i,j) */
rtmp_ptr = rtmp + bj[j]*16;
u = ba + j*16;
rtmp_ptr[0] += uik[0]*u[0] + uik[1]*u[1] + uik[2]*u[2] + uik[3]*u[3];
rtmp_ptr[1] += uik[4]*u[0] + uik[5]*u[1] + uik[6]*u[2] + uik[7]*u[3];
rtmp_ptr[2] += uik[8]*u[0] + uik[9]*u[1] + uik[10]*u[2]+ uik[11]*u[3];
rtmp_ptr[3] += uik[12]*u[0]+ uik[13]*u[1]+ uik[14]*u[2]+ uik[15]*u[3];
rtmp_ptr[4] += uik[0]*u[4] + uik[1]*u[5] + uik[2]*u[6] + uik[3]*u[7];
rtmp_ptr[5] += uik[4]*u[4] + uik[5]*u[5] + uik[6]*u[6] + uik[7]*u[7];
rtmp_ptr[6] += uik[8]*u[4] + uik[9]*u[5] + uik[10]*u[6]+ uik[11]*u[7];
rtmp_ptr[7] += uik[12]*u[4]+ uik[13]*u[5]+ uik[14]*u[6]+ uik[15]*u[7];
rtmp_ptr[8] += uik[0]*u[8] + uik[1]*u[9] + uik[2]*u[10] + uik[3]*u[11];
rtmp_ptr[9] += uik[4]*u[8] + uik[5]*u[9] + uik[6]*u[10] + uik[7]*u[11];
rtmp_ptr[10]+= uik[8]*u[8] + uik[9]*u[9] + uik[10]*u[10]+ uik[11]*u[11];
rtmp_ptr[11]+= uik[12]*u[8]+ uik[13]*u[9]+ uik[14]*u[10]+ uik[15]*u[11];
rtmp_ptr[12]+= uik[0]*u[12] + uik[1]*u[13] + uik[2]*u[14] + uik[3]*u[15];
rtmp_ptr[13]+= uik[4]*u[12] + uik[5]*u[13] + uik[6]*u[14] + uik[7]*u[15];
rtmp_ptr[14]+= uik[8]*u[12] + uik[9]*u[13] + uik[10]*u[14]+ uik[11]*u[15];
rtmp_ptr[15]+= uik[12]*u[12]+ uik[13]*u[13]+ uik[14]*u[14]+ uik[15]*u[15];
}
ierr = PetscLogFlops(2.0*64.0*(jmax-jmin));CHKERRQ(ierr);
/* ... add i to row list for next nonzero entry */
il[i] = jmin; /* update il(i) in column k+1, ... mbs-1 */
j = bj[jmin];
jl[i] = jl[j]; jl[j] = i; /* update jl */
}
i = nexti;
}
/* save nonzero entries in k-th row of U ... */
/* invert diagonal block */
diag = ba+k*16;
ierr = PetscMemcpy(diag,dk,16*sizeof(MatScalar));CHKERRQ(ierr);
if (pivotinblocks) {
ierr = PetscKernel_A_gets_inverse_A_4(diag,shift);CHKERRQ(ierr);
} else {
ierr = PetscKernel_A_gets_inverse_A_4_nopivot(diag);CHKERRQ(ierr);
}
jmin = bi[k]; jmax = bi[k+1];
if (jmin < jmax) {
for (j=jmin; j<jmax; j++) {
vj = bj[j]; /* block col. index of U */
u = ba + j*16;
rtmp_ptr = rtmp + vj*16;
for (k1=0; k1<16; k1++) {
*u++ = *rtmp_ptr;
*rtmp_ptr++ = 0.0;
}
}
/* ... add k to row list for first nonzero entry in k-th row */
il[k] = jmin;
i = bj[jmin];
jl[k] = jl[i]; jl[i] = k;
}
}
ierr = PetscFree(rtmp);CHKERRQ(ierr);
ierr = PetscFree2(il,jl);CHKERRQ(ierr);
ierr = PetscFree2(dk,uik);CHKERRQ(ierr);
C->ops->solve = MatSolve_SeqSBAIJ_4_NaturalOrdering_inplace;
C->ops->solvetranspose = MatSolve_SeqSBAIJ_4_NaturalOrdering_inplace;
C->ops->forwardsolve = MatForwardSolve_SeqSBAIJ_4_NaturalOrdering_inplace;
C->ops->backwardsolve = MatBackwardSolve_SeqSBAIJ_4_NaturalOrdering_inplace;
C->assembled = PETSC_TRUE;
C->preallocated = PETSC_TRUE;
ierr = PetscLogFlops(1.3333*64*b->mbs);CHKERRQ(ierr); /* from inverting diagonal blocks */
PetscFunctionReturn(0);
}
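Example 12 releases its scratch storage with a mix of PetscFree() and PetscFree2(); the rule behind the pairing is that arrays allocated together with PetscMalloc2() must be released together with PetscFree2() on the same pointer pair. A minimal sketch of that contract (variable names are our own):
PetscErrorCode ierr;
PetscInt       n = 8,*il,*jl;
ierr = PetscMalloc2(n,&il,n,&jl);CHKERRQ(ierr); /* one call allocates both arrays */
/* ... fill and use il[] and jl[] ... */
ierr = PetscFree2(il,jl);CHKERRQ(ierr);         /* both must be freed by the matching PetscFree2 */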
Example 13: main
int main(int argc,char **argv)
{
PetscErrorCode ierr;
PetscMPIInt size,rank;
PetscInt n = 5,i,*blks,bs = 1,m = 2;
PetscScalar value;
Vec x,y;
IS is1,is2;
VecScatter ctx = 0;
PetscViewer sviewer;
ierr = PetscInitialize(&argc,&argv,(char*)0,help);CHKERRQ(ierr);
ierr = PetscOptionsGetInt(NULL,"-n",&n,NULL);CHKERRQ(ierr);
ierr = PetscOptionsGetInt(NULL,"-bs",&bs,NULL);CHKERRQ(ierr);
ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
/* create two vectors */
ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
ierr = VecSetSizes(x,PETSC_DECIDE,size*bs*n);CHKERRQ(ierr);
ierr = VecSetFromOptions(x);CHKERRQ(ierr);
/* create two index sets */
if (rank < size-1) m = n + 2;
else m = n;
ierr = PetscMalloc1(m,&blks);CHKERRQ(ierr);
blks[0] = n*rank;
for (i=1; i<m; i++) blks[i] = blks[i-1] + 1;
ierr = ISCreateBlock(PETSC_COMM_SELF,bs,m,blks,PETSC_COPY_VALUES,&is1);CHKERRQ(ierr);
ierr = PetscFree(blks);CHKERRQ(ierr);
ierr = VecCreateSeq(PETSC_COMM_SELF,bs*m,&y);CHKERRQ(ierr);
ierr = ISCreateStride(PETSC_COMM_SELF,bs*m,0,1,&is2);CHKERRQ(ierr);
/* each processor inserts the entire vector */
/* this is redundant but tests assembly */
for (i=0; i<bs*n*size; i++) {
value = (PetscScalar) i;
ierr = VecSetValues(x,1,&i,&value,INSERT_VALUES);CHKERRQ(ierr);
}
ierr = VecAssemblyBegin(x);CHKERRQ(ierr);
ierr = VecAssemblyEnd(x);CHKERRQ(ierr);
ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = VecScatterCreate(x,is1,y,is2,&ctx);CHKERRQ(ierr);
ierr = VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = PetscViewerASCIIPushSynchronized(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = PetscViewerASCIISynchronizedPrintf(PETSC_VIEWER_STDOUT_WORLD,"----\n");CHKERRQ(ierr);
ierr = PetscViewerGetSubViewer(PETSC_VIEWER_STDOUT_WORLD,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
ierr = VecView(y,sviewer);CHKERRQ(ierr); fflush(stdout);
ierr = PetscViewerRestoreSubViewer(PETSC_VIEWER_STDOUT_WORLD,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
ierr = PetscViewerFlush(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = PetscViewerASCIIPopSynchronized(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
ierr = VecScatterDestroy(&ctx);CHKERRQ(ierr);
ierr = VecDestroy(&x);CHKERRQ(ierr);
ierr = VecDestroy(&y);CHKERRQ(ierr);
ierr = ISDestroy(&is1);CHKERRQ(ierr);
ierr = ISDestroy(&is2);CHKERRQ(ierr);
ierr = PetscFinalize();
return 0;
}
Example 14: main
//......... some of the code is omitted here .........
data_out = (fftw_complex*)fftw_malloc(sizeof(fftw_complex)*alloc_local);
data_out2 = (fftw_complex*)fftw_malloc(sizeof(fftw_complex)*alloc_local);
ierr = VecCreateMPIWithArray(PETSC_COMM_WORLD,1,(PetscInt)local_n0*N1,(PetscInt)N,(const PetscScalar*)data_in,&x);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) x, "Real Space vector");CHKERRQ(ierr);
ierr = VecCreateMPIWithArray(PETSC_COMM_WORLD,1,(PetscInt)local_n0*N1,(PetscInt)N,(const PetscScalar*)data_out,&y);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) y, "Frequency space vector");CHKERRQ(ierr);
ierr = VecCreateMPIWithArray(PETSC_COMM_WORLD,1,(PetscInt)local_n0*N1,(PetscInt)N,(const PetscScalar*)data_out2,&z);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) z, "Reconstructed vector");CHKERRQ(ierr);
fplan = fftw_mpi_plan_dft_2d(N0,N1,data_in,data_out,PETSC_COMM_WORLD,FFTW_FORWARD,FFTW_ESTIMATE);
bplan = fftw_mpi_plan_dft_2d(N0,N1,data_out,data_out2,PETSC_COMM_WORLD,FFTW_BACKWARD,FFTW_ESTIMATE);
ierr = VecSetRandom(x, rdm);CHKERRQ(ierr);
if (view) {ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}
fftw_execute(fplan);
if (view) {ierr = VecView(y,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}
fftw_execute(bplan);
/* Compare x and z. FFTW computes an unnormalized DFT, thus z = N*x */
a = 1.0/(PetscReal)N;
ierr = VecScale(z,a);CHKERRQ(ierr);
if (view) {ierr = VecView(z, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}
ierr = VecAXPY(z,-1.0,x);CHKERRQ(ierr);
ierr = VecNorm(z,NORM_1,&enorm);CHKERRQ(ierr);
if (enorm > 1.e-11 && !rank) {
ierr = PetscPrintf(PETSC_COMM_SELF," Error norm of |x - z| %g\n",(double)enorm);CHKERRQ(ierr);
}
/* Free spaces */
fftw_destroy_plan(fplan);
fftw_destroy_plan(bplan);
fftw_free(data_in); ierr = VecDestroy(&x);CHKERRQ(ierr);
fftw_free(data_out); ierr = VecDestroy(&y);CHKERRQ(ierr);
fftw_free(data_out2);ierr = VecDestroy(&z);CHKERRQ(ierr);
} else {
/* Use PETSc-FFTW interface */
/*-------------------------------------------*/
PetscInt i,*dim,k;
Mat A;
N=1;
for (i=1; i<5; i++) {
DIM = i;
ierr = PetscMalloc1(i,&dim);CHKERRQ(ierr);
for (k=0; k<i; k++) {
dim[k]=30;
}
N *= dim[i-1];
/* Create FFTW object */
if (!rank) printf("Use PETSc-FFTW interface...%d-DIM: %d\n",(int)DIM,(int)N);
ierr = MatCreateFFT(PETSC_COMM_WORLD,DIM,dim,MATFFTW,&A);CHKERRQ(ierr);
/* Create vectors that are compatible with the parallel layout of A - must call MatGetVecsFFTW()! */
ierr = MatGetVecsFFTW(A,&x,&y,&z);CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) x, "Real space vector");CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) y, "Frequency space vector");CHKERRQ(ierr);
ierr = PetscObjectSetName((PetscObject) z, "Reconstructed vector");CHKERRQ(ierr);
/* Set values of space vector x */
ierr = VecSetRandom(x,rdm);CHKERRQ(ierr);
if (view) {ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}
/* Apply FFTW_FORWARD and FFTW_BACKWARD */
ierr = MatMult(A,x,y);CHKERRQ(ierr);
if (view) {ierr = VecView(y,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}
ierr = MatMultTranspose(A,y,z);CHKERRQ(ierr);
/* Compare x and z. FFTW computes an unnormalized DFT, thus z = N*x */
a = 1.0/(PetscReal)N;
ierr = VecScale(z,a);CHKERRQ(ierr);
if (view) {ierr = VecView(z,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}
ierr = VecAXPY(z,-1.0,x);CHKERRQ(ierr);
ierr = VecNorm(z,NORM_1,&enorm);CHKERRQ(ierr);
if (enorm > 1.e-9 && !rank) {
ierr = PetscPrintf(PETSC_COMM_SELF," Error norm of |x - z| %e\n",enorm);CHKERRQ(ierr);
}
ierr = VecDestroy(&x);CHKERRQ(ierr);
ierr = VecDestroy(&y);CHKERRQ(ierr);
ierr = VecDestroy(&z);CHKERRQ(ierr);
ierr = MatDestroy(&A);CHKERRQ(ierr);
ierr = PetscFree(dim);CHKERRQ(ierr);
}
}
ierr = PetscRandomDestroy(&rdm);CHKERRQ(ierr);
ierr = PetscFinalize();
return 0;
}
Example 15: PCMGSetType
/*@C
PCMGSetLevels - Sets the number of levels to use with MG.
Must be called before any other MG routine.
Logically Collective on PC
Input Parameters:
+ pc - the preconditioner context
. levels - the number of levels
- comms - optional communicators for each level; this is to allow solving the coarser problems
on smaller sets of processors. Use NULL_OBJECT for default in Fortran
Level: intermediate
Notes:
If the number of levels is one then the multigrid uses the -mg_levels prefix
for setting the level options rather than the -mg_coarse prefix.
.keywords: MG, set, levels, multigrid
.seealso: PCMGSetType(), PCMGGetLevels()
@*/
PetscErrorCode PCMGSetLevels(PC pc,PetscInt levels,MPI_Comm *comms)
{
PetscErrorCode ierr;
PC_MG *mg = (PC_MG*)pc->data;
MPI_Comm comm;
PC_MG_Levels **mglevels = mg->levels;
PetscInt i;
PetscMPIInt size;
const char *prefix;
PC ipc;
PetscInt n;
PetscFunctionBegin;
PetscValidHeaderSpecific(pc,PC_CLASSID,1);
PetscValidLogicalCollectiveInt(pc,levels,2);
ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
if (mg->nlevels == levels) PetscFunctionReturn(0);
if (mglevels) {
/* changing the number of levels so free up the previous stuff */
ierr = PCReset_MG(pc);CHKERRQ(ierr);
n = mglevels[0]->levels;
for (i=0; i<n; i++) {
if (mglevels[i]->smoothd != mglevels[i]->smoothu) {
ierr = KSPDestroy(&mglevels[i]->smoothd);CHKERRQ(ierr);
}
ierr = KSPDestroy(&mglevels[i]->smoothu);CHKERRQ(ierr);
ierr = PetscFree(mglevels[i]);CHKERRQ(ierr);
}
ierr = PetscFree(mg->levels);CHKERRQ(ierr);
}
mg->nlevels = levels;
ierr = PetscMalloc1(levels,&mglevels);CHKERRQ(ierr);
ierr = PetscLogObjectMemory((PetscObject)pc,levels*(sizeof(PC_MG*)));CHKERRQ(ierr);
ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
mg->stageApply = 0;
for (i=0; i<levels; i++) {
ierr = PetscNewLog(pc,&mglevels[i]);CHKERRQ(ierr);
mglevels[i]->level = i;
mglevels[i]->levels = levels;
mglevels[i]->cycles = PC_MG_CYCLE_V;
mg->default_smoothu = 2;
mg->default_smoothd = 2;
mglevels[i]->eventsmoothsetup = 0;
mglevels[i]->eventsmoothsolve = 0;
mglevels[i]->eventresidual = 0;
mglevels[i]->eventinterprestrict = 0;
if (comms) comm = comms[i];
ierr = KSPCreate(comm,&mglevels[i]->smoothd);CHKERRQ(ierr);
ierr = KSPSetType(mglevels[i]->smoothd,KSPCHEBYSHEV);CHKERRQ(ierr);
ierr = KSPSetConvergenceTest(mglevels[i]->smoothd,KSPConvergedSkip,NULL,NULL);CHKERRQ(ierr);
ierr = KSPSetNormType(mglevels[i]->smoothd,KSP_NORM_NONE);CHKERRQ(ierr);
ierr = KSPGetPC(mglevels[i]->smoothd,&ipc);CHKERRQ(ierr);
ierr = PCSetType(ipc,PCSOR);CHKERRQ(ierr);
ierr = PetscObjectIncrementTabLevel((PetscObject)mglevels[i]->smoothd,(PetscObject)pc,levels-i);CHKERRQ(ierr);
ierr = KSPSetTolerances(mglevels[i]->smoothd,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT, i ? mg->default_smoothd : 1);CHKERRQ(ierr);
ierr = KSPSetOptionsPrefix(mglevels[i]->smoothd,prefix);CHKERRQ(ierr);
/* do special stuff for coarse grid */
if (!i && levels > 1) {
ierr = KSPAppendOptionsPrefix(mglevels[0]->smoothd,"mg_coarse_");CHKERRQ(ierr);
/* coarse solve is (redundant) LU by default; set shifttype NONZERO to avoid annoying zero-pivot in LU preconditioner */
ierr = KSPSetType(mglevels[0]->smoothd,KSPPREONLY);CHKERRQ(ierr);
ierr = KSPGetPC(mglevels[0]->smoothd,&ipc);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) {
KSP innerksp;
PC innerpc;
ierr = PCSetType(ipc,PCREDUNDANT);CHKERRQ(ierr);
ierr = PCRedundantGetKSP(ipc,&innerksp);CHKERRQ(ierr);
ierr = KSPGetPC(innerksp,&innerpc);CHKERRQ(ierr);
ierr = PCFactorSetShiftType(innerpc,MAT_SHIFT_INBLOCKS);CHKERRQ(ierr);
//......... some of the code is omitted here .........
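For completeness, a minimal caller-side sketch of the routine documented above (assuming an existing KSP object named ksp; the calls themselves are the documented API):
PC             pc;
PetscErrorCode ierr;
ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
ierr = PCSetType(pc,PCMG);CHKERRQ(ierr);
ierr = PCMGSetLevels(pc,3,NULL);CHKERRQ(ierr);  /* three levels, default communicator on each */
Passing NULL for comms keeps every level on the PC's own communicator, which is the common case.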