This article collects typical usage examples of the DMSetUp function in C++. If you are wondering what DMSetUp does, how to call DMSetUp, or what real-world DMSetUp usage looks like, the hand-picked code examples below should help.
A total of 15 code examples of the DMSetUp function are shown below, sorted by popularity by default.
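Before the individual examples, the common calling pattern is worth stating once: create a DM, configure it (DMSetType()/DMSetFromOptions()), call DMSetUp(), and only then request vectors or matrices from it. A minimal self-contained sketch of that pattern (not taken from any example below; the grid sizes are arbitrary placeholders):
/* Minimal DMSetUp workflow sketch; grid sizes are arbitrary placeholders */
#include <petscdmda.h>

int main(int argc, char **argv)
{
  DM             da;
  Vec            x;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
  ierr = DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE,
                      DMDA_STENCIL_STAR, 16, 16, PETSC_DECIDE, PETSC_DECIDE,
                      1, 1, NULL, NULL, &da);CHKERRQ(ierr);
  ierr = DMSetFromOptions(da);CHKERRQ(ierr); /* honor -da_* command-line options */
  ierr = DMSetUp(da);CHKERRQ(ierr);          /* finalize the layout before any use */
  ierr = DMCreateGlobalVector(da, &x);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = DMDestroy(&da);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}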
Example 1: DMCreateInterpolation_Composite
PetscErrorCode DMCreateInterpolation_Composite(DM coarse,DM fine,Mat *A,Vec *v)
{
PetscErrorCode ierr;
PetscInt m,n,M,N,nDM,i;
struct DMCompositeLink *nextc;
struct DMCompositeLink *nextf;
Vec gcoarse,gfine,*vecs;
DM_Composite *comcoarse = (DM_Composite*)coarse->data;
DM_Composite *comfine = (DM_Composite*)fine->data;
Mat *mats;
PetscFunctionBegin;
PetscValidHeaderSpecific(coarse,DM_CLASSID,1);
PetscValidHeaderSpecific(fine,DM_CLASSID,2);
ierr = DMSetUp(coarse);CHKERRQ(ierr);
ierr = DMSetUp(fine);CHKERRQ(ierr);
/* use global vectors only for determining matrix layout */
ierr = DMGetGlobalVector(coarse,&gcoarse);CHKERRQ(ierr);
ierr = DMGetGlobalVector(fine,&gfine);CHKERRQ(ierr);
ierr = VecGetLocalSize(gcoarse,&n);CHKERRQ(ierr);
ierr = VecGetLocalSize(gfine,&m);CHKERRQ(ierr);
ierr = VecGetSize(gcoarse,&N);CHKERRQ(ierr);
ierr = VecGetSize(gfine,&M);CHKERRQ(ierr);
ierr = DMRestoreGlobalVector(coarse,&gcoarse);CHKERRQ(ierr);
ierr = DMRestoreGlobalVector(fine,&gfine);CHKERRQ(ierr);
nDM = comfine->nDM;
if (nDM != comcoarse->nDM) SETERRQ2(((PetscObject)fine)->comm,PETSC_ERR_ARG_INCOMP,"Fine DMComposite has %D entries, but coarse has %D",nDM,comcoarse->nDM);
ierr = PetscMalloc(nDM*nDM*sizeof(Mat),&mats);CHKERRQ(ierr);
ierr = PetscMemzero(mats,nDM*nDM*sizeof(Mat));CHKERRQ(ierr);
if (v) {
ierr = PetscMalloc(nDM*sizeof(Vec),&vecs);CHKERRQ(ierr);
ierr = PetscMemzero(vecs,nDM*sizeof(Vec));CHKERRQ(ierr);
}
/* loop over packed objects, handling one at a time */
for (nextc=comcoarse->next,nextf=comfine->next,i=0; nextc; nextc=nextc->next,nextf=nextf->next,i++) {
if (!v) {
ierr = DMCreateInterpolation(nextc->dm,nextf->dm,&mats[i*nDM+i],PETSC_NULL);CHKERRQ(ierr);
} else {
ierr = DMCreateInterpolation(nextc->dm,nextf->dm,&mats[i*nDM+i],&vecs[i]);CHKERRQ(ierr);
}
}
ierr = MatCreateNest(((PetscObject)fine)->comm,nDM,PETSC_NULL,nDM,PETSC_NULL,mats,A);CHKERRQ(ierr);
if (v) {
ierr = VecCreateNest(((PetscObject)fine)->comm,nDM,PETSC_NULL,vecs,v);CHKERRQ(ierr);
}
for (i=0; i<nDM*nDM; i++) {ierr = MatDestroy(&mats[i]);CHKERRQ(ierr);}
ierr = PetscFree(mats);CHKERRQ(ierr);
if (v) {
for (i=0; i<nDM; i++) {ierr = VecDestroy(&vecs[i]);CHKERRQ(ierr);}
ierr = PetscFree(vecs);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
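A hedged sketch of how a caller reaches the routine above: pack matching sub-DMs into a coarse and a fine DMComposite and call DMCreateInterpolation(), which dispatches here. The sub-DMs dac0 and daf0 are assumed to be compatible coarse/fine DMs created elsewhere:
/* Caller sketch; dac0/daf0 are assumed coarse/fine sub-DMs created elsewhere */
DM  coarse, fine;
Mat Interp;
Vec scale;

ierr = DMCompositeCreate(PETSC_COMM_WORLD, &coarse);CHKERRQ(ierr);
ierr = DMCompositeCreate(PETSC_COMM_WORLD, &fine);CHKERRQ(ierr);
ierr = DMCompositeAddDM(coarse, dac0);CHKERRQ(ierr); /* same number of sub-DMs ... */
ierr = DMCompositeAddDM(fine, daf0);CHKERRQ(ierr);   /* ... added in the same order */
ierr = DMCreateInterpolation(coarse, fine, &Interp, &scale);CHKERRQ(ierr);
/* Interp is a MATNEST with one interpolation block per sub-DM on its diagonal */
ierr = MatDestroy(&Interp);CHKERRQ(ierr);
ierr = VecDestroy(&scale);CHKERRQ(ierr);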
Example 2: main
int main(int argc, char **argv)
{
Mat A;
KSP ksp;
DM shell;
Vec *left, *right;
MPI_Comm c;
PetscErrorCode ierr;
ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
c = PETSC_COMM_WORLD;
ierr = MatCreate(c, &A); CHKERRQ(ierr);
ierr = MatSetSizes(A, 1, 1, PETSC_DECIDE, PETSC_DECIDE); CHKERRQ(ierr);
ierr = MatSetFromOptions(A); CHKERRQ(ierr);
ierr = MatSetUp(A); CHKERRQ(ierr);
ierr = KSPCreate(c, &ksp); CHKERRQ(ierr);
ierr = KSPSetOperators(ksp, A, A); CHKERRQ(ierr);
ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr);
ierr = DMShellCreate(c, &shell); CHKERRQ(ierr);
ierr = DMSetFromOptions(shell); CHKERRQ(ierr);
ierr = DMSetUp(shell); CHKERRQ(ierr);
ierr = KSPSetDM(ksp, shell); CHKERRQ(ierr);
ierr = KSPCreateVecs(ksp, 1, &right, 1, &left); CHKERRQ(ierr);
ierr = VecView(right[0], PETSC_VIEWER_STDOUT_(c));CHKERRQ(ierr);
ierr = VecDestroyVecs(1,&right); CHKERRQ(ierr);
ierr = VecDestroyVecs(1,&left); CHKERRQ(ierr);
ierr = DMDestroy(&shell); CHKERRQ(ierr);
ierr = KSPDestroy(&ksp); CHKERRQ(ierr);
ierr = MatDestroy(&A); CHKERRQ(ierr);
ierr = PetscFinalize();
return ierr;
}
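In this example the layout of the vectors returned by KSPCreateVecs() comes from the operator set with KSPSetOperators(); the DMShell itself carries no data. If the shell were meant to provide vectors on its own, one option (a sketch, not part of the example) is to hand it a template global vector:
/* Sketch: give the empty DMShell a template global vector so DMCreateGlobalVector() works */
Vec x;
ierr = VecCreate(c, &x);CHKERRQ(ierr);
ierr = VecSetSizes(x, 1, PETSC_DECIDE);CHKERRQ(ierr);
ierr = VecSetFromOptions(x);CHKERRQ(ierr);
ierr = DMShellSetGlobalVector(shell, x);CHKERRQ(ierr); /* shell duplicates x on demand */
ierr = VecDestroy(&x);CHKERRQ(ierr);                   /* the shell keeps its own reference */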
Example 3: DMDACreate1d
PetscErrorCode SingleBodyPoints::createDMDA()
{
PetscErrorCode ierr;
DMDALocalInfo lclInfo;
PetscFunctionBeginUser;
ierr = DMDACreate1d(comm, DM_BOUNDARY_NONE, nPts, dim, 0, nullptr, &da);
CHKERRQ(ierr);
ierr = DMSetUp(da); CHKERRQ(ierr);
ierr = DMDAGetLocalInfo(da, &lclInfo); CHKERRQ(ierr);
// copy necessary local info
bgPt = lclInfo.xs;
nLclPts = lclInfo.xm;
edPt = bgPt + nLclPts;
// gather local info from other processes
ierr = MPI_Allgather(&nLclPts, 1, MPIU_INT, nLclAllProcs.data(), 1,
MPIU_INT, comm); CHKERRQ(ierr);
// each point has "dim" degrees of freedom, so we have to multiply by that
for (auto &it : nLclAllProcs) it *= dim;
// calculate the offset of the unpacked DM
for (PetscMPIInt r = mpiSize - 1; r > 0; r--)
offsetsAllProcs[r] = nLclAllProcs[r - 1];
for (PetscMPIInt r = 1; r < mpiSize; r++)
offsetsAllProcs[r] += offsetsAllProcs[r - 1];
PetscFunctionReturn(0);
} // createDMDA
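For concreteness (an illustrative layout, not taken from the source): with three ranks owning 4, 3, and 5 points and dim = 2, nLclAllProcs becomes {8, 6, 10} after the multiplication, and, assuming offsetsAllProcs starts zeroed, the two loops produce offsetsAllProcs = {0, 8, 14}, i.e. an exclusive prefix sum of the per-rank unknown counts.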
Example 4: main
int main(int argc,char **argv)
{
DM dm;
Vec X,Y;
PetscErrorCode ierr;
PetscInt dof = 10;
ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
ierr = PetscOptionsGetInt(NULL,NULL,"-dof",&dof,NULL);CHKERRQ(ierr);
ierr = DMDACreate3d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,-128,-128,-128,PETSC_DECIDE,PETSC_DECIDE,PETSC_DECIDE,dof,1,NULL,NULL,NULL,&dm);CHKERRQ(ierr);
ierr = DMSetFromOptions(dm);CHKERRQ(ierr);
ierr = DMSetUp(dm);CHKERRQ(ierr);
ierr = PetscMemoryTrace("DMDACreate3d ");CHKERRQ(ierr);
ierr = DMCreateGlobalVector(dm,&X);CHKERRQ(ierr);
ierr = PetscMemoryTrace("DMCreateGlobalVector");CHKERRQ(ierr);
ierr = DMCreateGlobalVector(dm,&Y);CHKERRQ(ierr);
ierr = PetscMemoryTrace("DMCreateGlobalVector");CHKERRQ(ierr);
ierr = VecDestroy(&X);CHKERRQ(ierr);
ierr = VecDestroy(&Y);CHKERRQ(ierr);
ierr = DMDestroy(&dm);CHKERRQ(ierr);
ierr = PetscFinalize();
return ierr;
}
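Note that PetscMemoryTrace() is not part of the public PETSc API; in the original test it is presumably a file-scope helper defined above main(). A minimal sketch of such a helper, assuming PetscMemoryGetCurrentUsage() is all it needs:
/* Hypothetical helper; the real test may track deltas or format its output differently */
static PetscErrorCode PetscMemoryTrace(const char label[])
{
  PetscErrorCode ierr;
  PetscLogDouble mem;

  PetscFunctionBegin;
  ierr = PetscMemoryGetCurrentUsage(&mem);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD, "%s current memory %g MiB\n", label, (double)(mem/1048576.0));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}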
Example 5: main
int main(int argc, char **argv)
{
DM dm;
char typeString[256] = {'\0'};
PetscViewer viewer = NULL;
PetscBool flg;
PetscErrorCode ierr;
ierr = PetscInitialize(&argc, &argv, NULL,help);if (ierr) return ierr;
ierr = DMCreate(PETSC_COMM_WORLD, &dm);CHKERRQ(ierr);
ierr = PetscStrncpy(typeString,DMFOREST,256);CHKERRQ(ierr);
ierr = PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"DM Forest example options",NULL);CHKERRQ(ierr);
ierr = PetscOptionsString("-dm_type","The type of the dm",NULL,DMFOREST,typeString,sizeof(typeString),NULL);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
ierr = DMSetType(dm,(DMType) typeString);CHKERRQ(ierr);
ierr = DMSetFromOptions(dm);CHKERRQ(ierr);
ierr = DMSetUp(dm);CHKERRQ(ierr);
ierr = PetscOptionsGetViewer(PETSC_COMM_WORLD,NULL,"-dm_view",&viewer,NULL,&flg);CHKERRQ(ierr);
if (flg) {
ierr = DMView(dm,viewer);CHKERRQ(ierr);
}
ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
ierr = DMDestroy(&dm);CHKERRQ(ierr);
ierr = PetscFinalize();
return ierr;
}
Example 6: DMCompositeGetAccess
/*@C
DMCompositeRestoreAccess - Returns the individual packed vectors obtained with DMCompositeGetAccess().
Collective on DMComposite
Input Parameters:
+ dm - the packer object
. gvec - the global vector
- Vec* ... - the individual parallel vectors, PETSC_NULL for those that are not needed
Level: advanced
.seealso DMCompositeAddDM(), DMCreateGlobalVector(),
DMCompositeGather(), DMCompositeCreate(), DMCompositeGetISLocalToGlobalMappings(), DMCompositeScatter(),
DMCompositeRestoreAccess(), DMCompositeGetAccess()
@*/
PetscErrorCode DMCompositeRestoreAccess(DM dm,Vec gvec,...)
{
va_list Argp;
PetscErrorCode ierr;
struct DMCompositeLink *next;
DM_Composite *com = (DM_Composite*)dm->data;
PetscFunctionBegin;
PetscValidHeaderSpecific(dm,DM_CLASSID,1);
PetscValidHeaderSpecific(gvec,VEC_CLASSID,2);
next = com->next;
if (!com->setup) {
ierr = DMSetUp(dm);CHKERRQ(ierr);
}
/* loop over packed objects, handling one at a time */
va_start(Argp,gvec);
while (next) {
Vec *vec;
vec = va_arg(Argp, Vec*);
if (vec) {
ierr = VecResetArray(*vec);CHKERRQ(ierr);
ierr = DMRestoreGlobalVector(next->dm,vec);CHKERRQ(ierr);
}
next = next->next;
}
va_end(Argp);
PetscFunctionReturn(0);
}
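A sketch of the matching get/restore pair as seen from the caller, assuming pack is a set-up DMComposite with two sub-DMs and X is its global vector:
Vec u, v; /* views into the packed global vector X */
ierr = DMCompositeGetAccess(pack, X, &u, &v);CHKERRQ(ierr);
/* ... read or modify u and v in place; they alias pieces of X ... */
ierr = DMCompositeRestoreAccess(pack, X, &u, &v);CHKERRQ(ierr);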
Example 7: DMCoarsen_Composite
PetscErrorCode DMCoarsen_Composite(DM dmi,MPI_Comm comm,DM *fine)
{
PetscErrorCode ierr;
struct DMCompositeLink *next;
DM_Composite *com = (DM_Composite*)dmi->data;
DM dm;
PetscFunctionBegin;
PetscValidHeaderSpecific(dmi,DM_CLASSID,1);
ierr = DMSetUp(dmi);CHKERRQ(ierr);
if (comm == MPI_COMM_NULL) {
ierr = PetscObjectGetComm((PetscObject)dmi,&comm);CHKERRQ(ierr);
}
next = com->next;
ierr = DMCompositeCreate(comm,fine);CHKERRQ(ierr);
/* loop over packed objects, handling one at a time */
while (next) {
ierr = DMCoarsen(next->dm,comm,&dm);CHKERRQ(ierr);
ierr = DMCompositeAddDM(*fine,dm);CHKERRQ(ierr);
ierr = PetscObjectDereference((PetscObject)dm);CHKERRQ(ierr);
next = next->next;
}
PetscFunctionReturn(0);
}
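Callers normally reach this routine through DMCoarsen(); a sketch, assuming pack is an already set-up DMComposite:
DM coarse;
ierr = DMCoarsen(pack, MPI_COMM_NULL, &coarse);CHKERRQ(ierr); /* MPI_COMM_NULL: reuse pack's communicator */
/* ... use the coarse DMComposite, e.g. as one level of a multigrid hierarchy ... */
ierr = DMDestroy(&coarse);CHKERRQ(ierr);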
Example 8: DMCreateLibMesh
void PetscDMNonlinearSolver<T>::init()
{
PetscErrorCode ierr;
DM dm;
this->PetscNonlinearSolver<T>::init();
// Attaching a DM with the function and Jacobian callbacks to SNES.
ierr = DMCreateLibMesh(libMesh::COMM_WORLD, this->system(), &dm); CHKERRABORT(libMesh::COMM_WORLD, ierr);
ierr = DMSetFromOptions(dm); CHKERRABORT(libMesh::COMM_WORLD, ierr);
ierr = DMSetUp(dm); CHKERRABORT(libMesh::COMM_WORLD, ierr);
ierr = SNESSetDM(this->_snes, dm); CHKERRABORT(libMesh::COMM_WORLD, ierr);
// SNES now owns the reference to dm.
ierr = DMDestroy(&dm); CHKERRABORT(libMesh::COMM_WORLD, ierr);
KSP ksp;
ierr = SNESGetKSP (this->_snes, &ksp); CHKERRABORT(libMesh::COMM_WORLD,ierr);
// Set the tolerances for the iterative solver. Use the user-supplied
// tolerance for the relative residual & leave the others at default values
ierr = KSPSetTolerances (ksp, this->initial_linear_tolerance, PETSC_DEFAULT,PETSC_DEFAULT, this->max_linear_iterations); CHKERRABORT(libMesh::COMM_WORLD,ierr);
// Set the tolerances for the non-linear solver.
ierr = SNESSetTolerances(this->_snes,
this->absolute_residual_tolerance,
this->relative_residual_tolerance,
this->absolute_step_tolerance,
this->max_nonlinear_iterations,
this->max_function_evaluations);
CHKERRABORT(libMesh::COMM_WORLD,ierr);
//Pull in command-line options
KSPSetFromOptions(ksp);
SNESSetFromOptions(this->_snes);
}
Example 9: petscSetupDM
void petscSetupDM (NonlinearSystem & nl) {
#if !PETSC_VERSION_LESS_THAN(3,3,0)
PetscErrorCode ierr;
// Initialize the part of the DM package that's packaged with Moose; in the PETSc source tree this call would be in DMInitializePackage()
ierr = DMMooseRegisterAll();
CHKERRABORT(nl.comm().get(),ierr);
// Create and set up the DM that will consume the split options and deal with block matrices.
PetscNonlinearSolver<Number> *petsc_solver = dynamic_cast<PetscNonlinearSolver<Number> *>(nl.sys().nonlinear_solver.get());
SNES snes = petsc_solver->snes();
/* FIXME: reset the DM, do not recreate it anew every time? */
DM dm = PETSC_NULL;
ierr = DMCreateMoose(nl.comm().get(), nl, &dm);
CHKERRABORT(nl.comm().get(),ierr);
ierr = DMSetFromOptions(dm);
CHKERRABORT(nl.comm().get(),ierr);
ierr = DMSetUp(dm);
CHKERRABORT(nl.comm().get(),ierr);
ierr = SNESSetDM(snes,dm);
CHKERRABORT(nl.comm().get(),ierr);
ierr = DMDestroy(&dm);
CHKERRABORT(nl.comm().get(),ierr);
ierr = SNESSetUpdate(snes,SNESUpdateDMMoose);
CHKERRABORT(nl.comm().get(),ierr);
#endif
}
Example 10: DMSetUp_Patch
PetscErrorCode DMSetUp_Patch(DM dm)
{
DM_Patch *mesh = (DM_Patch*) dm->data;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
ierr = DMSetUp(mesh->dmCoarse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
Example 11: DMADDACreate
/*@C
DMADDACreate - Creates an ADDA object that translates between coordinates
in a geometric grid of arbitrary dimension and data in a PETSc vector
distributed on several processors.
Collective on MPI_Comm
Input Parameters:
+ comm - MPI communicator
. dim - the dimension of the grid
. nodes - array with dim entries giving the number of nodes in each dimension
. procs - array with dim entries giving the number of processors in each dimension
(or NULL to determine them automatically)
. dof - number of degrees of freedom per node
- periodic - array with dim entries; the i-th entry is PETSC_TRUE iff dimension i is periodic
Output Parameter:
. dm_p - pointer to the ADDA DM object that is created
Level: intermediate
@*/
PetscErrorCode DMADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,PetscInt dof, PetscBool *periodic,DM *dm_p)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = DMCreate(comm,dm_p);CHKERRQ(ierr);
ierr = DMSetType(*dm_p,DMADDA);CHKERRQ(ierr);
ierr = DMADDASetParameters(*dm_p,dim,nodes,procs,dof,periodic);CHKERRQ(ierr);
ierr = DMSetUp(*dm_p);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
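A usage sketch for the convenience routine above, for a 2D non-periodic grid with one degree of freedom per node (all sizes are placeholders; the DMADDA interface dates from older PETSc versions):
DM        adda;
PetscInt  nodes[2]    = {64, 64};
PetscBool periodic[2] = {PETSC_FALSE, PETSC_FALSE};

ierr = DMADDACreate(PETSC_COMM_WORLD, 2, nodes, NULL, 1, periodic, &adda);CHKERRQ(ierr);
/* ... DMCreateGlobalVector(adda, ...), DMCreateMatrix(adda, ...), etc. ... */
ierr = DMDestroy(&adda);CHKERRQ(ierr);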
Example 12: DMDestroy
/*@
DMShellCreate - Creates a shell DM object, used to manage user-defined problem data
Collective on MPI_Comm
Input Parameter:
. comm - the processors that will share the global vector
Output Parameter:
. dm - the shell DM
Level: advanced
.seealso DMDestroy(), DMCreateGlobalVector(), DMCreateLocalVector()
@*/
PetscErrorCode DMShellCreate(MPI_Comm comm,DM *dm)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidPointer(dm,2);
ierr = DMCreate(comm,dm);CHKERRQ(ierr);
ierr = DMSetType(*dm,DMSHELL);CHKERRQ(ierr);
ierr = DMSetUp(*dm);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
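After DMShellCreate() the shell knows nothing about the user's data. A sketch of wiring it up so that DMCreateGlobalVector() and DMCreateMatrix() work and a KSP can use it for layout only (A, x and ksp are assumed to exist already):
DM shell;
ierr = DMShellCreate(PETSC_COMM_WORLD, &shell);CHKERRQ(ierr);
ierr = DMShellSetGlobalVector(shell, x);CHKERRQ(ierr); /* template for DMCreateGlobalVector() */
ierr = DMShellSetMatrix(shell, A);CHKERRQ(ierr);       /* template for DMCreateMatrix() */
ierr = KSPSetDM(ksp, shell);CHKERRQ(ierr);
ierr = KSPSetDMActive(ksp, PETSC_FALSE);CHKERRQ(ierr); /* DM provides layout, not the operator */
ierr = DMDestroy(&shell);CHKERRQ(ierr);                /* ksp holds its own reference */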
Example 13: ISLocalToGlobalMappingDestroy
/*@C
DMCompositeGetISLocalToGlobalMappings - gets an ISLocalToGlobalMapping for each DM in the DMComposite, maps to the composite global space
Collective on DM
Input Parameter:
. dm - the packer object
Output Parameters:
. ltogs - the individual mappings for each packed vector. Note that this includes
all the ghost points that individual ghosted DMDA's may have.
Level: advanced
Notes:
Each entry of ltogs should be destroyed with ISLocalToGlobalMappingDestroy(), and the ltogs array itself should be freed with PetscFree().
.seealso DMDestroy(), DMCompositeAddDM(), DMCreateGlobalVector(),
DMCompositeGather(), DMCompositeCreate(), DMCompositeGetAccess(), DMCompositeScatter(),
DMCompositeGetLocalVectors(), DMCompositeRestoreLocalVectors(),DMCompositeGetEntries()
@*/
PetscErrorCode DMCompositeGetISLocalToGlobalMappings(DM dm,ISLocalToGlobalMapping **ltogs)
{
PetscErrorCode ierr;
PetscInt i,*idx,n,cnt;
struct DMCompositeLink *next;
PetscMPIInt rank;
DM_Composite *com = (DM_Composite*)dm->data;
PetscFunctionBegin;
PetscValidHeaderSpecific(dm,DM_CLASSID,1);
ierr = DMSetUp(dm);CHKERRQ(ierr);
ierr = PetscMalloc((com->nDM)*sizeof(ISLocalToGlobalMapping),ltogs);CHKERRQ(ierr);
next = com->next;
ierr = MPI_Comm_rank(((PetscObject)dm)->comm,&rank);CHKERRQ(ierr);
/* loop over packed objects, handling one at a time */
cnt = 0;
while (next) {
ISLocalToGlobalMapping ltog;
PetscMPIInt size;
const PetscInt *suboff,*indices;
Vec global;
/* Get sub-DM global indices for each local dof */
ierr = DMGetLocalToGlobalMapping(next->dm,<og);CHKERRQ(ierr);
ierr = ISLocalToGlobalMappingGetSize(ltog,&n);CHKERRQ(ierr);
ierr = ISLocalToGlobalMappingGetIndices(ltog,&indices);CHKERRQ(ierr);
ierr = PetscMalloc(n*sizeof(PetscInt),&idx);CHKERRQ(ierr);
/* Get the offsets for the sub-DM global vector */
ierr = DMGetGlobalVector(next->dm,&global);CHKERRQ(ierr);
ierr = VecGetOwnershipRanges(global,&suboff);CHKERRQ(ierr);
ierr = MPI_Comm_size(((PetscObject)global)->comm,&size);CHKERRQ(ierr);
/* Shift the sub-DM definition of the global space to the composite global space */
for (i=0; i<n; i++) {
PetscInt subi = indices[i],lo = 0,hi = size,t;
/* Binary search to find which rank owns subi */
while (hi-lo > 1) {
t = lo + (hi-lo)/2;
if (suboff[t] > subi) hi = t;
else lo = t;
}
idx[i] = subi - suboff[lo] + next->grstarts[lo];
}
ierr = ISLocalToGlobalMappingRestoreIndices(ltog,&indices);CHKERRQ(ierr);
ierr = ISLocalToGlobalMappingCreate(((PetscObject)dm)->comm,n,idx,PETSC_OWN_POINTER,&(*ltogs)[cnt]);CHKERRQ(ierr);
ierr = DMRestoreGlobalVector(next->dm,&global);CHKERRQ(ierr);
next = next->next;
cnt++;
}
PetscFunctionReturn(0);
}
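A sketch of the call and of the cleanup described in the Notes above, assuming pack is a DMComposite that has already been populated:
ISLocalToGlobalMapping *ltogs;
PetscInt               i, nDM;

ierr = DMCompositeGetNumberDM(pack, &nDM);CHKERRQ(ierr);
ierr = DMCompositeGetISLocalToGlobalMappings(pack, &ltogs);CHKERRQ(ierr);
/* ... ltogs[i] maps sub-DM local indices to composite global indices ... */
for (i=0; i<nDM; i++) {ierr = ISLocalToGlobalMappingDestroy(&ltogs[i]);CHKERRQ(ierr);}
ierr = PetscFree(ltogs);CHKERRQ(ierr);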
Example 14: DMDestroy
/*@C
DMRedundantCreate - Creates a DM object, used to manage data for dense globally coupled variables
Collective on MPI_Comm
Input Parameters:
+ comm - the processors that will share the global vector
. rank - rank to own the redundant values
- N - total number of degrees of freedom
Output Parameter:
. dm - the redundant DM
Level: advanced
.seealso DMDestroy(), DMCreateGlobalVector(), DMCreateMatrix(), DMCompositeAddDM(), DMREDUNDANT, DMSetType(), DMRedundantSetSize(), DMRedundantGetSize()
@*/
PetscErrorCode DMRedundantCreate(MPI_Comm comm,PetscInt rank,PetscInt N,DM *dm)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidPointer(dm,2);
ierr = DMCreate(comm,dm);CHKERRQ(ierr);
ierr = DMSetType(*dm,DMREDUNDANT);CHKERRQ(ierr);
ierr = DMRedundantSetSize(*dm,rank,N);CHKERRQ(ierr);
ierr = DMSetUp(*dm);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
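A usage sketch: five globally coupled scalars owned by rank 0 (the numbers are placeholders):
DM  red;
Vec r;

ierr = DMRedundantCreate(PETSC_COMM_WORLD, 0, 5, &red);CHKERRQ(ierr);
ierr = DMCreateGlobalVector(red, &r);CHKERRQ(ierr); /* 5 local entries on rank 0, none elsewhere */
ierr = VecDestroy(&r);CHKERRQ(ierr);
ierr = DMDestroy(&red);CHKERRQ(ierr);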
Example 15: PETScExternalSolverCreate
PetscErrorCode
PETScExternalSolverCreate(MPI_Comm comm, TS * ts)
{
DM da;
PetscErrorCode ierr;
PetscFunctionBeginUser;
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Create distributed array (DMDA) to manage parallel grid and vectors
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
ierr = DMDACreate2d(comm,
DM_BOUNDARY_NONE,
DM_BOUNDARY_NONE,
DMDA_STENCIL_STAR,
11,
11,
PETSC_DECIDE,
PETSC_DECIDE,
1,
1,
NULL,
NULL,
&da);
CHKERRQ(ierr);
ierr = DMSetFromOptions(da);
CHKERRQ(ierr);
ierr = DMSetUp(da);
CHKERRQ(ierr);
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Create timestepping solver context
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
ierr = TSCreate(comm, ts);
CHKERRQ(ierr);
ierr = TSSetProblemType(*ts, TS_NONLINEAR);
CHKERRQ(ierr);
ierr = TSSetType(*ts, TSBEULER);
CHKERRQ(ierr);
ierr = TSSetDM(*ts, da);
CHKERRQ(ierr);
ierr = DMDestroy(&da);
CHKERRQ(ierr);
ierr = TSSetIFunction(*ts, NULL, FormIFunction, nullptr);
CHKERRQ(ierr);
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Set Jacobian evaluation routine
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
ierr = TSSetIJacobian(*ts, NULL, NULL, FormIJacobian, NULL);
CHKERRQ(ierr);
PetscFunctionReturn(0);
}
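A hedged sketch of driving the routine above; FormIFunction()/FormIJacobian() are defined elsewhere in the source, and initial conditions and time-stepping limits are left to the caller or the options database:
TS  ts;
DM  da;
Vec u;

ierr = PETScExternalSolverCreate(PETSC_COMM_WORLD, &ts);CHKERRQ(ierr);
ierr = TSGetDM(ts, &da);CHKERRQ(ierr);
ierr = DMCreateGlobalVector(da, &u);CHKERRQ(ierr);
/* ... fill u with initial conditions ... */
ierr = TSSetFromOptions(ts);CHKERRQ(ierr);
ierr = TSSolve(ts, u);CHKERRQ(ierr);
ierr = VecDestroy(&u);CHKERRQ(ierr);
ierr = TSDestroy(&ts);CHKERRQ(ierr);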