This article collects typical usage examples of the C++ CHKERRABORT function. If you have been wondering what exactly CHKERRABORT does, how it is used, or where to find examples of it, the hand-picked code samples here may help.
Below are 15 code examples of the CHKERRABORT function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples. A minimal sketch of the basic pattern comes first, followed by the project examples.
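As a primer, here is a minimal, self-contained sketch of the basic pattern (it is not taken from any of the projects below; the vector size and communicator are illustrative). Every PETSc call returns a PetscErrorCode, and CHKERRABORT(comm, ierr) calls MPI_Abort on comm when ierr is nonzero. It is the variant to use where CHKERRQ cannot propagate the error code upward, e.g. in main() or in callbacks whose return type is fixed by another library.

#include <petscvec.h>

int main(int argc, char **argv)
{
  Vec            v;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);
  if (ierr) return ierr;                     /* nothing to abort on before init */

  /* Abort the whole communicator if any call fails */
  ierr = VecCreate(PETSC_COMM_WORLD, &v);    CHKERRABORT(PETSC_COMM_WORLD, ierr);
  ierr = VecSetSizes(v, PETSC_DECIDE, 100);  CHKERRABORT(PETSC_COMM_WORLD, ierr);
  ierr = VecSetFromOptions(v);               CHKERRABORT(PETSC_COMM_WORLD, ierr);
  ierr = VecSet(v, 1.0);                     CHKERRABORT(PETSC_COMM_WORLD, ierr);
  ierr = VecDestroy(&v);                     CHKERRABORT(PETSC_COMM_WORLD, ierr);

  ierr = PetscFinalize();
  return ierr;
}

The same pattern runs through all the examples below: compute ierr, then immediately check it with CHKERRABORT on the communicator the object lives on.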
Example 1: ApplyInvDavidsonDiagPrecPETSc
void ApplyInvDavidsonDiagPrecPETSc(void *x, PRIMME_INT *ldx, void *y,
      PRIMME_INT *ldy, int *blockSize, primme_params *primme, int *err) {
   int i, j;
   double shift, d, minDenominator;
   SCALAR *xvec, *yvec;
   const int nLocal = primme->nLocal, bs = *blockSize;
   const PetscScalar *diag;
   Vec vec;
   PetscErrorCode ierr;

   vec = *(Vec *)primme->preconditioner;
   xvec = (SCALAR *)x;
   yvec = (SCALAR *)y;
   minDenominator = 1e-14*(primme->aNorm >= 0.0L ? primme->aNorm : 1.);

   ierr = VecGetArrayRead(vec, &diag); CHKERRABORT(*(MPI_Comm*)primme->commInfo, ierr);
   for (i=0; i<bs; i++) {
      shift = primme->ShiftsForPreconditioner[i];
      for (j=0; j<nLocal; j++) {
         d = diag[j] - shift;
         d = (fabs(d) > minDenominator) ? d : copysign(minDenominator, d);
         yvec[*ldy*i+j] = xvec[*ldx*i+j]/d;
      }
   }
   ierr = VecRestoreArrayRead(vec, &diag); CHKERRABORT(*(MPI_Comm*)primme->commInfo, ierr);
   *err = 0;
}
Example 2: prod
void prod( sparse_matrix_type const& A,
           vector_type const& x,
           vector_type& b ) const
{
    int ierr = 0;
    petsc_sparse_matrix_type const& _A = dynamic_cast<petsc_sparse_matrix_type const&>( A );
    petsc_vector_type const& _x = dynamic_cast<petsc_vector_type const&>( x );
    petsc_vector_type const& _b = dynamic_cast<petsc_vector_type const&>( b );

    if ( _A.mapCol().worldComm().globalSize() == x.map().worldComm().globalSize() )
    {
        //std::cout << "BackendPetsc::prod STANDARD"<< std::endl;
        ierr = MatMult( _A.mat(), _x.vec(), _b.vec() );
        CHKERRABORT( _A.comm().globalComm(), ierr );
    }
    else
    {
        // x lives on a different partition: copy it onto the matrix's column map first
        //std::cout << "BackendPetsc::prod with convert"<< std::endl;
        auto x_convert = petscMPI_vector_type(_A.mapColPtr());
        x_convert.duplicateFromOtherPartition(x);
        x_convert.close();
        ierr = MatMult( _A.mat(), x_convert.vec(), _b.vec() );
        CHKERRABORT( _A.comm().globalComm(), ierr );
    }
    b.close();
}
Example 3: SNESGetKSP
void
PetscOutputter::timestepSetupInternal()
{
  // Only execute if PETSc exists
#ifdef LIBMESH_HAVE_PETSC
  // Extract the non-linear and linear solvers from PETSc
  NonlinearSystem & nl = _problem_ptr->getNonlinearSystem();
  PetscNonlinearSolver<Number> * petsc_solver = dynamic_cast<PetscNonlinearSolver<Number> *>(nl.sys().nonlinear_solver.get());
  SNES snes = petsc_solver->snes();
  KSP ksp;
  SNESGetKSP(snes, &ksp);

  // Update the pseudo times
  _nonlinear_time = _time_old;                        // non-linear time starts with the previous time step
  _nonlinear_dt   = _dt/_nonlinear_dt_divisor;        // set the pseudo non-linear timestep
  _linear_dt      = _nonlinear_dt/_linear_dt_divisor; // set the pseudo linear timestep

  // Set the PETSc monitor functions
  if (_output_nonlinear || (_time >= _nonlinear_start_time - _t_tol && _time <= _nonlinear_end_time + _t_tol))
  {
    PetscErrorCode ierr = SNESMonitorSet(snes, petscNonlinearOutput, this, PETSC_NULL);
    CHKERRABORT(libMesh::COMM_WORLD, ierr);
  }
  if (_output_linear || (_time >= _linear_start_time - _t_tol && _time <= _linear_end_time + _t_tol))
  {
    PetscErrorCode ierr = KSPMonitorSet(ksp, petscLinearOutput, this, PETSC_NULL);
    CHKERRABORT(libMesh::COMM_WORLD, ierr);
  }
#endif
}
Example 4: START_LOG
void PetscDiffSolver::init ()
{
  START_LOG("init()", "PetscDiffSolver");

  Parent::init();

  int ierr = 0;

#if PETSC_VERSION_LESS_THAN(2,1,2)
  // At least until PETSc 2.1.1, SNESCreate had a different calling
  // syntax: the second argument was of type SNESProblemType and could
  // be either SNES_NONLINEAR_EQUATIONS or SNES_UNCONSTRAINED_MINIMIZATION.
  ierr = SNESCreate(libMesh::COMM_WORLD, SNES_NONLINEAR_EQUATIONS, &_snes);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
#else
  ierr = SNESCreate(libMesh::COMM_WORLD, &_snes);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
#endif

#if PETSC_VERSION_LESS_THAN(2,3,3)
  ierr = SNESSetMonitor (_snes, __libmesh_petsc_diff_solver_monitor,
                         this, PETSC_NULL);
#else
  // API name change in PETSc 2.3.3
  ierr = SNESMonitorSet (_snes, __libmesh_petsc_diff_solver_monitor,
                         this, PETSC_NULL);
#endif
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  ierr = SNESSetFromOptions(_snes);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  STOP_LOG("init()", "PetscDiffSolver");
}
Example 5: PetscTimeStepper
PetscTimeStepper(FEProblem &feproblem) : TimeStepper(feproblem) {
  PetscErrorCode ierr;
  ierr = TSCreate(libMesh::COMM_WORLD, &this->_ts);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
  ierr = TSSetApplicationContext(this->_ts, this);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
}
Example 6: MatCreateShell
std::pair<unsigned int, unsigned int>
SlepcEigenSolver<T>::solve_standard (ShellMatrix<T> &shell_matrix,
                                     int nev,                  // number of requested eigenpairs
                                     int ncv,                  // number of basis vectors
                                     const double tol,         // solver tolerance
                                     const unsigned int m_its) // maximum number of iterations
{
  this->init ();

  int ierr = 0;

  // Prepare the matrix.
  Mat mat;
  ierr = MatCreateShell(libMesh::COMM_WORLD,
                        shell_matrix.m(), // the number of local rows
                        shell_matrix.n(), // the number of local columns
                        PETSC_DETERMINE,
                        PETSC_DETERMINE,
                        const_cast<void*>(static_cast<const void*>(&shell_matrix)),
                        &mat);
  /* Note that the const_cast above is only necessary because PETSc
     does not accept a const void*.  Inside the member function
     _petsc_shell_matrix() below, the pointer is cast back to a
     const ShellMatrix<T>*. */
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  // Check the error code after each MatShellSetOperation call, so the
  // first result is not overwritten before it is inspected.
  ierr = MatShellSetOperation(mat, MATOP_MULT, reinterpret_cast<void(*)(void)>(_petsc_shell_matrix_mult));
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
  ierr = MatShellSetOperation(mat, MATOP_GET_DIAGONAL, reinterpret_cast<void(*)(void)>(_petsc_shell_matrix_get_diagonal));
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  return _solve_standard_helper(mat, nev, ncv, tol, m_its);
}
Example 7: TSFunction_Sundials
int TSFunction_Sundials(realtype t, N_Vector y, N_Vector ydot, void *ctx)
{
  TS             ts    = (TS) ctx;
  MPI_Comm       comm  = ((PetscObject)ts)->comm;
  TS_Sundials    *cvode = (TS_Sundials*)ts->data;
  Vec            yy = cvode->w1, yyd = cvode->w2, yydot = cvode->ydot;
  PetscScalar    *y_data, *ydot_data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Make the PETSc work vectors yy and yyd point to the arrays in the
     SUNDIALS vectors y and ydot, respectively */
  y_data    = (PetscScalar *) N_VGetArrayPointer(y);
  ydot_data = (PetscScalar *) N_VGetArrayPointer(ydot);
  ierr = VecPlaceArray(yy, y_data);CHKERRABORT(comm,ierr);
  ierr = VecPlaceArray(yyd, ydot_data);CHKERRABORT(comm,ierr);

  /* Now compute the right-hand-side function */
  if (!ts->userops->ifunction) {
    ierr = TSComputeRHSFunction(ts,t,yy,yyd);CHKERRQ(ierr);
  } else { /* If an IFunction is set, compute both parts and move them to the right-hand side */
    ierr = VecZeroEntries(yydot);CHKERRQ(ierr);
    ierr = TSComputeIFunction(ts,t,yy,yydot,yyd,PETSC_FALSE);CHKERRABORT(comm,ierr);
    ierr = VecScale(yyd,-1.);CHKERRQ(ierr);
  }
  ierr = VecResetArray(yy);CHKERRABORT(comm,ierr);
  ierr = VecResetArray(yyd);CHKERRABORT(comm,ierr);
  PetscFunctionReturn(0);
}
Example 8: TSFunction_Sundials
int TSFunction_Sundials(realtype t, N_Vector y, N_Vector ydot, void *ctx)
{
  TS             ts = (TS) ctx;
  DM             dm;
  DMTS           tsdm;
  TSIFunction    ifunction;
  MPI_Comm       comm;
  TS_Sundials    *cvode = (TS_Sundials*)ts->data;
  Vec            yy = cvode->w1, yyd = cvode->w2, yydot = cvode->ydot;
  PetscScalar    *y_data, *ydot_data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)ts,&comm);CHKERRQ(ierr);
  /* Make the PETSc work vectors yy and yyd point to the arrays in the
     SUNDIALS vectors y and ydot, respectively */
  y_data    = (PetscScalar*) N_VGetArrayPointer(y);
  ydot_data = (PetscScalar*) N_VGetArrayPointer(ydot);
  ierr = VecPlaceArray(yy,y_data);CHKERRABORT(comm,ierr);
  ierr = VecPlaceArray(yyd,ydot_data);CHKERRABORT(comm,ierr);

  /* Now compute the right-hand-side function, via the IFunction unless
     only the more efficient RHSFunction is set */
  ierr = TSGetDM(ts,&dm);CHKERRQ(ierr);
  ierr = DMGetDMTS(dm,&tsdm);CHKERRQ(ierr);
  ierr = DMTSGetIFunction(dm,&ifunction,NULL);CHKERRQ(ierr);
  if (!ifunction) {
    ierr = TSComputeRHSFunction(ts,t,yy,yyd);CHKERRQ(ierr);
  } else { /* If an IFunction is set, compute both parts and move them to the right-hand side */
    ierr = VecZeroEntries(yydot);CHKERRQ(ierr);
    ierr = TSComputeIFunction(ts,t,yy,yydot,yyd,PETSC_FALSE);CHKERRABORT(comm,ierr);
    ierr = VecScale(yyd,-1.);CHKERRQ(ierr);
  }
  ierr = VecResetArray(yy);CHKERRABORT(comm,ierr);
  ierr = VecResetArray(yyd);CHKERRABORT(comm,ierr);
  PetscFunctionReturn(0);
}
Example 9: PetscInitialized
void
SolverLinearPetsc<T>::clear ()
{
    PetscBool pinit;
    PetscInitialized( &pinit );

    if ( pinit && this->initialized() )
    {
        this->setInitialized( false );

        int ierr = 0;

        // 2.1.x & earlier style
#if (PETSC_VERSION_MAJOR == 2) && (PETSC_VERSION_MINOR <= 1)
        ierr = SLESDestroy( M_sles );
        CHKERRABORT( this->worldComm().globalComm(), ierr );
        // 2.2.0 & newer style
#else
        FEELPP_ASSERT( M_ksp != 0 ).error( "invalid ksp" );
        ierr = PETSc::KSPDestroy( M_ksp );
        CHKERRABORT( this->worldComm().globalComm(), ierr );
#endif

        // Mimic PETSc's default solver and preconditioner
        this->setSolverType( GMRES );

        if ( this->worldComm().globalComm().size() == 1 )
            this->setPreconditionerType( LU_PRECOND );
        else
            this->setPreconditionerType( BLOCK_JACOBI_PRECOND );
    }
}
Example 10: setStepLimits
virtual void setStepLimits(Real dtmin, Real dtmax) {
  PetscErrorCode ierr;
  TSAdapt adapt;
  ierr = TSGetAdapt(this->_ts, &adapt);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
  ierr = TSAdaptSetStepLimits(adapt, dtmin, dtmax);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
}
Example 11: assert
inline void PetscVector::init (const int n,
                               const int n_local,
                               const std::vector<int>& ghost,
                               const bool fast,
                               const ParallelType type) {
  int ierr = 0;
  PetscInt petsc_n       = static_cast<PetscInt>(n);
  PetscInt petsc_n_local = static_cast<PetscInt>(n_local);
  PetscInt petsc_n_ghost = static_cast<PetscInt>(ghost.size());

  // If the mesh is not disjoint, every processor will either have
  // all the dofs, none of the dofs, or some non-zero dofs at the
  // boundary between processors.
  //
  // However we can't assert this, because someone might want to
  // construct a GHOSTED vector which doesn't include neighbor element
  // dofs.  Boyce tried to do so in user code, and we're going to want
  // to do so in System::project_vector().
  //
  // libmesh_assert(n_local == 0 || n_local == n || !ghost.empty());

  assert(sizeof(PetscInt) == sizeof(int));

  PetscInt* petsc_ghost = ghost.empty() ? PETSC_NULL :
    const_cast<PetscInt*>(reinterpret_cast<const PetscInt*>(&ghost[0]));

  // Clear initialized vectors
  if (this->initialized()) this->clear();

  assert(type == AUTOMATIC || type == GHOSTED);
  this->_type = GHOSTED;

  /* Make the global-to-local ghost cell map. */
  for (int i=0; i<(int)ghost.size(); i++){
    _global_to_local_map[ghost[i]] = i;
  }

  /* Create vector. */
  ierr = VecCreateGhost (MPI_COMM_WORLD, petsc_n_local, petsc_n,
                         petsc_n_ghost, petsc_ghost, &_vec);
  CHKERRABORT(MPI_COMM_WORLD, ierr);

  ierr = VecSetFromOptions (_vec);
  CHKERRABORT(MPI_COMM_WORLD, ierr);

  this->_is_initialized = true;
  this->_is_closed = true;

  if (fast == false)
    this->zero ();
}
Example 12: START_LOG
std::pair<unsigned int, Real>
PetscDMNonlinearSolver<T>::solve (SparseMatrix<T>& jac_in,  // System Jacobian Matrix
                                  NumericVector<T>& x_in,   // Solution vector
                                  NumericVector<T>& r_in,   // Residual vector
                                  const double,             // Stopping tolerance
                                  const unsigned int)
{
  START_LOG("solve()", "PetscNonlinearSolver");
  this->init ();

  // Make sure the data passed in are really of Petsc types
  libmesh_cast_ptr<PetscMatrix<T>*>(&jac_in);
  libmesh_cast_ptr<PetscVector<T>*>(&r_in);

  // Extract solution vector
  PetscVector<T>* x = libmesh_cast_ptr<PetscVector<T>*>(&x_in);

  int ierr = 0;
  int n_iterations = 0;

  // Should actually be a PetscReal, but I don't know which version of PETSc first introduced PetscReal
  Real final_residual_norm = 0.;

  if (this->user_presolve)
    this->user_presolve(this->system());

  // Set the preconditioning matrix
  if (this->_preconditioner)
    this->_preconditioner->set_matrix(jac_in);

  ierr = SNESSolve (this->_snes, PETSC_NULL, x->vec());
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  ierr = SNESGetIterationNumber(this->_snes, &n_iterations);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  ierr = SNESGetLinearSolveIterations(this->_snes, &this->_n_linear_iterations);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  ierr = SNESGetFunctionNorm(this->_snes, &final_residual_norm);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);

  // Get and store the reason for convergence
  SNESGetConvergedReason(this->_snes, &this->_reason);

  // Based on the PETSc 2.3.3 documentation, all diverged reasons are negative
  this->converged = (this->_reason >= 0);

  this->clear();

  STOP_LOG("solve()", "PetscNonlinearSolver");

  // return the # of its. and the final residual norm.
  return std::make_pair(n_iterations, final_residual_norm);
}
Example 13: step
virtual TimeStepperStatus step(Real *ftime) {
  PetscErrorCode ierr;
  TSConvergedReason reason;
  ierr = TSStep(_ts);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
  ierr = TSGetConvergedReason(_ts, &reason);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
  ierr = TSGetTime(_ts, ftime);
  CHKERRABORT(libMesh::COMM_WORLD, ierr);
  return (TimeStepperStatus)reason;
}
Example 14: PETScMatvec
void PETScMatvec(void *x, PRIMME_INT *ldx, void *y, PRIMME_INT *ldy, int *blockSize, primme_params *primme, int *err) {
   Mat *matrix;
   PetscInt m, n, mLocal, nLocal;
   PetscErrorCode ierr;

   matrix = (Mat *)primme->matrix;

   ierr = MatGetSize(*matrix, &m, &n); CHKERRABORT(*(MPI_Comm*)primme->commInfo, ierr);
   ierr = MatGetLocalSize(*matrix, &mLocal, &nLocal); CHKERRABORT(*(MPI_Comm*)primme->commInfo, ierr);
   assert(m == primme->n && n == primme->n && mLocal == primme->nLocal
         && nLocal == primme->nLocal);

   PETScMatvecGen(x, *ldx, y, *ldy, *blockSize, 0, *matrix, *(MPI_Comm*)primme->commInfo);
   *err = 0;
}
Example 15: PetscDMRegister
void PetscDMRegister()
{
  if (PetscDMRegistered)
    return;

  PetscErrorCode ierr;
#if PETSC_RELEASE_LESS_THAN(3,4,0)
  ierr = DMRegister(DMLIBMESH, PETSC_NULL, "DMCreate_libMesh", DMCreate_libMesh); CHKERRABORT(libMesh::COMM_WORLD, ierr);
#else
  ierr = DMRegister(DMLIBMESH, DMCreate_libMesh); CHKERRABORT(libMesh::COMM_WORLD, ierr);
#endif
  PetscDMRegistered = PETSC_TRUE;
}