This article collects typical usage examples of the C++ method Objective::gradient. If you have been wondering what Objective::gradient does, how to call it, or what real code that uses it looks like, the curated examples below should help; you can also explore further usages of the enclosing class Objective. The snippets are drawn from the ROL optimization package (part of Trilinos).
Six code examples of the Objective::gradient method are shown below, sorted by popularity by default.
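Before the examples, a quick orientation (a minimal sketch of our own, not one of the collected snippets): in ROL, Objective<Real>::gradient(g, x, tol) has a default implementation that falls back to a finite-difference approximation built from value(), so the examples below assume a subclass that overrides it with an analytic gradient, along these lines:

#include "ROL_Objective.hpp"

// Toy objective f(x) = 0.5*||x||^2 with an analytic gradient (illustration only).
template<class Real>
class QuadraticObjective : public ROL::Objective<Real> {
public:
  Real value( const ROL::Vector<Real> &x, Real &tol ) {
    return static_cast<Real>(0.5)*x.dot(x);
  }
  // Overriding gradient avoids the default finite-difference fallback.
  void gradient( ROL::Vector<Real> &g, const ROL::Vector<Real> &x, Real &tol ) {
    g.set(x);  // grad f(x) = x for this objective
  }
};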
Example 1: update
void update( Vector<Real> &x, const Vector<Real> &s,
             Objective<Real> &obj, BoundConstraint<Real> &bnd,
             AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Update iterate
  algo_state.iter++;
  x.plus(s);
  (step_state->descentVec)->set(s);
  algo_state.snorm = s.norm();
  // Compute new gradient
  if ( useSecantPrecond_ ) {
    gp_->set(*(step_state->gradientVec));
  }
  obj.update(x,true,algo_state.iter);
  if ( computeObj_ ) {
    algo_state.value = obj.value(x,tol);
    algo_state.nfval++;
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;
  // Update secant information
  if ( useSecantPrecond_ ) {
    secant_->updateStorage(x,*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
  }
  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  algo_state.gnorm = step_state->gradientVec->norm();
}
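For context on the updateStorage call: quasi-Newton (secant) methods build their curvature model from the step and the change in gradient between consecutive iterates. In standard notation (our summary, not ROL documentation), the stored pair is \f$s_k = x_{k+1} - x_k\f$ and \f$y_k = \nabla f(x_{k+1}) - \nabla f(x_k)\f$, and the updated Hessian approximation must satisfy the secant condition \f$B_{k+1}s_k = y_k\f$. This is why the code saves the old gradient in gp_ before calling obj.gradient, then hands both gradients, the step s, and its norm to the secant object.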
Example 2: computeCriticalityMeasure
/** \brief Compute the gradient-based criticality measure.

    The criticality measure is
    \f$\|x_k - P_{[a,b]}(x_k-\nabla f(x_k))\|_{\mathcal{X}}\f$.
    Here, \f$P_{[a,b]}\f$ denotes the projection onto the
    bound constraints.

    @param[in]  x    is the current iterate
    @param[in]  obj  is the objective function
    @param[in]  con  are the bound constraints
    @param[in]  tol  is a tolerance for inexact evaluations of the objective function
*/
Real computeCriticalityMeasure(Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con, Real tol) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  obj.gradient(*(step_state->gradientVec),x,tol);
  xtmp_->set(x);
  xtmp_->axpy(-1.0,(step_state->gradientVec)->dual());
  con.project(*xtmp_);
  xtmp_->axpy(-1.0,x);
  return xtmp_->norm();
}
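As a standalone sanity check of this measure (a toy sketch independent of ROL; the function name criticality is ours): with bounds [a,b] = [0,1], x = 1, and f'(x) = 2, the projection clips x - f'(x) = -1 back to 0, so the measure is |1 - 0| = 1, while an interior stationary point yields 0.

#include <algorithm>
#include <cassert>
#include <cmath>

// Scalar criticality measure |x - P_[a,b](x - g)| for a one-dimensional problem.
double criticality(double x, double g, double a, double b) {
  double proj = std::min(std::max(x - g, a), b); // P_[a,b](x - g)
  return std::abs(x - proj);
}

int main() {
  assert(criticality(1.0, 2.0, 0.0, 1.0) == 1.0); // bound active, not stationary
  assert(criticality(0.5, 0.0, 0.0, 1.0) == 0.0); // gradient zero: stationary
  return 0;
}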
Example 3: update
/** \brief Update step, if successful.

    Given a trial step, \f$s_k\f$, this function updates \f$x_{k+1}=x_k+s_k\f$.
    It also updates the secant approximation.

    @param[in,out]  x           is the updated iterate
    @param[in]      s           is the computed trial step
    @param[in]      obj         is the objective function
    @param[in]      con         are the bound constraints
    @param[in,out]  algo_state  contains the current state of the algorithm
*/
void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj, BoundConstraint<Real> &con,
             AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON); // older ROL interface; newer versions use ROL_EPSILON<Real>()
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Update iterate
  algo_state.iter++;
  x.axpy(1.0, s);
  // Compute new gradient; save the old one if a secant approximation is maintained
  if ( edesc_ == DESCENT_SECANT ||
      (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    gp_->set(*(step_state->gradientVec));
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;
  // Update secant information
  if ( edesc_ == DESCENT_SECANT ||
      (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    secant_->update(*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
  }
  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  if ( con.isActivated() ) {
    if ( useProjectedGrad_ ) {
      // Criticality via the projected gradient
      gp_->set(*(step_state->gradientVec));
      con.computeProjectedGradient( *gp_, x );
      algo_state.gnorm = gp_->norm();
    }
    else {
      // Criticality via the fixed-point residual ||P(x - grad f(x)) - x||
      d_->set(x);
      d_->axpy(-1.0,(step_state->gradientVec)->dual());
      con.project(*d_);
      d_->axpy(-1.0,x);
      algo_state.gnorm = d_->norm();
    }
  }
  else {
    algo_state.gnorm = (step_state->gradientVec)->norm();
  }
}
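Note how Example 3 measures stationarity in two ways once bounds are active (our summary of the two branches): the useProjectedGrad_ branch takes the norm of the gradient with its binding active-set components removed, while the default branch uses the fixed-point residual \f$\|P_{[a,b]}(x_k - \nabla f(x_k)) - x_k\|\f$, the same quantity returned by computeCriticalityMeasure in Example 2. Both vanish exactly at stationary points of the bound-constrained problem but can differ in magnitude elsewhere, which is why the choice is exposed through useProjectedGrad_.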
Example 4: update
void update( Vector<Real> &x, const Vector<Real> &s,
             Objective<Real> &obj, BoundConstraint<Real> &bnd,
             AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>()), one(1);
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Update iterate and store previous step
  algo_state.iter++;
  d_->set(x);
  x.plus(s);
  bnd.project(x);
  (step_state->descentVec)->set(x);
  (step_state->descentVec)->axpy(-one,*d_);
  algo_state.snorm = s.norm();
  // Compute new gradient
  gp_->set(*(step_state->gradientVec));
  obj.update(x,true,algo_state.iter);
  if ( computeObj_ ) {
    algo_state.value = obj.value(x,tol);
    algo_state.nfval++;
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;
  // Update secant information
  secant_->updateStorage(x,*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  if ( useProjectedGrad_ ) {
    gp_->set(*(step_state->gradientVec));
    bnd.computeProjectedGradient( *gp_, x );
    algo_state.gnorm = gp_->norm();
  }
  else {
    d_->set(x);
    d_->axpy(-one,(step_state->gradientVec)->dual());
    bnd.project(*d_);
    d_->axpy(-one,x);
    algo_state.gnorm = d_->norm();
  }
}
Example 5: update
void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj, BoundConstraint<Real> &con,
             AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Update iterate and store step
  algo_state.iter++;
  x.plus(s);
  (step_state->descentVec)->set(s);
  algo_state.snorm = s.norm();
  // Compute new gradient
  obj.update(x,true,algo_state.iter);
  if ( computeObj_ ) {
    algo_state.value = obj.value(x,tol);
    algo_state.nfval++;
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;
  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  algo_state.gnorm = (step_state->gradientVec)->norm();
}
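Examples 1 through 5 all follow the same call sequence: obj.update(x,true,iter) notifies the objective that the iterate changed, obj.value and obj.gradient are then evaluated at the new point, and the nfval/ngrad counters in AlgorithmState are incremented accordingly. A stripped-down driver illustrating the same pattern (a sketch with a hypothetical function name, not a ROL class; it uses the classic update(x,flag,iter) interface shown in the examples):

#include <cmath>
#include "ROL_Objective.hpp"
#include "ROL_Types.hpp"

// Hypothetical steepest-descent loop following the examples' call pattern.
template<class Real>
void descend( ROL::Objective<Real> &obj, ROL::Vector<Real> &x,
              ROL::Vector<Real> &g, Real alpha, int maxit ) {
  Real tol = std::sqrt(ROL::ROL_EPSILON<Real>());
  for (int iter = 0; iter < maxit; ++iter) {
    obj.update(x,true,iter);       // iterate changed; flag=true forces recomputation
    Real fval = obj.value(x,tol);  // one objective evaluation (nfval++ in the examples)
    obj.gradient(g,x,tol);         // one gradient evaluation (ngrad++ in the examples)
    x.axpy(-alpha,g.dual());       // x <- x - alpha * grad f(x)
    (void)fval;                    // fval would feed a stopping test in real code
  }
}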
Example 6: status
virtual bool status( const ELineSearch type, int &ls_neval, int &ls_ngrad, const Real alpha,
                     const Real fold, const Real sgold, const Real fnew,
                     const Vector<Real> &x, const Vector<Real> &s,
                     Objective<Real> &obj, BoundConstraint<Real> &con ) {
  Real tol = std::sqrt(ROL_EPSILON); // older ROL interface; newer versions use ROL_EPSILON<Real>()
  // Check Armijo condition
  bool armijo = false;
  if ( con.isActivated() ) {
    Real gs = 0.0;
    if ( edesc_ == DESCENT_STEEPEST ) {
      updateIterate(*d_,x,s,alpha,con);
      d_->scale(-1.0);
      d_->plus(x);
      gs = -s.dot(*d_);
    }
    else {
      d_->set(s);
      d_->scale(-1.0);
      con.pruneActive(*d_,*(grad_),x,eps_);
      gs = alpha*(grad_)->dot(*d_);
      d_->zero();
      updateIterate(*d_,x,s,alpha,con);
      d_->scale(-1.0);
      d_->plus(x);
      con.pruneInactive(*d_,*(grad_),x,eps_);
      gs += d_->dot(grad_->dual());
    }
    if ( fnew <= fold - c1_*gs ) {
      armijo = true;
    }
  }
  else {
    if ( fnew <= fold + c1_*alpha*sgold ) {
      armijo = true;
    }
  }
  // Check maximum number of line-search function evaluations
  bool itcond = false;
  if ( ls_neval >= maxit_ ) {
    itcond = true;
  }
  // Check curvature condition
  bool curvcond = false;
  if ( armijo && ((type != LINESEARCH_BACKTRACKING && type != LINESEARCH_CUBICINTERP) ||
                  (edesc_ == DESCENT_NONLINEARCG)) ) {
    if (econd_ == CURVATURECONDITION_GOLDSTEIN) {
      if (fnew >= fold + (1.0-c1_)*alpha*sgold) {
        curvcond = true;
      }
    }
    else if (econd_ == CURVATURECONDITION_NULL) {
      curvcond = true;
    }
    else {
      updateIterate(*xtst_,x,s,alpha,con);
      obj.update(*xtst_);
      obj.gradient(*g_,*xtst_,tol);
      Real sgnew = 0.0;
      if ( con.isActivated() ) {
        d_->set(s);
        d_->scale(-alpha);
        con.pruneActive(*d_,s,x);
        sgnew = -d_->dot(g_->dual());
      }
      else {
        sgnew = s.dot(g_->dual());
      }
      ls_ngrad++;
      if ( ((econd_ == CURVATURECONDITION_WOLFE)
               && (sgnew >= c2_*sgold))
           || ((econd_ == CURVATURECONDITION_STRONGWOLFE)
               && (std::abs(sgnew) <= c2_*std::abs(sgold)))
           || ((econd_ == CURVATURECONDITION_GENERALIZEDWOLFE)
               && (c2_*sgold <= sgnew && sgnew <= -c3_*sgold))
           || ((econd_ == CURVATURECONDITION_APPROXIMATEWOLFE)
               && (c2_*sgold <= sgnew && sgnew <= (2.0*c1_ - 1.0)*sgold)) ) {
        curvcond = true;
      }
    }
  }
  // Backtracking-type searches only require sufficient decrease,
  // except under nonlinear CG, which also needs the curvature condition.
  if (type == LINESEARCH_BACKTRACKING || type == LINESEARCH_CUBICINTERP) {
    if (edesc_ == DESCENT_NONLINEARCG) {
      return ((armijo && curvcond) || itcond);
    }
    else {
      return (armijo || itcond);
    }
  }
  else {
    return ((armijo && curvcond) || itcond);
  }
}
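For reference, the tests implemented above in standard line-search notation (our summary; sgold corresponds to \f$\nabla f(x_k)^\top s\f$, sgnew to \f$\nabla f(x_k+\alpha s)^\top s\f$, with \f$0 < c_1 < c_2 < 1\f$):

\f$ f(x_k+\alpha s) \le f(x_k) + c_1\,\alpha\,\nabla f(x_k)^\top s \f$   (Armijo sufficient decrease)
\f$ f(x_k+\alpha s) \ge f(x_k) + (1-c_1)\,\alpha\,\nabla f(x_k)^\top s \f$   (Goldstein)
\f$ \nabla f(x_k+\alpha s)^\top s \ge c_2\,\nabla f(x_k)^\top s \f$   (Wolfe curvature)
\f$ |\nabla f(x_k+\alpha s)^\top s| \le c_2\,|\nabla f(x_k)^\top s| \f$   (strong Wolfe)

In the bound-constrained branches the directional derivatives are replaced by pruned variants, so that only components outside the active set contribute.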