This article collects typical usage examples of the C++ method BoundConstraint::project. If you have been wondering what BoundConstraint::project does in C++, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of its containing class, BoundConstraint.
The following presents 8 code examples of BoundConstraint::project, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
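Before the examples, a note on the method's contract: project(x) replaces x by its projection onto the feasible set defined by the bounds. The following is a minimal, self-contained sketch of that contract for component-wise bounds [lo, up] on a dense vector. It is an illustration only; projectOntoBounds is a hypothetical helper standing in for ROL's abstract Vector interface, not ROL's actual BoundConstraint implementation.

#include <algorithm>
#include <vector>

// Hypothetical dense projection onto component-wise bounds [lo, up]:
// x_i <- min(max(x_i, lo_i), up_i). ROL's BoundConstraint::project
// performs the same operation through its abstract Vector interface.
void projectOntoBounds(std::vector<double> &x,
                       const std::vector<double> &lo,
                       const std::vector<double> &up) {
  for (std::size_t i = 0; i < x.size(); ++i) {
    x[i] = std::min(std::max(x[i], lo[i]), up[i]);
  }
}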
Example 1: updateIterate
void updateIterate(Vector<Real> &xnew, const Vector<Real> &x, const Vector<Real> &s, Real alpha,
                   BoundConstraint<Real> &con ) {
  xnew.set(x);
  xnew.axpy(alpha,s);
  if ( con.isActivated() ) {
    con.project(xnew);
  }
}
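The pattern above, form x + alpha*s and project it when bounds are activated, yields the projected trial point of a projected line search. A hedged dense-vector sketch of the same arithmetic, reusing the hypothetical projectOntoBounds helper from the introduction:

// Projected trial iterate: xnew = P(x + alpha * s).
// Reuses the hypothetical projectOntoBounds sketch above.
std::vector<double> projectedTrialPoint(const std::vector<double> &x,
                                        const std::vector<double> &s,
                                        double alpha,
                                        const std::vector<double> &lo,
                                        const std::vector<double> &up) {
  std::vector<double> xnew(x);
  for (std::size_t i = 0; i < x.size(); ++i) xnew[i] += alpha * s[i];
  projectOntoBounds(xnew, lo, up);  // skipped when bounds are inactive
  return xnew;
}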
Example 2: computeCriticalityMeasure
/** \brief Compute the gradient-based criticality measure.

    The criticality measure is
    \f$\|x_k - P_{[a,b]}(x_k-\nabla f(x_k))\|_{\mathcal{X}}\f$.
    Here, \f$P_{[a,b]}\f$ denotes the projection onto the
    bound constraints.

    @param[in] x   is the current iterate
    @param[in] obj is the objective function
    @param[in] con are the bound constraints
    @param[in] tol is a tolerance for inexact evaluations of the objective function
*/
Real computeCriticalityMeasure(Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con, Real tol) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  obj.gradient(*(step_state->gradientVec),x,tol);
  xtmp_->set(x);
  xtmp_->axpy(-1.0,(step_state->gradientVec)->dual());
  con.project(*xtmp_);
  xtmp_->axpy(-1.0,x);
  return xtmp_->norm();
}
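To make the measure concrete, here is a hedged numeric sketch for the toy objective f(x) = 0.5*||x||^2, whose gradient is grad f(x) = x; projectOntoBounds is the hypothetical clamp from the introduction, not ROL code. With bounds [0, 1] the measure equals ||x|| and vanishes exactly at the constrained minimizer x = 0.

#include <cmath>

// Criticality measure ||x - P_[lo,up](x - grad f(x))|| for f(x) = 0.5*||x||^2.
double criticalityMeasure(const std::vector<double> &x,
                          const std::vector<double> &lo,
                          const std::vector<double> &up) {
  std::vector<double> y(x);
  for (std::size_t i = 0; i < x.size(); ++i) y[i] -= x[i];  // y = x - grad f(x) = 0 here
  projectOntoBounds(y, lo, up);                             // y = P(x - grad f(x))
  double norm2 = 0.0;
  for (std::size_t i = 0; i < x.size(); ++i) {
    const double d = y[i] - x[i];
    norm2 += d * d;
  }
  return std::sqrt(norm2);
}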
Example 3: update
void update( Vector<Real> &x, const Vector<Real> &s,
             Objective<Real> &obj, BoundConstraint<Real> &bnd,
             AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>()), one(1);
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Update iterate and store previous step
  algo_state.iter++;
  d_->set(x);
  x.plus(s);
  bnd.project(x);
  (step_state->descentVec)->set(x);
  (step_state->descentVec)->axpy(-one,*d_);
  algo_state.snorm = s.norm();
  // Compute new gradient
  gp_->set(*(step_state->gradientVec));
  obj.update(x,true,algo_state.iter);
  if ( computeObj_ ) {
    algo_state.value = obj.value(x,tol);
    algo_state.nfval++;
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;
  // Update Secant Information
  secant_->updateStorage(x,*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  if ( useProjectedGrad_ ) {
    gp_->set(*(step_state->gradientVec));
    bnd.computeProjectedGradient( *gp_, x );
    algo_state.gnorm = gp_->norm();
  }
  else {
    d_->set(x);
    d_->axpy(-one,(step_state->gradientVec)->dual());
    bnd.project(*d_);
    d_->axpy(-one,x);
    algo_state.gnorm = d_->norm();
  }
}
Example 4: update
/** \brief Update step, if successful.

    Given a trial step, \f$s_k\f$, this function updates \f$x_{k+1}=x_k+s_k\f$.
    This function also updates the secant approximation.

    @param[in,out] x          is the updated iterate
    @param[in]     s          is the computed trial step
    @param[in]     obj        is the objective function
    @param[in]     con        are the bound constraints
    @param[in]     algo_state contains the current state of the algorithm
*/
void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj, BoundConstraint<Real> &con,
             AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON);
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Update iterate
  algo_state.iter++;
  x.axpy(1.0, s);
  // Compute new gradient
  if ( edesc_ == DESCENT_SECANT ||
      (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    gp_->set(*(step_state->gradientVec));
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;
  // Update Secant Information
  if ( edesc_ == DESCENT_SECANT ||
      (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    secant_->update(*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
  }
  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  if ( con.isActivated() ) {
    if ( useProjectedGrad_ ) {
      gp_->set(*(step_state->gradientVec));
      con.computeProjectedGradient( *gp_, x );
      algo_state.gnorm = gp_->norm();
    }
    else {
      d_->set(x);
      d_->axpy(-1.0,(step_state->gradientVec)->dual());
      con.project(*d_);
      d_->axpy(-1.0,x);
      algo_state.gnorm = d_->norm();
    }
  }
  else {
    algo_state.gnorm = (step_state->gradientVec)->norm();
  }
}
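The secant_->update call above feeds the new gradient, the old gradient, and the step into a quasi-Newton approximation. As a hedged illustration of the bookkeeping such an update typically performs (this is not ROL's Secant class), here is a limited-memory-style pair store with the standard curvature safeguard s·y > 0:

// Hypothetical curvature-pair bookkeeping behind a secant update:
// store s_k = x_{k+1} - x_k and y_k = g_{k+1} - g_k, keeping the pair
// only when s_k . y_k > 0 (required for a positive-definite
// BFGS-style approximation).
struct SecantPair { std::vector<double> s, y; };

bool maybeStorePair(const std::vector<double> &s,
                    const std::vector<double> &gNew,
                    const std::vector<double> &gOld,
                    std::vector<SecantPair> &history) {
  SecantPair p;
  p.s = s;
  p.y.resize(s.size());
  double sy = 0.0;
  for (std::size_t i = 0; i < s.size(); ++i) {
    p.y[i] = gNew[i] - gOld[i];
    sy += p.s[i] * p.y[i];
  }
  if (sy <= 0.0) return false;  // skip update; curvature condition fails
  history.push_back(p);
  return true;
}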
Example 5: initialize
/** \brief Initialize step.

    This includes projecting the initial guess onto the constraints,
    computing the initial objective function value and gradient,
    and initializing the dual variables.

    @param[in,out] x          is the initial guess
    @param[in]     obj        is the objective function
    @param[in]     con        are the bound constraints
    @param[in]     algo_state is the current state of the algorithm
*/
void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
                 Objective<Real> &obj, BoundConstraint<Real> &con,
                 AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Initialize state descent direction and gradient storage
  step_state->descentVec  = s.clone();
  step_state->gradientVec = g.clone();
  step_state->searchSize  = 0.0;
  // Initialize additional storage
  xlam_ = x.clone();
  x0_   = x.clone();
  xbnd_ = x.clone();
  As_   = s.clone();
  xtmp_ = x.clone();
  res_  = g.clone();
  Ag_   = g.clone();
  rtmp_ = g.clone();
  gtmp_ = g.clone();
  // Project x onto constraint set
  con.project(x);
  // Update objective function, get value, and get gradient
  Real tol = std::sqrt(ROL_EPSILON);
  obj.update(x,true,algo_state.iter);
  algo_state.value = obj.value(x,tol);
  algo_state.nfval++;
  algo_state.gnorm = computeCriticalityMeasure(x,obj,con,tol);
  algo_state.ngrad++;
  // Initialize dual variable
  lambda_ = s.clone();
  lambda_->set((step_state->gradientVec)->dual());
  lambda_->scale(-1.0);
  //con.setVectorToLowerBound(*lambda_);
  // Initialize Hessian and preconditioner
  Teuchos::RCP<Objective<Real> >       obj_ptr = Teuchos::rcp(&obj, false);
  Teuchos::RCP<BoundConstraint<Real> > con_ptr = Teuchos::rcp(&con, false);
  hessian_ = Teuchos::rcp(
    new PrimalDualHessian<Real>(secant_,obj_ptr,con_ptr,algo_state.iterateVec,xlam_,useSecantHessVec_) );
  precond_ = Teuchos::rcp(
    new PrimalDualPreconditioner<Real>(secant_,obj_ptr,con_ptr,algo_state.iterateVec,xlam_,
                                       useSecantPrecond_) );
}
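Two design points worth noting: the initial guess is projected before the first objective and gradient evaluations, so they always occur at a feasible point, and the dual variable is seeded with the negative (dualized) gradient. A condensed, hypothetical sketch of that ordering on dense vectors, reusing the earlier projectOntoBounds helper; the evaluation calls are placeholders, not ROL API:

// Hypothetical initialization order for a bound-constrained solver:
// 1) make the starting point feasible, 2) evaluate the gradient there,
// 3) seed the multiplier estimate with lambda = -grad f(x).
void initializeSketch(std::vector<double> &x,
                      std::vector<double> &lambda,
                      const std::vector<double> &lo,
                      const std::vector<double> &up,
                      const std::function<std::vector<double>(const std::vector<double>&)> &gradf) {
  projectOntoBounds(x, lo, up);            // feasibility before evaluation
  const std::vector<double> g = gradf(x);  // gradient at the projected point
  lambda.resize(g.size());
  for (std::size_t i = 0; i < g.size(); ++i)
    lambda[i] = -g[i];                     // lambda = -grad f(x), as above
}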
Example 6: GradDotStep
Real GradDotStep(const Vector<Real> &g, const Vector<Real> &s,
                 const Vector<Real> &x,
                 BoundConstraint<Real> &bnd, Real eps = 0) {
  Real gs(0), one(1);
  if (!bnd.isActivated()) {
    gs = s.dot(g.dual());
  }
  else {
    // Inactive components: the usual inner product <g, s>
    d_->set(s);
    bnd.pruneActive(*d_,g,x,eps);
    gs = d_->dot(g.dual());
    // Active components: use the projected-gradient path x - P(x - g)
    d_->set(x);
    d_->axpy(-one,g.dual());
    bnd.project(*d_);
    d_->scale(-one);
    d_->plus(x);
    bnd.pruneInactive(*d_,g,x,eps);
    gs -= d_->dot(g.dual());
  }
  return gs;
}
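In words: on the inactive set the directional derivative is the plain inner product <g, s>, while on the estimated ε-active set it is replaced by the projected-gradient path term <g, x - P(x - g)>. A hedged dense sketch follows, assuming a simple ε-binding activity test (the component sits at a bound and the gradient pushes it further outside); the exact pruneActive/pruneInactive semantics live in ROL:

// Hypothetical dense version of GradDotStep for component-wise bounds.
double gradDotStep(const std::vector<double> &g, const std::vector<double> &s,
                   const std::vector<double> &x,
                   const std::vector<double> &lo, const std::vector<double> &up,
                   double eps) {
  double gs = 0.0;
  for (std::size_t i = 0; i < x.size(); ++i) {
    const bool active = (x[i] <= lo[i] + eps && g[i] > 0.0) ||
                        (x[i] >= up[i] - eps && g[i] < 0.0);
    if (!active) {
      gs += g[i] * s[i];                                        // inactive: <g, s>
    } else {
      const double proj = std::min(std::max(x[i] - g[i], lo[i]), up[i]);
      gs -= (x[i] - proj) * g[i];                               // active: path term
    }
  }
  return gs;
}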
Example 7: compute
/** \brief Compute step.

    Computes a trial step, \f$s_k\f$, as defined by the enum EDescent. Once the
    trial step is determined, this function determines an approximate minimizer
    of the 1D function \f$\phi_k(t) = f(x_k+ts_k)\f$. This approximate
    minimizer must satisfy sufficient decrease and curvature conditions.

    @param[out] s          is the computed trial step
    @param[in]  x          is the current iterate
    @param[in]  obj        is the objective function
    @param[in]  con        are the bound constraints
    @param[in]  algo_state contains the current state of the algorithm
*/
void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con,
              AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  Real tol = std::sqrt(ROL_EPSILON);
  // Set active set parameter
  Real eps = 0.0;
  if ( con.isActivated() ) {
    eps = algo_state.gnorm;
  }
  lineSearch_->setData(eps);
  if ( hessian_ != Teuchos::null ) {
    hessian_->setData(eps);
  }
  if ( precond_ != Teuchos::null ) {
    precond_->setData(eps);
  }
  // Compute step s
  switch(edesc_) {
    case DESCENT_NEWTONKRYLOV:
      flagKrylov_ = 0;
      krylov_->run(s,*hessian_,*(step_state->gradientVec),*precond_,iterKrylov_,flagKrylov_);
      break;
    case DESCENT_NEWTON:
    case DESCENT_SECANT:
      hessian_->applyInverse(s,*(step_state->gradientVec),tol);
      break;
    case DESCENT_NONLINEARCG:
      nlcg_->run(s,*(step_state->gradientVec),x,obj);
      break;
    case DESCENT_STEEPEST:
      s.set(step_state->gradientVec->dual());
      break;
    default: break;
  }
  // Compute g.dot(s)
  Real gs = 0.0;
  if ( !con.isActivated() ) {
    gs = -s.dot((step_state->gradientVec)->dual());
  }
  else {
    if ( edesc_ == DESCENT_STEEPEST ) {
      d_->set(x);
      d_->axpy(-1.0,s);
      con.project(*d_);
      d_->scale(-1.0);
      d_->plus(x);
      //d->set(s);
      //con.pruneActive(*d,s,x,eps);
      //con.pruneActive(*d,*(step_state->gradientVec),x,eps);
      gs = -d_->dot((step_state->gradientVec)->dual());
    }
    else {
      d_->set(s);
      con.pruneActive(*d_,*(step_state->gradientVec),x,eps);
      gs = -d_->dot((step_state->gradientVec)->dual());
      d_->set(x);
      d_->axpy(-1.0,(step_state->gradientVec)->dual());
      con.project(*d_);
      d_->scale(-1.0);
      d_->plus(x);
      con.pruneInactive(*d_,*(step_state->gradientVec),x,eps);
      gs -= d_->dot((step_state->gradientVec)->dual());
    }
  }
  // Check that s is a descent direction, i.e., g.dot(s) < 0
  if ( gs >= 0.0 || (flagKrylov_ == 2 && iterKrylov_ <= 1) ) {
    s.set((step_state->gradientVec)->dual());
    if ( con.isActivated() ) {
      d_->set(s);
      con.pruneActive(*d_,s,x);
      gs = -d_->dot((step_state->gradientVec)->dual());
    }
    else {
      gs = -s.dot((step_state->gradientVec)->dual());
    }
  }
  s.scale(-1.0);
  // Perform line search
  Real fnew = algo_state.value;
  ls_nfval_ = 0;
  ls_ngrad_ = 0;
  //......... (remainder of the function omitted) .........
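The omitted tail performs the line search along s. As a hedged stand-in for the lineSearch_ call, here is a minimal backtracking search enforcing only the Armijo sufficient-decrease condition phi(t) <= phi(0) + c1*t*gs with gs = <g, s> < 0; ROL's line searches also support curvature conditions, which this sketch omits:

#include <functional>

// Minimal backtracking line search enforcing the Armijo condition
// f(x + t*s) <= f(x) + c1 * t * gs, where gs = <grad f(x), s> < 0.
// A hedged stand-in for the omitted line-search call, not ROL code.
double backtrack(const std::function<double(double)> &phi,  // phi(t) = f(x + t*s)
                 double f0, double gs,
                 double t = 1.0, double c1 = 1e-4, double shrink = 0.5,
                 int maxit = 30) {
  for (int k = 0; k < maxit; ++k) {
    if (phi(t) <= f0 + c1 * t * gs) return t;  // sufficient decrease holds
    t *= shrink;                               // otherwise shrink the step
  }
  return t;  // best effort after maxit backtracks
}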
Example 8: compute
//......... (beginning of the function omitted) .........
    xlam_->set(*x0_);                             // xlam = x0
    xlam_->axpy(scale_,*(lambda_));               // xlam = x0 + c*lambda
    /********************************************************************/
    // PROJECT x ONTO PRIMAL DUAL FEASIBLE SET
    /********************************************************************/
    As_->zero();                                  // As = 0
    con.setVectorToUpperBound(*xbnd_);            // xbnd = u
    xbnd_->axpy(-1.0,x);                          // xbnd = u - x
    xtmp_->set(*xbnd_);                           // tmp  = u - x
    con.pruneUpperActive(*xtmp_,*xlam_,neps_);    // tmp  = I(u - x)
    xbnd_->axpy(-1.0,*xtmp_);                     // xbnd = A(u - x)
    As_->plus(*xbnd_);                            // As  += A(u - x)
    con.setVectorToLowerBound(*xbnd_);            // xbnd = l
    xbnd_->axpy(-1.0,x);                          // xbnd = l - x
    xtmp_->set(*xbnd_);                           // tmp  = l - x
    con.pruneLowerActive(*xtmp_,*xlam_,neps_);    // tmp  = I(l - x)
    xbnd_->axpy(-1.0,*xtmp_);                     // xbnd = A(l - x)
    As_->plus(*xbnd_);                            // As  += A(l - x)
    /********************************************************************/
    // APPLY HESSIAN TO ACTIVE COMPONENTS OF s AND REMOVE INACTIVE
    /********************************************************************/
    itol_ = std::sqrt(ROL_EPSILON);
    if ( useSecantHessVec_ && secant_ != Teuchos::null ) {  // IHAs = H*As
      secant_->applyB(*gtmp_,*As_,x);
    }
    else {
      obj.hessVec(*gtmp_,*As_,x,itol_);
    }
    con.pruneActive(*gtmp_,*xlam_,neps_);         // IHAs = I(H*As)
    /********************************************************************/
    // SEPARATE ACTIVE AND INACTIVE COMPONENTS OF THE GRADIENT
    /********************************************************************/
    rtmp_->set(*(step_state->gradientVec));       // Inactive components
    con.pruneActive(*rtmp_,*xlam_,neps_);
    Ag_->set(*(step_state->gradientVec));         // Active components
    Ag_->axpy(-1.0,*rtmp_);
    /********************************************************************/
    // SOLVE REDUCED NEWTON SYSTEM
    /********************************************************************/
    rtmp_->plus(*gtmp_);
    rtmp_->scale(-1.0);                           // rhs = -Ig - I(H*As)
    s.zero();
    if ( rtmp_->norm() > 0.0 ) {
      //solve(s,*rtmp_,*xlam_,x,obj,con);         // Call conjugate residuals
      krylov_->run(s,*hessian_,*rtmp_,*precond_,iterCR_,flagCR_);
      con.pruneActive(s,*xlam_,neps_);            // s <- Is
    }
    s.plus(*As_);                                 // s = Is + As
    /********************************************************************/
    // UPDATE MULTIPLIER
    /********************************************************************/
    if ( useSecantHessVec_ && secant_ != Teuchos::null ) {
      secant_->applyB(*rtmp_,s,x);
    }
    else {
      obj.hessVec(*rtmp_,s,x,itol_);
    }
    gtmp_->set(*rtmp_);
    con.pruneActive(*gtmp_,*xlam_,neps_);
    lambda_->set(*rtmp_);
    lambda_->axpy(-1.0,*gtmp_);
    lambda_->plus(*Ag_);
    lambda_->scale(-1.0);
    /********************************************************************/
    // UPDATE STEP
    /********************************************************************/
    x0_->set(x);
    x0_->plus(s);
    res_->set(*(step_state->gradientVec));
    res_->plus(*rtmp_);
    // Compute criticality measure
    xtmp_->set(*x0_);
    xtmp_->axpy(-1.0,res_->dual());
    con.project(*xtmp_);
    xtmp_->axpy(-1.0,*x0_);
    // std::cout << s.norm()        << "  "
    //           << tmp->norm()     << "  "
    //           << res_->norm()    << "  "
    //           << lambda_->norm() << "  "
    //           << flagCR_ << "  " << iterCR_ << "\n";
    if ( xtmp_->norm() < gtol_*algo_state.gnorm ) {
      flag_ = 0;
      break;
    }
    if ( s.norm() < stol_*x.norm() ) {
      flag_ = 2;
      break;
    }
  }  // end main iteration loop
  if ( iter_ == maxit_ ) {
    flag_ = 1;
  }
  else {
    iter_++;
  }
}
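The As_ accumulation at the top of this example builds the active-set portion of the step: on indices estimated active at xlam = x0 + c*lambda, the step moves straight to the corresponding bound. A hedged dense sketch of that split, using the same assumed ε-activity test as earlier; it is an illustration, not ROL's pruneUpperActive/pruneLowerActive:

// Hypothetical dense sketch of the active-set step assembly above:
// on the estimated active set, move straight to the bound, i.e.
// As_i = u_i - x_i (upper-active) or l_i - x_i (lower-active), else 0.
std::vector<double> activeSetStep(const std::vector<double> &x,
                                  const std::vector<double> &xlam,
                                  const std::vector<double> &lo,
                                  const std::vector<double> &up,
                                  double eps) {
  std::vector<double> As(x.size(), 0.0);
  for (std::size_t i = 0; i < x.size(); ++i) {
    if (xlam[i] >= up[i] - eps)      As[i] = up[i] - x[i];  // upper-active
    else if (xlam[i] <= lo[i] + eps) As[i] = lo[i] - x[i];  // lower-active
  }
  return As;
}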