This page collects typical usage examples of combined_nu_comp_err from the C++ namespace AbstractLinAlgPack. If you have been wondering what AbstractLinAlgPack::combined_nu_comp_err does and how to call it, the curated example below may help. You can also explore the enclosing AbstractLinAlgPack namespace for related functionality.
One code example of AbstractLinAlgPack::combined_nu_comp_err is shown below.
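Before the full example, here is a minimal sketch of a typical call site. This is a sketch only: the names nu, x, and nlp are hypothetical stand-ins for the iteration quantities used in Example 1 below, and value_type is assumed to be the library's scalar typedef; the call itself mirrors the one in the example.

// Minimal usage sketch (assumed context; nu, x, and nlp are hypothetical
// stand-ins for the quantities used in Example 1 below):
using AbstractLinAlgPack::combined_nu_comp_err;

const value_type comp_err =
  combined_nu_comp_err(
    nu,        // bound multipliers, e.g. s.nu().get_k(0)
    x,         // current iterate,   e.g. s.x().get_k(0)
    nlp.xl(),  // lower bounds on x
    nlp.xu()   // upper bounds on x
    );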
Example 1: Converged
bool CheckConvergenceStd_Strategy::Converged(
  Algorithm& _algo
  )
{
  using AbstractLinAlgPack::assert_print_nan_inf;
  using AbstractLinAlgPack::combined_nu_comp_err;

  NLPAlgo      &algo = rsqp_algo(_algo);
  NLPAlgoState &s    = algo.rsqp_state();
  NLP          &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  const size_type
    n  = nlp.n(),
    m  = nlp.m(),
    nb = nlp.num_bounded_x();

  // Get the iteration quantities
  IterQuantityAccess<value_type>
    &opt_kkt_err_iq  = s.opt_kkt_err(),
    &feas_kkt_err_iq = s.feas_kkt_err(),
    &comp_kkt_err_iq = s.comp_kkt_err();
  IterQuantityAccess<VectorMutable>
    &x_iq   = s.x(),
    &d_iq   = s.d(),
    &Gf_iq  = s.Gf(),
    *c_iq   = m     ? &s.c()   : NULL,
    *rGL_iq = n > m ? &s.rGL() : NULL,
    *GL_iq  = n > m ? &s.GL()  : NULL,
    *nu_iq  = n > m ? &s.nu()  : NULL;
  // opt_err = (||rGL||inf or ||GL||inf) / (1 + ||Gf||inf)
  value_type
    norm_inf_Gf_k    = 0.0,
    norm_inf_GLrGL_k = 0.0;
  if( n > m && scale_opt_error_by_Gf() && Gf_iq.updated_k(0) ) {
    assert_print_nan_inf(
      norm_inf_Gf_k = Gf_iq.get_k(0).norm_inf(),
      "||Gf_k||inf", true, &out
      );
  }
  // NOTE:
  // The strategy object CheckConvergenceIP_Strategy assumes that this
  // is always the gradient of the Lagrangian of the original problem,
  // not the gradient of the Lagrangian for psi (i.e. do not use the
  // augmented NLP info here).
  if( n > m ) {
    if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR ) {
      assert_print_nan_inf( norm_inf_GLrGL_k = rGL_iq->get_k(0).norm_inf(),
        "||rGL_k||inf", true, &out );
    }
    else {
      assert_print_nan_inf( norm_inf_GLrGL_k = GL_iq->get_k(0).norm_inf(),
        "||GL_k||inf", true, &out );
    }
  }
  const value_type
    opt_scale_factor = 1.0 + norm_inf_Gf_k,
    opt_err          = norm_inf_GLrGL_k / opt_scale_factor;

  // feas_err
  const value_type feas_err = ( m ? c_iq->get_k(0).norm_inf() : 0.0 );
  // comp_err
  value_type comp_err = 0.0;
  if( n > m ) {
    if( nb > 0 ) {
      comp_err = combined_nu_comp_err(
        nu_iq->get_k(0), x_iq.get_k(0), nlp.xl(), nlp.xu() );
    }
    if( m ) {
      assert_print_nan_inf( feas_err, "||c_k||inf", true, &out );
    }
  }
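  // Note on combined_nu_comp_err (added comment, not from the original
  // source): given the bound multipliers nu_k, the iterate x_k, and the
  // bound vectors xl and xu, it returns a scalar complementarity error --
  // roughly, how far the pairs (nu_i, x_i) are from satisfying
  // complementary slackness with the bounds. The exact formula is defined
  // by AbstractLinAlgPack; this description is an assumption inferred
  // from how the result is used as comp_err here.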
  // scaling factors
  const value_type
    scale_opt_factor  = CalculateScalingFactor(s, scale_opt_error_by()),
    scale_feas_factor = CalculateScalingFactor(s, scale_feas_error_by()),
    scale_comp_factor = CalculateScalingFactor(s, scale_comp_error_by());

  // kkt_err
  const value_type
    opt_kkt_err_k  = opt_err  / scale_opt_factor,
    feas_kkt_err_k = feas_err / scale_feas_factor,
    comp_kkt_err_k = comp_err / scale_comp_factor;

  // update the iteration quantities
  if( n > m )
    opt_kkt_err_iq.set_k(0) = opt_kkt_err_k;
  feas_kkt_err_iq.set_k(0) = feas_kkt_err_k;
  comp_kkt_err_iq.set_k(0) = comp_kkt_err_k;
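  // Note (added comment, not from the original source): in the omitted
  // remainder of this method, these scaled KKT errors are presumably
  // compared against the algorithm's convergence tolerances to decide
  // the return value of Converged().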
  // step_err
  value_type step_err = 0.0;
  if( d_iq.updated_k(0) ) {
    //......... (the rest of this example is omitted) .........