本文整理汇总了C++中SmartPtr::Amax方法的典型用法代码示例。如果您正苦于以下问题:C++ SmartPtr::Amax方法的具体用法?C++ SmartPtr::Amax怎么用?C++ SmartPtr::Amax使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SmartPtr
的用法示例。
在下文中一共展示了SmartPtr::Amax方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1:
/** Passes the bounds request through to the original NLP.
 *
 *  In the bounds-remover reformulation, the variable bounds of the
 *  original NLP have been recast as inequality constraints.  The
 *  compound structures handed in here therefore contain two parts:
 *  component 0 is the original inequality data, component 1 is the
 *  relocated variable-bound data.  This method unpacks both parts and
 *  forwards them to the wrapped NLP, using the member expansion
 *  matrices (Px_l_orig_, Px_u_orig_) for the variable bounds.
 *
 *  @return whatever the wrapped NLP's GetBoundsInformation returns.
 *  @throws INVALID_NLP (via ASSERT_EXCEPTION) if, when two-sided
 *          inequalities are disallowed, some inequality has both
 *          bounds or no bounds at all.
 */
bool
NLPBoundsRemover::GetBoundsInformation(const Matrix& Px_L,
                                       Vector& x_L,
                                       const Matrix& Px_U,
                                       Vector& x_U,
                                       const Matrix& Pd_L,
                                       Vector& d_L,
                                       const Matrix& Pd_U,
                                       Vector& d_U)
{
  // The expansion matrices for the inequality bounds are compound;
  // block (0,0) is the part that maps the original inequality bounds.
  const CompoundMatrix* comp_pd_l =
    static_cast<const CompoundMatrix*>(&Pd_L);
  DBG_ASSERT(dynamic_cast<const CompoundMatrix*>(&Pd_L));
  SmartPtr<const Matrix> pd_l_orig = comp_pd_l->GetComp(0,0);

  const CompoundMatrix* comp_pd_u =
    static_cast<const CompoundMatrix*>(&Pd_U);
  DBG_ASSERT(dynamic_cast<const CompoundMatrix*>(&Pd_U));
  SmartPtr<const Matrix> pd_u_orig = comp_pd_u->GetComp(0,0);

  // The bound vectors are compound as well: component 0 holds the
  // original inequality bounds, component 1 the relocated variable
  // bounds.
  CompoundVector* comp_d_l = static_cast<CompoundVector*>(&d_L);
  DBG_ASSERT(dynamic_cast<CompoundVector*>(&d_L));
  SmartPtr<Vector> d_l_orig = comp_d_l->GetCompNonConst(0);
  SmartPtr<Vector> x_l_orig = comp_d_l->GetCompNonConst(1);

  CompoundVector* comp_d_u = static_cast<CompoundVector*>(&d_U);
  DBG_ASSERT(dynamic_cast<CompoundVector*>(&d_U));
  SmartPtr<Vector> d_u_orig = comp_d_u->GetCompNonConst(0);
  SmartPtr<Vector> x_u_orig = comp_d_u->GetCompNonConst(1);

  // Here we do a sanity check to make sure that no inequality
  // constraint has two non-infinite bounds.  Counting trick: expand a
  // vector of ones through the lower-bound map, then add a vector of
  // ones expanded through the upper-bound map.  Each entry of d is
  // then the exact number of (finite) bounds on that inequality —
  // 0., 1., or 2. — so the equality comparisons below are exact.
  if (d_space_orig_->Dim()>0 && !allow_twosided_inequalities_) {
    SmartPtr<Vector> d = d_space_orig_->MakeNew();
    SmartPtr<Vector> tmp = d_l_orig->MakeNew();
    tmp->Set(1.);
    pd_l_orig->MultVector(1., *tmp, 0., *d);
    tmp = d_u_orig->MakeNew();
    tmp->Set(1.);
    pd_u_orig->MultVector(1., *tmp, 1., *d);
    Number dmax = d->Amax();
    ASSERT_EXCEPTION(dmax==1., INVALID_NLP, "In NLPBoundsRemover, an inequality with both lower and upper bounds was detected");
    Number dmin = d->Min();
    ASSERT_EXCEPTION(dmin==1., INVALID_NLP, "In NLPBoundsRemover, an inequality without bounds was detected.");
  }

  // Forward the unpacked pieces to the wrapped NLP; the variable
  // bounds use the stored original expansion matrices.
  bool retval =
    nlp_->GetBoundsInformation(*Px_l_orig_, *x_l_orig, *Px_u_orig_,
                               *x_u_orig, *pd_l_orig, *d_l_orig,
                               *pd_u_orig, *d_u_orig);
  return retval;
}
示例2: DetermineScalingParametersImpl
void GradientScaling::DetermineScalingParametersImpl(
const SmartPtr<const VectorSpace> x_space,
const SmartPtr<const VectorSpace> p_space,
const SmartPtr<const VectorSpace> c_space,
const SmartPtr<const VectorSpace> d_space,
const SmartPtr<const MatrixSpace> jac_c_space,
const SmartPtr<const MatrixSpace> jac_d_space,
const SmartPtr<const SymMatrixSpace> h_space,
const Matrix& Px_L, const Vector& x_L,
const Matrix& Px_U, const Vector& x_U,
Number& df,
SmartPtr<Vector>& dx,
SmartPtr<Vector>& dc,
SmartPtr<Vector>& dd)
{
DBG_ASSERT(IsValid(nlp_));
SmartPtr<Vector> x = x_space->MakeNew();
SmartPtr<Vector> p = p_space->MakeNew();
if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
GetRawPtr(p), true,
NULL, false,
NULL, false,
NULL, false,
NULL, false)) {
THROW_EXCEPTION(FAILED_INITIALIZATION,
"Error getting initial point from NLP in GradientScaling.\n");
}
//
// Calculate grad_f scaling
//
SmartPtr<Vector> grad_f = x_space->MakeNew();
if (nlp_->Eval_grad_f(*x, *p, *grad_f)) {
double max_grad_f = grad_f->Amax();
df = 1.;
if (scaling_obj_target_gradient_ == 0.) {
if (max_grad_f > scaling_max_gradient_) {
df = scaling_max_gradient_ / max_grad_f;
}
}
else {
if (max_grad_f == 0.) {
Jnlst().Printf(J_WARNING, J_INITIALIZATION,
"Gradient of objective function is zero at starting point. Cannot determine scaling factor based on scaling_obj_target_gradient option.\n");
}
else {
df = scaling_obj_target_gradient_ / max_grad_f;
}
}
df = Max(df, scaling_min_value_);
Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
"Scaling parameter for objective function = %e\n", df);
}
else {
Jnlst().Printf(J_WARNING, J_INITIALIZATION,
"Error evaluating objective gradient at user provided starting point.\n No scaling factor for objective function computed!\n");
df = 1.;
}
//
// No x scaling
//
dx = NULL;
dc = NULL;
if (c_space->Dim()>0) {
//
// Calculate c scaling
//
SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
if (nlp_->Eval_jac_c(*x, *p, *jac_c)) {
dc = c_space->MakeNew();
const double dbl_min = std::numeric_limits<double>::min();
dc->Set(dbl_min);
jac_c->ComputeRowAMax(*dc, false);
Number arow_max = dc->Amax();
if (scaling_constr_target_gradient_<=0.) {
if (arow_max > scaling_max_gradient_) {
dc->ElementWiseReciprocal();
dc->Scal(scaling_max_gradient_);
SmartPtr<Vector> dummy = dc->MakeNew();
dummy->Set(1.);
dc->ElementWiseMin(*dummy);
}
else {
dc = NULL;
}
}
else {
dc->Set(scaling_constr_target_gradient_/arow_max);
}
if (IsValid(dc) && scaling_min_value_ > 0.) {
SmartPtr<Vector> tmp = dc->MakeNew();
tmp->Set(scaling_min_value_);
dc->ElementWiseMax(*tmp);
}
}
else {
Jnlst().Printf(J_WARNING, J_INITIALIZATION,
"Error evaluating Jacobian of equality constraints at user provided starting point.\n No scaling factors for equality constraints computed!\n");
//.........这里部分代码省略.........
示例3: UpdatePenaltyParameter
char CGPenaltyLSAcceptor::UpdatePenaltyParameter()
{
DBG_START_METH("CGPenaltyLSAcceptor::UpdatePenaltyParameter",
dbg_verbosity);
char info_alpha_primal_char = 'n';
// We use the new infeasibility here...
Number trial_inf = IpCq().trial_primal_infeasibility(NORM_2);
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"trial infeasibility = %8.2\n", trial_inf);
if (curr_eta_<0.) {
// We need to initialize the eta tolerance
curr_eta_ = Max(eta_min_, Min(gamma_tilde_,
gamma_hat_*IpCq().curr_nlp_error()));
}
// Check if the penalty parameter is to be increased
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"Starting tests for penalty parameter update:\n");
bool increase = (trial_inf >= penalty_update_infeasibility_tol_);
if (!increase) {
info_alpha_primal_char='i';
}
if (increase) {
Number max_step = Max(CGPenData().delta_cgpen()->x()->Amax(),
CGPenData().delta_cgpen()->s()->Amax());
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"Max norm of step = %8.2\n", max_step);
increase = (max_step <= curr_eta_);
if (!increase) {
info_alpha_primal_char='d';
}
}
// Lifeng: Should we use the new complementarity here? If so, I
// have to restructure BacktrackingLineSearch
Number mu = IpData().curr_mu();
if (increase) {
Number min_compl = mu;
Number max_compl = mu;
if (IpNLP().x_L()->Dim()>0) {
SmartPtr<const Vector> compl_x_L = IpCq().curr_compl_x_L();
min_compl = Min(min_compl, compl_x_L->Min());
max_compl = Max(max_compl, compl_x_L->Max());
}
if (IpNLP().x_U()->Dim()>0) {
SmartPtr<const Vector> compl_x_U = IpCq().curr_compl_x_U();
min_compl = Min(min_compl, compl_x_U->Min());
max_compl = Max(max_compl, compl_x_U->Max());
}
if (IpNLP().d_L()->Dim()>0) {
SmartPtr<const Vector> compl_s_L = IpCq().curr_compl_s_L();
min_compl = Min(min_compl, compl_s_L->Min());
max_compl = Max(max_compl, compl_s_L->Max());
}
if (IpNLP().d_U()->Dim()>0) {
SmartPtr<const Vector> compl_s_U = IpCq().curr_compl_s_U();
min_compl = Min(min_compl, compl_s_U->Min());
max_compl = Max(max_compl, compl_s_U->Max());
}
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"Minimal compl = %8.2\n", min_compl);
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"Maximal compl = %8.2\n", max_compl);
increase = (min_compl >= mu*penalty_update_compl_tol_ &&
max_compl <= mu/penalty_update_compl_tol_);
if (!increase) {
info_alpha_primal_char='c';
}
}
// Lifeng: Here I'm using the information from the current step
// and the current infeasibility
if (increase) {
SmartPtr<Vector> vec = IpData().curr()->y_c()->MakeNewCopy();
vec->AddTwoVectors(1., *CGPenData().delta_cgpen()->y_c(),
-1./CGPenCq().curr_cg_pert_fact(), *IpCq().curr_c(),
1.);
Number omega_test = vec->Amax();
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"omega_test for c = %8.2\n", omega_test);
increase = (omega_test < curr_eta_);
if (increase) {
SmartPtr<Vector> vec = IpData().curr()->y_d()->MakeNewCopy();
vec->AddTwoVectors(1., *IpData().delta()->y_d(),
-1./CGPenCq().curr_cg_pert_fact(), *IpCq().curr_d_minus_s(),
1.);
omega_test = vec->Amax();
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"omega_test for d = %8.2\n", omega_test);
increase = (omega_test < curr_eta_);
}
if (!increase) {
info_alpha_primal_char='m';
}
}
if (increase) {
// Ok, now we should increase the penalty parameter
counter_first_type_penalty_updates_++;
// Update the eta tolerance
curr_eta_ = Max(eta_min_, curr_eta_/2.);
Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
"Updating eta to = %8.2\n", curr_eta_);
Number penalty = CGPenData().curr_kkt_penalty();
//.........这里部分代码省略.........