This article collects typical usage examples of the C++ method SmartPtr::Min. If you are wondering what SmartPtr::Min does, how to call it, or what it looks like in real code, the hand-picked method examples below should help. You can also read further usage examples of the containing class, SmartPtr.
The text below shows 2 code examples of SmartPtr::Min, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
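Note that Min() is not declared on SmartPtr itself: SmartPtr<T> is Ipopt's reference-counting smart pointer, and Min() is a method of the pointed-to object (Ipopt's Vector), reached through SmartPtr's operator->. A minimal sketch of that pattern follows; it assumes Ipopt's SmartPtr, DenseVectorSpace, and DenseVector classes, and the concrete setup is only illustrative, not taken from the examples below.

#include "IpSmartPtr.hpp"
#include "IpDenseVector.hpp"

using namespace Ipopt;

Number SmallestEntry()
{
  // Build a small dense vector through its vector space (illustrative setup).
  SmartPtr<DenseVectorSpace> space = new DenseVectorSpace(3);
  SmartPtr<DenseVector> v = space->MakeNewDenseVector();
  Number vals[] = {3.0, -1.0, 2.0};
  v->SetValues(vals);
  // Min() is called on the Vector behind the SmartPtr and returns the
  // smallest element, here -1.0.
  return v->Min();
}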
Example 1:
bool
NLPBoundsRemover::GetBoundsInformation(const Matrix& Px_L,
                                       Vector& x_L,
                                       const Matrix& Px_U,
                                       Vector& x_U,
                                       const Matrix& Pd_L,
                                       Vector& d_L,
                                       const Matrix& Pd_U,
                                       Vector& d_U)
{
  const CompoundMatrix* comp_pd_l =
    static_cast<const CompoundMatrix*>(&Pd_L);
  DBG_ASSERT(dynamic_cast<const CompoundMatrix*>(&Pd_L));
  SmartPtr<const Matrix> pd_l_orig = comp_pd_l->GetComp(0,0);
  const CompoundMatrix* comp_pd_u =
    static_cast<const CompoundMatrix*>(&Pd_U);
  DBG_ASSERT(dynamic_cast<const CompoundMatrix*>(&Pd_U));
  SmartPtr<const Matrix> pd_u_orig = comp_pd_u->GetComp(0,0);
  CompoundVector* comp_d_l = static_cast<CompoundVector*>(&d_L);
  DBG_ASSERT(dynamic_cast<CompoundVector*>(&d_L));
  SmartPtr<Vector> d_l_orig = comp_d_l->GetCompNonConst(0);
  SmartPtr<Vector> x_l_orig = comp_d_l->GetCompNonConst(1);
  CompoundVector* comp_d_u = static_cast<CompoundVector*>(&d_U);
  DBG_ASSERT(dynamic_cast<CompoundVector*>(&d_U));
  SmartPtr<Vector> d_u_orig = comp_d_u->GetCompNonConst(0);
  SmartPtr<Vector> x_u_orig = comp_d_u->GetCompNonConst(1);
  // Here we do a sanity check to make sure that no inequality
  // constraint has two non-infinite bounds.
  if (d_space_orig_->Dim()>0 && !allow_twosided_inequalities_) {
    SmartPtr<Vector> d = d_space_orig_->MakeNew();
    SmartPtr<Vector> tmp = d_l_orig->MakeNew();
    tmp->Set(1.);
    pd_l_orig->MultVector(1., *tmp, 0., *d);
    tmp = d_u_orig->MakeNew();
    tmp->Set(1.);
    pd_u_orig->MultVector(1., *tmp, 1., *d);
    Number dmax = d->Amax();
    ASSERT_EXCEPTION(dmax==1., INVALID_NLP, "In NLPBoundRemover, an inequality with both lower and upper bounds was detected");
    Number dmin = d->Min();
    ASSERT_EXCEPTION(dmin==1., INVALID_NLP, "In NLPBoundRemover, an inequality without bounds was detected.");
  }
  bool retval =
    nlp_->GetBoundsInformation(*Px_l_orig_, *x_l_orig, *Px_u_orig_,
                               *x_u_orig, *pd_l_orig, *d_l_orig,
                               *pd_u_orig, *d_u_orig);
  return retval;
}
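In Example 1, the sanity check builds an indicator vector d: multiplying an all-ones vector through the lower-bound projection Pd_L (via MultVector) and adding the corresponding product for Pd_U gives, per inequality constraint, the number of finite bounds it has. Amax() == 1 then rules out constraints with both bounds, and Min() == 1 rules out constraints with no bound at all. Below is a plain-array sketch of the same counting idea, using hypothetical helper names rather than Ipopt's API.

#include <algorithm>
#include <vector>

// has_lower/has_upper mark (0 or 1) whether each inequality constraint has a
// finite lower/upper bound. Their per-constraint sum plays the role of the
// vector d in GetBoundsInformation: 0 = no bound, 1 = one bound, 2 = both.
bool BoundsAreOneSided(const std::vector<int>& has_lower,
                       const std::vector<int>& has_upper)
{
  double dmax = 0., dmin = 2.;
  for (std::size_t i = 0; i < has_lower.size(); ++i) {
    const double d = has_lower[i] + has_upper[i];
    dmax = std::max(dmax, d);
    dmin = std::min(dmin, d);
  }
  // Mirrors the two ASSERT_EXCEPTION checks above: dmax == 1 and dmin == 1.
  return dmax == 1. && dmin == 1.;
}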
Example 2: DetermineScalingParametersImpl
void GradientScaling::DetermineScalingParametersImpl(
  const SmartPtr<const VectorSpace> x_space,
  const SmartPtr<const VectorSpace> c_space,
  const SmartPtr<const VectorSpace> d_space,
  const SmartPtr<const MatrixSpace> jac_c_space,
  const SmartPtr<const MatrixSpace> jac_d_space,
  const SmartPtr<const SymMatrixSpace> h_space,
  Number& df,
  SmartPtr<Vector>& dx,
  SmartPtr<Vector>& dc,
  SmartPtr<Vector>& dd)
{
  DBG_ASSERT(IsValid(nlp_));
  SmartPtr<Vector> x = x_space->MakeNew();
  if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                              NULL, false,
                              NULL, false,
                              NULL, false,
                              NULL, false)) {
    THROW_EXCEPTION(FAILED_INITIALIZATION,
                    "Error getting initial point from NLP in GradientScaling.\n");
  }
  //
  // Calculate grad_f scaling
  //
  SmartPtr<Vector> grad_f = x_space->MakeNew();
  if (nlp_->Eval_grad_f(*x, *grad_f)) {
    Number max_grad_f = grad_f->Amax();
    df = 1;
    if (max_grad_f > scaling_max_gradient_) {
      df = scaling_max_gradient_ / max_grad_f;
    }
    Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                   "Scaling parameter for objective function = %e\n", df);
  }
  else {
    Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                   "Error evaluating objective gradient at user provided starting point.\n  No scaling factor for objective function computed!\n");
    df = 1;
  }
  //
  // No x scaling
  //
  dx = NULL;
  //
  // Calculate c scaling
  //
  SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
  if (nlp_->Eval_jac_c(*x, *jac_c)) {
    // ToDo: Don't use TripletHelper, have special methods on matrices instead
    Index nnz = TripletHelper::GetNumberEntries(*jac_c);
    Index* irow = new Index[nnz];
    Index* jcol = new Index[nnz];
    Number* values = new Number[nnz];
    TripletHelper::FillRowCol(nnz, *jac_c, irow, jcol);
    TripletHelper::FillValues(nnz, *jac_c, values);
    Number* c_scaling = new Number[jac_c->NRows()];
    for (Index r=0; r<jac_c->NRows(); r++) {
      c_scaling[r] = 0;
    }
    // put the max of each row into c_scaling...
    bool need_c_scale = false;
    for (Index i=0; i<nnz; i++) {
      if (fabs(values[i]) > scaling_max_gradient_) {
        c_scaling[irow[i]-1] = Max(c_scaling[irow[i]-1], fabs(values[i]));
        need_c_scale = true;
      }
    }
    if (need_c_scale) {
      // now compute the scaling factors for each row
      for (Index r=0; r<jac_c->NRows(); r++) {
        Number scaling = 1.0;
        if (c_scaling[r] > scaling_max_gradient_) {
          scaling = scaling_max_gradient_/c_scaling[r];
        }
        c_scaling[r] = scaling;
      }
      dc = c_space->MakeNew();
      TripletHelper::PutValuesInVector(jac_c->NRows(), c_scaling, *dc);
      if (Jnlst().ProduceOutput(J_DETAILED, J_INITIALIZATION)) {
        Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                       "Equality constraints are scaled; smallest scaling parameter is %e\n", dc->Min());
      }
    }
    else {
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Equality constraints are not scaled.\n");
      dc = NULL;
    }
    delete [] irow;
    irow = NULL;
    delete [] jcol;
//......... the rest of the code is omitted here .........
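Example 2 is cut off above, but the rule it applies to the equality constraints is already visible: every Jacobian row of c whose largest absolute entry exceeds scaling_max_gradient_ gets the factor scaling_max_gradient_ divided by that largest entry, and every other row keeps the factor 1. The following is a standalone sketch of that rule under the assumption of a dense Jacobian stored as nested std::vector; the Ipopt code above works on the triplet representation instead, and the names here are hypothetical.

#include <algorithm>
#include <cmath>
#include <vector>

// For each constraint row, return max_gradient / (largest absolute Jacobian
// entry) if that entry exceeds max_gradient, otherwise 1.0, so rows with
// large gradients are shrunk and the rest are left unscaled.
std::vector<double> RowScalingFactors(const std::vector<std::vector<double> >& jac,
                                      double max_gradient)
{
  std::vector<double> scaling(jac.size(), 1.0);
  for (std::size_t r = 0; r < jac.size(); ++r) {
    double row_max = 0.0;
    for (std::size_t j = 0; j < jac[r].size(); ++j) {
      row_max = std::max(row_max, std::fabs(jac[r][j]));
    }
    if (row_max > max_gradient) {
      scaling[r] = max_gradient / row_max;
    }
  }
  return scaling;
}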