This page collects typical usage examples of the C++ method SmartPtr::MakeNew. If you have been wondering what SmartPtr::MakeNew does, or how to use it, the hand-picked method examples below may help. You can also explore further usage examples of its containing class, SmartPtr.
The following presents 12 code examples of the SmartPtr::MakeNew method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
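Before the examples, a quick orientation: MakeNew() asks a vector's owning VectorSpace for a fresh, uninitialized vector of the same runtime type and dimension, which is why the examples below can write generic code without knowing whether they hold a dense or a compound vector. The following is a minimal sketch of that idiom, not taken from any example below; the helper name scaled_copy is ours, and the Ipopt headers are assumed as named in the comments.

// Minimal sketch of the MakeNew idiom (assumes Ipopt's IpSmartPtr.hpp /
// IpVector.hpp; illustrative only, not from the examples below).
#include "IpSmartPtr.hpp"
#include "IpVector.hpp"

using namespace Ipopt;

// Return factor * x in a freshly allocated vector from x's own space.
SmartPtr<Vector> scaled_copy(const Vector& x, Number factor)
{
   SmartPtr<Vector> y = x.MakeNew(); // same space; values are uninitialized
   y->Copy(x);                       // y <- x
   y->Scal(factor);                  // y <- factor * y
   return y;
}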
Example 1: Vector
CompoundVector::CompoundVector(const CompoundVectorSpace* owner_space, bool create_new)
   :
   Vector(owner_space),
   comps_(owner_space->NCompSpaces()),
   const_comps_(owner_space->NCompSpaces()),
   owner_space_(owner_space),
   vectors_valid_(false)
{
   Index dim_check = 0;
   for (Index i = 0; i < NComps(); i++) {
      SmartPtr<const VectorSpace> space = owner_space_->GetCompSpace(i);
      DBG_ASSERT(IsValid(space));
      dim_check += space->Dim();
      if (create_new) {
         comps_[i] = space->MakeNew();
      }
   }
   DBG_ASSERT(dim_check == Dim());
   if (create_new) {
      vectors_valid_ = VectorsValid();
   }
}
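A usage note on Example 1: calling MakeNew() on a compound space allocates every component, which is exactly what the loop above does when create_new is true. Below is a hedged sketch of driving that constructor, assuming Ipopt's CompoundVectorSpace API (SetCompSpace / MakeNewCompoundVector) as declared in IpCompoundVector.hpp; the dimensions are ours.

// Sketch: build a 2-component compound space and allocate a vector in it.
SmartPtr<DenseVectorSpace> s1 = new DenseVectorSpace(3);
SmartPtr<DenseVectorSpace> s2 = new DenseVectorSpace(5);
SmartPtr<CompoundVectorSpace> cspace = new CompoundVectorSpace(2, 3 + 5);
cspace->SetCompSpace(0, *s1);
cspace->SetCompSpace(1, *s2);
// create_new = true triggers the per-component MakeNew() loop above
SmartPtr<CompoundVector> v = cspace->MakeNewCompoundVector(true);
v->GetCompNonConst(0)->Set(1.0); // each component is now a real vector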
Example 2: ConstPtr
SmartPtr<const Vector> AugRestoSystemSolver::Rhs_cR(const Vector& rhs_c,
      const SmartPtr<const Vector>& sigma_tilde_n_c_inv, const Vector& rhs_n_c,
      const SmartPtr<const Vector>& sigma_tilde_p_c_inv, const Vector& rhs_p_c)
{
   DBG_START_METH("AugRestoSystemSolver::Rhs_cR", dbg_verbosity);
   SmartPtr<Vector> retVec;
   std::vector<const TaggedObject*> deps(5);
   std::vector<Number> scalar_deps;
   deps[0] = &rhs_c;
   deps[1] = GetRawPtr(sigma_tilde_n_c_inv);
   deps[2] = &rhs_n_c;
   deps[3] = GetRawPtr(sigma_tilde_p_c_inv);
   deps[4] = &rhs_p_c;
   if (!rhs_cR_cache_.GetCachedResult(retVec, deps, scalar_deps)) {
      DBG_PRINT((1, "Not found in cache\n"));
      retVec = rhs_c.MakeNew();
      retVec->Copy(rhs_c);
      SmartPtr<Vector> tmp = retVec->MakeNew();
      if (IsValid(sigma_tilde_n_c_inv)) {
         tmp->Copy(*sigma_tilde_n_c_inv);
         tmp->ElementWiseMultiply(rhs_n_c);
         retVec->Axpy(-1.0, *tmp);
      }
      if (IsValid(sigma_tilde_p_c_inv)) {
         tmp->Copy(*sigma_tilde_p_c_inv);
         tmp->ElementWiseMultiply(rhs_p_c);
         retVec->Axpy(1.0, *tmp);
      }
      rhs_cR_cache_.AddCachedResult(retVec, deps, scalar_deps);
   }
   return ConstPtr(retVec);
}
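Example 2 pairs MakeNew with Ipopt's CachedResults pattern: look the result up by its dependencies, and allocate and compute only on a cache miss. Below is a minimal sketch of that check-compute-store shape; the member demo_cache_ and the computation are illustrative, not part of the example above.

// Hypothetical member: CachedResults< SmartPtr<Vector> > demo_cache_;
SmartPtr<const Vector> demo_sum(const Vector& u, const Vector& v)
{
   SmartPtr<Vector> result;
   std::vector<const TaggedObject*> deps(2);
   deps[0] = &u;
   deps[1] = &v;
   std::vector<Number> scalar_deps; // no scalar dependencies here
   if (!demo_cache_.GetCachedResult(result, deps, scalar_deps)) {
      result = u.MakeNew(); // allocate only on a cache miss
      result->AddTwoVectors(1.0, u, 1.0, v, 0.0); // result = u + v
      demo_cache_.AddCachedResult(result, deps, scalar_deps);
   }
   return ConstPtr(result);
}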
Example 3: SchurSolve
/* SchurSolve performs the IFT step if S_ == NULL, and the full DenseGenSchurDriver step otherwise. */
bool DenseGenSchurDriver::SchurSolve(SmartPtr<IteratesVector> lhs, // new left-hand side will be stored here
                                     SmartPtr<const IteratesVector> rhs, // rhs r_s
                                     SmartPtr<Vector> delta_u, // should be (u_p - u_0); watch the sign convention: here u_0 + delta_u = u_p. On return, delta_nu is stored in this vector.
                                     SmartPtr<IteratesVector> sol) // the vector K^(-1)*r_s, which usually should have been computed before
{
   DBG_START_METH("DenseGenSchurDriver::SchurSolve", dbg_verbosity);
   DBG_ASSERT(IsValid(S_));
   bool retval;
   // set up rhs of equation (3.48a)
   SmartPtr<Vector> delta_rhs = delta_u->MakeNew();
   data_B()->Multiply(*sol, *delta_rhs);
   delta_rhs->Print(Jnlst(), J_VECTOR, J_USER1, "delta_rhs");
   delta_rhs->Scal(-1.0);
   delta_rhs->Axpy(1.0, *delta_u);
   delta_rhs->Print(Jnlst(), J_VECTOR, J_USER1, "rhs 3.48a");
   // solve equation (3.48a) for delta_nu
   SmartPtr<DenseVector> delta_nu = dynamic_cast<DenseVector*>(GetRawPtr(delta_rhs))->MakeNewDenseVector();
   delta_nu->Copy(*delta_rhs);
   S_->LUSolveVector(*delta_nu); // ToDo: LUSolveVector should report success via a bool return value
   delta_nu->Print(Jnlst(), J_VECTOR, J_USER1, "delta_nu");
   // solve equation (3.48b) for lhs (= delta_s)
   SmartPtr<IteratesVector> new_rhs = lhs->MakeNewIteratesVector();
   data_A()->TransMultiply(*delta_nu, *new_rhs);
   new_rhs->Axpy(-1.0, *rhs);
   new_rhs->Scal(-1.0);
   new_rhs->Print(Jnlst(), J_VECTOR, J_USER1, "new_rhs");
   retval = backsolver_->Solve(lhs, ConstPtr(new_rhs));
   return retval;
}
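Reading the steps off the code (our reconstruction; the numbers (3.48a)/(3.48b) refer to a text not reproduced here), with sol = K^{-1} r_s the two solves above are

S \,\Delta\nu = \Delta u - B\,K^{-1} r_s \tag{3.48a}
K \,\Delta s  = r_s - A^{T} \Delta\nu    \tag{3.48b}

where (3.48a) is handled by the dense LU solve on S_ and (3.48b) by backsolver_.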
Example 4:
SmartPtr<Vector> NLPScalingObject::apply_vector_scaling_d_LU_NonConst(
   const Matrix&                 Pd_LU,
   const SmartPtr<const Vector>& lu,
   const VectorSpace&            d_space
)
{
   DBG_START_METH("NLPScalingObject::apply_vector_scaling_d_LU_NonConst", dbg_verbosity);
   SmartPtr<Vector> scaled_d_LU = lu->MakeNew();
   if( have_d_scaling() )
   {
      SmartPtr<Vector> tmp_d = d_space.MakeNew();
      // move to full d space
      Pd_LU.MultVector(1.0, *lu, 0.0, *tmp_d);
      // scale in full d space
      tmp_d = apply_vector_scaling_d_NonConst(ConstPtr(tmp_d));
      // move back to the d_L/d_U space
      Pd_LU.TransMultVector(1.0, *tmp_d, 0.0, *scaled_d_LU);
   }
   else
   {
      scaled_d_LU->Copy(*lu);
   }
   return scaled_d_LU;
}
Example 5:
SmartPtr<Vector> NLPScalingObject::apply_vector_scaling_x_LU_NonConst(
   const Matrix& Px_LU,
   const SmartPtr<const Vector>& lu,
   const VectorSpace& x_space)
{
   SmartPtr<Vector> scaled_x_LU = lu->MakeNew();
   if (have_x_scaling()) {
      SmartPtr<Vector> tmp_x = x_space.MakeNew();
      // move to full x space
      Px_LU.MultVector(1.0, *lu, 0.0, *tmp_x);
      // scale in full x space
      tmp_x = apply_vector_scaling_x_NonConst(ConstPtr(tmp_x));
      // move back to x_L space
      Px_LU.TransMultVector(1.0, *tmp_x, 0.0, *scaled_x_LU);
   }
   else {
      scaled_x_LU->Copy(*lu);
   }
   return scaled_x_LU;
}
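Examples 4 and 5 are two instances of one pattern: expand a bound-sized vector into the full space with an expansion matrix P, operate there, and compress back with P^T, using MakeNew for the scratch storage. Below is a generic sketch of that round trip; the function name is ours and factor stands in for the real full-space operation.

// Sketch: P maps the bound subspace into the full space; P^T maps back.
SmartPtr<Vector> expand_apply_compress(const Matrix& P,
                                       const Vector& small,
                                       const VectorSpace& full_space,
                                       Number factor /* stand-in operation */)
{
   SmartPtr<Vector> full = full_space.MakeNew();
   P.MultVector(1.0, small, 0.0, *full);     // full = P * small
   full->Scal(factor);                       // operate in the full space
   SmartPtr<Vector> out = small.MakeNew();
   P.TransMultVector(1.0, *full, 0.0, *out); // out = P^T * full
   return out;
}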
Example 6: Jnlst
bool
RestoRestorationPhase::PerformRestoration()
{
   DBG_START_METH("RestoRestorationPhase::PerformRestoration",
                  dbg_verbosity);
   Jnlst().Printf(J_DETAILED, J_MAIN,
                  "Performing second level restoration phase for current constraint violation %8.2e\n", IpCq().curr_constraint_violation());
   DBG_ASSERT(IpCq().curr_constraint_violation() > 0.);

   // Get a grip on the restoration phase NLP and obtain the pointers
   // to the original NLP data
   SmartPtr<RestoIpoptNLP> resto_ip_nlp =
      static_cast<RestoIpoptNLP*>(&IpNLP());
   DBG_ASSERT(dynamic_cast<RestoIpoptNLP*>(&IpNLP()));
   SmartPtr<IpoptNLP> orig_ip_nlp =
      static_cast<IpoptNLP*>(&resto_ip_nlp->OrigIpNLP());
   DBG_ASSERT(dynamic_cast<IpoptNLP*>(&resto_ip_nlp->OrigIpNLP()));

   // Get the current point and create a new vector for the result
   SmartPtr<const CompoundVector> Ccurr_x =
      static_cast<const CompoundVector*>(GetRawPtr(IpData().curr()->x()));
   SmartPtr<Vector> new_x = IpData().curr()->x()->MakeNew();
   SmartPtr<CompoundVector> Cnew_x =
      static_cast<CompoundVector*>(GetRawPtr(new_x));

   // The x values remain unchanged
   SmartPtr<Vector> x = Cnew_x->GetCompNonConst(0);
   x->Copy(*Ccurr_x->GetComp(0));

   // ToDo in free mu mode - what to do here?
   Number mu = IpData().curr_mu();

   // Compute the initial values of the n and p variables for the
   // equality constraints
   Number rho = resto_ip_nlp->Rho();
   SmartPtr<Vector> nc = Cnew_x->GetCompNonConst(1);
   SmartPtr<Vector> pc = Cnew_x->GetCompNonConst(2);
   SmartPtr<const Vector> cvec = orig_ip_nlp->c(*Ccurr_x->GetComp(0));
   SmartPtr<Vector> a = nc->MakeNew();
   SmartPtr<Vector> b = nc->MakeNew();
   a->Set(mu / (2.*rho));
   a->Axpy(-0.5, *cvec);
   b->Copy(*cvec);
   b->Scal(mu / (2.*rho));
   solve_quadratic(*a, *b, *nc);
   pc->Copy(*cvec);
   pc->Axpy(1., *nc);
   DBG_PRINT_VECTOR(2, "nc", *nc);
   DBG_PRINT_VECTOR(2, "pc", *pc);

   // Compute the initial values of the n and p variables for the
   // inequality constraints
   SmartPtr<Vector> nd = Cnew_x->GetCompNonConst(3);
   SmartPtr<Vector> pd = Cnew_x->GetCompNonConst(4);
   SmartPtr<Vector> dvec = pd->MakeNew();
   dvec->Copy(*orig_ip_nlp->d(*Ccurr_x->GetComp(0)));
   dvec->Axpy(-1., *IpData().curr()->s());
   a = nd->MakeNew();
   b = nd->MakeNew();
   a->Set(mu / (2.*rho));
   a->Axpy(-0.5, *dvec);
   b->Copy(*dvec);
   b->Scal(mu / (2.*rho));
   solve_quadratic(*a, *b, *nd);
   pd->Copy(*dvec);
   pd->Axpy(1., *nd);
   DBG_PRINT_VECTOR(2, "nd", *nd);
   DBG_PRINT_VECTOR(2, "pd", *pd);

   // Now set the trial point to the solution of the restoration phase;
   // s and all multipliers remain unchanged
   SmartPtr<IteratesVector> new_trial = IpData().curr()->MakeNewContainer();
   new_trial->Set_x(*new_x);
   IpData().set_trial(new_trial);
   IpData().Append_info_string("R");
   return true;
}
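A note on the n/p initialization above: elementwise, a = mu/(2*rho) - c/2 and b = mu*c/(2*rho), and solve_quadratic(a, b, n) returns a nonnegative root. Assuming it computes n = a + sqrt(a^2 + b), which matches the standard Ipopt restoration-phase formula, the initialization is

n = \frac{\mu - \rho c}{2\rho}
    + \sqrt{\left(\frac{\mu - \rho c}{2\rho}\right)^{2} + \frac{\mu c}{2\rho}},
\qquad p = c + n,

which keeps n, p > 0 while satisfying p - n = c exactly.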
Example 7: SetInitialIterates
bool RestoIterateInitializer::SetInitialIterates()
{
   DBG_START_METH("RestoIterateInitializer::SetInitialIterates",
                  dbg_verbosity);

   // Get a grip on the restoration phase NLP and obtain the pointers
   // to the original NLP data
   SmartPtr<RestoIpoptNLP> resto_ip_nlp =
      static_cast<RestoIpoptNLP*>(&IpNLP());
   SmartPtr<IpoptNLP> orig_ip_nlp =
      static_cast<IpoptNLP*>(&resto_ip_nlp->OrigIpNLP());
   SmartPtr<IpoptData> orig_ip_data =
      static_cast<IpoptData*>(&resto_ip_nlp->OrigIpData());
   SmartPtr<IpoptCalculatedQuantities> orig_ip_cq =
      static_cast<IpoptCalculatedQuantities*>(&resto_ip_nlp->OrigIpCq());

   // Set the value of the barrier parameter
   Number resto_mu;
   resto_mu = Max(orig_ip_data->curr_mu(),
                  orig_ip_cq->curr_c()->Amax(),
                  orig_ip_cq->curr_d_minus_s()->Amax());
   IpData().Set_mu(resto_mu);
   Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                  "Initial barrier parameter resto_mu = %e\n", resto_mu);

   /////////////////////////////////////////////////////////////////////
   //                   Initialize primal variables                   //
   /////////////////////////////////////////////////////////////////////

   // initialize the data structures in the restoration phase NLP
   IpData().InitializeDataStructures(IpNLP(), false, false, false,
                                     false, false);
   SmartPtr<Vector> new_x = IpData().curr()->x()->MakeNew();
   SmartPtr<CompoundVector> Cnew_x =
      static_cast<CompoundVector*>(GetRawPtr(new_x));

   // Set the trial x variables from the original NLP
   Cnew_x->GetCompNonConst(0)->Copy(*orig_ip_data->curr()->x());

   // Compute the initial values of the n and p variables for the
   // equality constraints
   Number rho = resto_ip_nlp->Rho();
   DBG_PRINT((1, "rho = %e\n", rho));
   SmartPtr<Vector> nc = Cnew_x->GetCompNonConst(1);
   SmartPtr<Vector> pc = Cnew_x->GetCompNonConst(2);
   SmartPtr<const Vector> cvec = orig_ip_cq->curr_c();
   DBG_PRINT_VECTOR(2, "cvec", *cvec);
   SmartPtr<Vector> a = nc->MakeNew();
   SmartPtr<Vector> b = nc->MakeNew();
   a->Set(resto_mu / (2.*rho));
   a->Axpy(-0.5, *cvec);
   b->Copy(*cvec);
   b->Scal(resto_mu / (2.*rho));
   DBG_PRINT_VECTOR(2, "a", *a);
   DBG_PRINT_VECTOR(2, "b", *b);
   solve_quadratic(*a, *b, *nc);
   pc->Copy(*cvec);
   pc->Axpy(1., *nc);
   DBG_PRINT_VECTOR(2, "nc", *nc);
   DBG_PRINT_VECTOR(2, "pc", *pc);

   // Compute the initial values of the n and p variables for the
   // inequality constraints
   SmartPtr<Vector> nd = Cnew_x->GetCompNonConst(3);
   SmartPtr<Vector> pd = Cnew_x->GetCompNonConst(4);
   cvec = orig_ip_cq->curr_d_minus_s();
   a = nd->MakeNew();
   b = nd->MakeNew();
   a->Set(resto_mu / (2.*rho));
   a->Axpy(-0.5, *cvec);
   b->Copy(*cvec);
   b->Scal(resto_mu / (2.*rho));
   solve_quadratic(*a, *b, *nd);
   pd->Copy(*cvec);
   pd->Axpy(1., *nd);
   DBG_PRINT_VECTOR(2, "nd", *nd);
   DBG_PRINT_VECTOR(2, "pd", *pd);

   // Leave the slacks unchanged
   SmartPtr<const Vector> new_s = orig_ip_data->curr()->s();

   // Now set the primal trial variables
   DBG_PRINT_VECTOR(2, "new_s", *new_s);
   DBG_PRINT_VECTOR(2, "new_x", *new_x);
   SmartPtr<IteratesVector> trial = IpData().curr()->MakeNewContainer();
   trial->Set_primal(*new_x, *new_s);
   IpData().set_trial(trial);
   DBG_PRINT_VECTOR(2, "resto_c", *IpCq().trial_c());
   DBG_PRINT_VECTOR(2, "resto_d_minus_s", *IpCq().trial_d_minus_s());

   /////////////////////////////////////////////////////////////////////
   //                   Initialize bound multipliers                  //
   /////////////////////////////////////////////////////////////////////

   SmartPtr<Vector> new_z_L = IpData().curr()->z_L()->MakeNew();
   SmartPtr<CompoundVector> Cnew_z_L =
      static_cast<CompoundVector*>(GetRawPtr(new_z_L));
   DBG_ASSERT(IsValid(Cnew_z_L));
//......... (remaining code omitted) .........
Example 8: DetermineScalingParametersImpl
void EquilibrationScaling::DetermineScalingParametersImpl(
   const SmartPtr<const VectorSpace> x_space,
   const SmartPtr<const VectorSpace> c_space,
   const SmartPtr<const VectorSpace> d_space,
   const SmartPtr<const MatrixSpace> jac_c_space,
   const SmartPtr<const MatrixSpace> jac_d_space,
   const SmartPtr<const SymMatrixSpace> h_space,
   const Matrix& Px_L, const Vector& x_L,
   const Matrix& Px_U, const Vector& x_U,
   Number& df,
   SmartPtr<Vector>& dx,
   SmartPtr<Vector>& dc,
   SmartPtr<Vector>& dd)
{
   DBG_ASSERT(IsValid(nlp_));
   SmartPtr<Vector> x0 = x_space->MakeNew();
   if (!nlp_->GetStartingPoint(GetRawPtr(x0), true,
                               NULL, false,
                               NULL, false,
                               NULL, false,
                               NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in EquilibrationScaling.\n");
   }

   // We store the summed absolute values of the Jacobians and the
   // objective function gradient in an array of sufficient size
   SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
   SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
   SmartPtr<Vector> grad_f = x_space->MakeNew();
   const Index nnz_jac_c = TripletHelper::GetNumberEntries(*jac_c);
   const Index nnz_jac_d = TripletHelper::GetNumberEntries(*jac_d);
   const Index nc = jac_c_space->NRows();
   const Index nd = jac_d_space->NRows();
   const Index nx = x_space->Dim();
   Number* avrg_values = new Number[nnz_jac_c + nnz_jac_d + nx];
   Number* val_buffer = new Number[Max(nnz_jac_c, nnz_jac_d, nx)];
   SmartPtr<PointPerturber> perturber =
      new PointPerturber(*x0, point_perturbation_radius_,
                         Px_L, x_L, Px_U, x_U);
   const Index num_evals = 4;
   const Index max_num_eval_errors = 10;
   Index num_eval_errors = 0;
   for (Index ieval = 0; ieval < num_evals; ieval++) {
      // Compute obj gradient and Jacobian at a random perturbation point
      bool done = false;
      while (!done) {
         SmartPtr<Vector> xpert = perturber->MakeNewPerturbedPoint();
         done = (nlp_->Eval_grad_f(*xpert, *grad_f) &&
                 nlp_->Eval_jac_c(*xpert, *jac_c) &&
                 nlp_->Eval_jac_d(*xpert, *jac_d));
         if (!done) {
            Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                           "Error evaluating first derivatives at a perturbed point for equilibration-based scaling.\n");
            num_eval_errors++;
         }
         if (num_eval_errors > max_num_eval_errors) {
            delete [] val_buffer;
            delete [] avrg_values;
            THROW_EXCEPTION(FAILED_INITIALIZATION,
                            "Too many evaluation failures during equilibration-based scaling.");
         }
      }
      // Get the numbers out of the matrices and vectors, and add them
      // to avrg_values
      TripletHelper::FillValues(nnz_jac_c, *jac_c, val_buffer);
      if (ieval == 0) {
         for (Index i = 0; i < nnz_jac_c; i++) {
            avrg_values[i] = fabs(val_buffer[i]);
         }
      }
      else {
         for (Index i = 0; i < nnz_jac_c; i++) {
            avrg_values[i] += fabs(val_buffer[i]);
         }
      }
      TripletHelper::FillValues(nnz_jac_d, *jac_d, val_buffer);
      if (ieval == 0) {
         for (Index i = 0; i < nnz_jac_d; i++) {
            avrg_values[nnz_jac_c + i] = fabs(val_buffer[i]);
         }
      }
      else {
         for (Index i = 0; i < nnz_jac_d; i++) {
            avrg_values[nnz_jac_c + i] += fabs(val_buffer[i]);
         }
      }
      TripletHelper::FillValuesFromVector(nx, *grad_f, val_buffer);
      if (ieval == 0) {
         for (Index i = 0; i < nx; i++) {
            avrg_values[nnz_jac_c + nnz_jac_d + i] = fabs(val_buffer[i]);
         }
      }
      else {
         for (Index i = 0; i < nx; i++) {
            avrg_values[nnz_jac_c + nnz_jac_d + i] += fabs(val_buffer[i]);
         }
      }
//......... (remaining code omitted) .........
Example 9: DetermineScalingParametersImpl
void GradientScaling::DetermineScalingParametersImpl(
   const SmartPtr<const VectorSpace> x_space,
   const SmartPtr<const VectorSpace> p_space,
   const SmartPtr<const VectorSpace> c_space,
   const SmartPtr<const VectorSpace> d_space,
   const SmartPtr<const MatrixSpace> jac_c_space,
   const SmartPtr<const MatrixSpace> jac_d_space,
   const SmartPtr<const SymMatrixSpace> h_space,
   const Matrix& Px_L, const Vector& x_L,
   const Matrix& Px_U, const Vector& x_U,
   Number& df,
   SmartPtr<Vector>& dx,
   SmartPtr<Vector>& dc,
   SmartPtr<Vector>& dd)
{
   DBG_ASSERT(IsValid(nlp_));
   SmartPtr<Vector> x = x_space->MakeNew();
   SmartPtr<Vector> p = p_space->MakeNew();
   if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                               GetRawPtr(p), true,
                               NULL, false,
                               NULL, false,
                               NULL, false,
                               NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
   }

   //
   // Calculate grad_f scaling
   //
   SmartPtr<Vector> grad_f = x_space->MakeNew();
   if (nlp_->Eval_grad_f(*x, *p, *grad_f)) {
      double max_grad_f = grad_f->Amax();
      df = 1.;
      if (scaling_obj_target_gradient_ == 0.) {
         if (max_grad_f > scaling_max_gradient_) {
            df = scaling_max_gradient_ / max_grad_f;
         }
      }
      else {
         if (max_grad_f == 0.) {
            Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                           "Gradient of objective function is zero at starting point. Cannot determine scaling factor based on scaling_obj_target_gradient option.\n");
         }
         else {
            df = scaling_obj_target_gradient_ / max_grad_f;
         }
      }
      df = Max(df, scaling_min_value_);
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
   }
   else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n No scaling factor for objective function computed!\n");
      df = 1.;
   }

   //
   // No x scaling
   //
   dx = NULL;

   dc = NULL;
   if (c_space->Dim() > 0) {
      //
      // Calculate c scaling
      //
      SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
      if (nlp_->Eval_jac_c(*x, *p, *jac_c)) {
         dc = c_space->MakeNew();
         const double dbl_min = std::numeric_limits<double>::min();
         dc->Set(dbl_min);
         jac_c->ComputeRowAMax(*dc, false);
         Number arow_max = dc->Amax();
         if (scaling_constr_target_gradient_ <= 0.) {
            if (arow_max > scaling_max_gradient_) {
               dc->ElementWiseReciprocal();
               dc->Scal(scaling_max_gradient_);
               SmartPtr<Vector> dummy = dc->MakeNew();
               dummy->Set(1.);
               dc->ElementWiseMin(*dummy);
            }
            else {
               dc = NULL;
            }
         }
         else {
            dc->Set(scaling_constr_target_gradient_ / arow_max);
         }
         if (IsValid(dc) && scaling_min_value_ > 0.) {
            SmartPtr<Vector> tmp = dc->MakeNew();
            tmp->Set(scaling_min_value_);
            dc->ElementWiseMax(*tmp);
         }
      }
      else {
         Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                        "Error evaluating Jacobian of equality constraints at user provided starting point.\n No scaling factors for equality constraints computed!\n");
//......... (remaining code omitted) .........
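To make the objective-scaling rule in Examples 9 and 12 concrete, a small worked instance (the numbers are ours): with the cap scaling_max_gradient_ = 100 and a starting point where the largest gradient component is max_grad_f = 2500,

d_f = \frac{100}{2500} = 0.04,

so the scaled objective d_f \cdot f has a largest gradient component of exactly 100.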
Example 10: MakeNew
inline
Vector* Vector::MakeNew() const
{
   return owner_space_->MakeNew();
}
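This delegation is what keeps the other examples type-safe: MakeNew() forwards to the polymorphic owner space, so the new vector has the same concrete type as the original. A hedged illustration, following the DenseVectorSpace usage pattern of this page:

// Sketch: MakeNew() preserves the concrete vector type at runtime.
SmartPtr<VectorSpace> space = new DenseVectorSpace(10);
SmartPtr<Vector> x = space->MakeNew(); // actually a DenseVector
SmartPtr<Vector> y = x->MakeNew();     // same space, same concrete type
DBG_ASSERT(dynamic_cast<DenseVector*>(GetRawPtr(y)) != NULL);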
Example 11: ComputeAlphaForY
Number InexactLSAcceptor::ComputeAlphaForY(
   Number                    alpha_primal,
   Number                    alpha_dual,
   SmartPtr<IteratesVector>& delta
)
{
   DBG_START_METH("InexactLSAcceptor::ComputeAlphaForY",
                  dbg_verbosity);

   // Here, we choose as step size for y either alpha_primal, if the
   // condition from the inexact paper is satisfied for it, or we
   // compute the step size closest to alpha_primal but greater than
   // it that gives the same progress as the full step would give.
   Number alpha_y = alpha_primal;
   SmartPtr<Vector> gx = IpCq().curr_grad_barrier_obj_x()->MakeNewCopy();
   gx->AddTwoVectors(1., *IpCq().curr_jac_cT_times_curr_y_c(), 1., *IpCq().curr_jac_dT_times_curr_y_d(), 1.);
   SmartPtr<Vector> Jxy = gx->MakeNew();
   IpCq().curr_jac_c()->TransMultVector(1., *delta->y_c(), 0., *Jxy);
   IpCq().curr_jac_d()->TransMultVector(1., *delta->y_d(), 1., *Jxy);
   SmartPtr<const Vector> curr_scaling_slacks = InexCq().curr_scaling_slacks();
   SmartPtr<Vector> gs = curr_scaling_slacks->MakeNew();
   gs->AddTwoVectors(1., *IpCq().curr_grad_barrier_obj_s(), -1., *IpData().curr()->y_d(), 0.);
   gs->ElementWiseMultiply(*curr_scaling_slacks);
   SmartPtr<Vector> Sdy = delta->y_d()->MakeNewCopy();
   Sdy->ElementWiseMultiply(*curr_scaling_slacks);

   // coefficients of the quadratic val(alpha) = a*alpha^2 + b*alpha + c
   Number a = pow(Jxy->Nrm2(), 2) + pow(Sdy->Nrm2(), 2);
   Number b = 2 * (gx->Dot(*Jxy) - gs->Dot(*Sdy));
   Number c = pow(gx->Nrm2(), 2) + pow(gs->Nrm2(), 2);

   // First we check if the primal step size is good enough:
   Number val_ap = alpha_primal * alpha_primal * a + alpha_primal * b + c;
   Number val_1 = a + b + c;
   if( val_ap <= val_1 )
   {
      Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, " Step size for y: using alpha_primal.\n");
   }
   else
   {
      Number alpha_2 = -b / a - 1.;
      Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, " Step size for y candidate: %8.2e - ", alpha_2);
      if( alpha_2 > alpha_primal && alpha_2 < 1. )
      {
         alpha_y = alpha_2;
         Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "using that one.\n");
      }
      else
      {
         alpha_y = 1.;
         Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "using 1 instead\n");
      }
   }
   return alpha_y;
}
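The candidate step -b/a - 1 in Example 11 is not magic. The progress measure val(alpha) = a*alpha^2 + b*alpha + c is quadratic in alpha, so (our derivation, read off the code above)

\operatorname{val}(\alpha) - \operatorname{val}(1)
   = a(\alpha^{2} - 1) + b(\alpha - 1)
   = (\alpha - 1)\bigl(a(\alpha + 1) + b\bigr),

and besides alpha = 1 the only step size matching the full-step value is alpha_2 = -b/a - 1; the code accepts it when it lies strictly between alpha_primal and 1.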
Example 12: DetermineScalingParametersImpl
void GradientScaling::DetermineScalingParametersImpl(
   const SmartPtr<const VectorSpace> x_space,
   const SmartPtr<const VectorSpace> c_space,
   const SmartPtr<const VectorSpace> d_space,
   const SmartPtr<const MatrixSpace> jac_c_space,
   const SmartPtr<const MatrixSpace> jac_d_space,
   const SmartPtr<const SymMatrixSpace> h_space,
   Number& df,
   SmartPtr<Vector>& dx,
   SmartPtr<Vector>& dc,
   SmartPtr<Vector>& dd)
{
   DBG_ASSERT(IsValid(nlp_));
   SmartPtr<Vector> x = x_space->MakeNew();
   if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                               NULL, false,
                               NULL, false,
                               NULL, false,
                               NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
   }

   //
   // Calculate grad_f scaling
   //
   SmartPtr<Vector> grad_f = x_space->MakeNew();
   if (nlp_->Eval_grad_f(*x, *grad_f)) {
      Number max_grad_f = grad_f->Amax();
      df = 1;
      if (max_grad_f > scaling_max_gradient_) {
         df = scaling_max_gradient_ / max_grad_f;
      }
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
   }
   else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n No scaling factor for objective function computed!\n");
      df = 1;
   }

   //
   // No x scaling
   //
   dx = NULL;

   //
   // Calculate c scaling
   //
   SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
   if (nlp_->Eval_jac_c(*x, *jac_c)) {
      // ToDo: Don't use TripletHelper, have special methods on matrices instead
      Index nnz = TripletHelper::GetNumberEntries(*jac_c);
      Index* irow = new Index[nnz];
      Index* jcol = new Index[nnz];
      Number* values = new Number[nnz];
      TripletHelper::FillRowCol(nnz, *jac_c, irow, jcol);
      TripletHelper::FillValues(nnz, *jac_c, values);
      Number* c_scaling = new Number[jac_c->NRows()];
      for (Index r = 0; r < jac_c->NRows(); r++) {
         c_scaling[r] = 0;
      }
      // put the max of each row into c_scaling...
      bool need_c_scale = false;
      for (Index i = 0; i < nnz; i++) {
         if (fabs(values[i]) > scaling_max_gradient_) {
            c_scaling[irow[i]-1] = Max(c_scaling[irow[i]-1], fabs(values[i]));
            need_c_scale = true;
         }
      }
      if (need_c_scale) {
         // now compute the scaling factor for each row
         for (Index r = 0; r < jac_c->NRows(); r++) {
            Number scaling = 1.0;
            if (c_scaling[r] > scaling_max_gradient_) {
               scaling = scaling_max_gradient_ / c_scaling[r];
            }
            c_scaling[r] = scaling;
         }
         dc = c_space->MakeNew();
         TripletHelper::PutValuesInVector(jac_c->NRows(), c_scaling, *dc);
         if (Jnlst().ProduceOutput(J_DETAILED, J_INITIALIZATION)) {
            Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                           "Equality constraints are scaled; the smallest scaling parameter is %e\n", dc->Min());
         }
      }
      else {
         Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                        "Equality constraints are not scaled.\n");
         dc = NULL;
      }
      delete [] irow;
      irow = NULL;
      delete [] jcol;
//......... (remaining code omitted) .........