本文整理汇总了C++中SX::nnz方法的典型用法代码示例。如果您正苦于以下问题:C++ SX::nnz方法的具体用法?C++ SX::nnz怎么用?C++ SX::nnz使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SX的用法示例。
在下文中一共展示了SX::nnz方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: init
// Initialize the AMPL interface plugin: process user options, extract the
// NLP expressions (f, g) from the SX oracle, and begin emitting the AMPL
// .nl problem header into nl_init_.
// NOTE: this listing is truncated (remainder of the body is omitted below).
void AmplInterface::init(const Dict& opts) {
  // Call the init method of the base class
  Nlpsol::init(opts);
  // Set default options
  solver_ = "ipopt";
  // Read user options
  for (auto&& op : opts) {
    if (op.first=="solver") {
      // NOTE(review): this assigns the option *key* ("solver"), not its
      // value; op.second (converted to string) looks intended -- confirm.
      solver_ = op.first;
    }
  }
  // Extract the expressions; only an SX-based oracle is supported
  casadi_assert(oracle().is_a("SXFunction"),
    "Only SX supported currently.");
  vector<SX> xp = oracle().sx_in();
  vector<SX> fg = oracle()(xp);
  // Get x, p, f and g
  SX x = xp.at(NL_X);
  SX p = xp.at(NL_P);
  SX f = fg.at(NL_F);
  SX g = fg.at(NL_G);
  casadi_assert(p.is_empty(), "'p' currently not supported");
  // Names of the variables, constraints (e.g. "x[0]", "g[0]")
  vector<string> x_name, g_name;
  for (casadi_int i=0; i<nx_; ++i) x_name.push_back("x[" + str(i) + "]");
  for (casadi_int i=0; i<ng_; ++i) g_name.push_back("g[" + str(i) + "]");
  // The last generated name is the longest since indices grow monotonically.
  // NOTE(review): x_name.back() is unguarded -- crashes if nx_ == 0, unlike
  // the g_name case below which checks for empty; confirm nx_ > 0 is an
  // invariant here.
  casadi_int max_x_name = x_name.back().size();
  casadi_int max_g_name = g_name.empty() ? 0 : g_name.back().size();
  // Calculate the Jacobian, gradient sparsity patterns (for nnz counts below)
  Sparsity jac_g = SX::jacobian(g, x).sparsity();
  Sparsity jac_f = SX::jacobian(f, x).sparsity();
  // Extract the shared subexpressions: v are the new intermediate variables,
  // vdef their defining expressions; f and g are rewritten in terms of v
  vector<SX> ex = {f, g}, v, vdef;
  shared(ex, v, vdef);
  f = ex[0];
  g = ex[1];
  // Header: .nl text format magic line
  nl_init_ << "g3 1 1 0\n";
  // Type of constraints
  nl_init_ << nx_ << " "  // number of variables
      << ng_ << " "       // number of constraints
      << 1 << " "         // number of objectives
      << 0 << " "         // number of ranges
      << 0 << " "         // ?
      << 0 << "\n";       // number of logical constraints
  // Nonlinearity - assume all nonlinear for now TODO: Detect
  nl_init_ << ng_ << " "  // nonlinear constraints
      << 1 << "\n";       // nonlinear objectives
  // Network constraints
  nl_init_ << 0 << " "    // nonlinear
      << 0 << "\n";       // linear
  // Nonlinear variables
  nl_init_ << nx_ << " "  // in constraints
      << nx_ << " "       // in objectives
      << nx_ << "\n";     // in both
  // Linear network ..
  nl_init_ << 0 << " "    // .. variables ..
      << 0 << " "         // .. arith ..
      << 0 << " "         // .. functions ..
      << 0 << "\n";       // .. flags
  // Discrete variables
  nl_init_ << 0 << " "    // binary
      << 0 << " "         // integer
      << 0 << " "         // nonlinear in both
      << 0 << " "         // nonlinear in constraints
      << 0 << "\n";       // nonlinear in objective
  // Nonzeros in the Jacobian, gradients
  nl_init_ << jac_g.nnz() << " "  // nnz in Jacobian
      << jac_f.nnz() << "\n";     // nnz in gradients
  // Maximum name length
  // NOTE(review): the labels below look swapped -- max_x_name is computed
  // from *variable* names yet labeled "constraints" (and vice versa);
  // confirm the expected field order against the .nl format spec.
  nl_init_ << max_x_name << " "  // constraints
      << max_g_name << "\n";     // variables
  // Shared subexpressions
  nl_init_ << v.size() << " "  // both
      << 0 << " "              // constraints
      << 0 << " "              // objective
      << 0 << " "              // c1 - constraint, but linear?
      << 0 << "\n";            // o1 - objective, but linear?
  // Create a function which evaluates f and g
  Function F("F", {vertcat(v), x}, {vertcat(vdef), f, g},
    {"v", "x"}, {"vdef", "f", "g"});
  // Iterate over the algorithm
  // ... remainder of the function body omitted in this listing ...