This article collects typical usage examples of the C++ method SX::sparsity. If you have been wondering what exactly C++ SX::sparsity does, how to use it, or what calls to it look like in practice, the curated examples below may help. You can also explore further usage examples of SX, the class this method belongs to.
A total of 4 code examples of the SX::sparsity method are shown below, sorted by popularity by default.
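As a quick orientation before the examples: SX::sparsity() returns the Sparsity pattern (the set of structural nonzeros) of a matrix expression, and that pattern can in turn seed new symbolic variables of the same shape, which is exactly how the examples below use it. Here is a minimal sketch, assuming a CasADi 3.x installation (note that the examples below use the older pre-3.0 API, e.g. SXFunction, mul and ssym):

#include <casadi/casadi.hpp>
#include <iostream>

using namespace casadi;

int main() {
  SX A = SX::sym("A", 3, 3);           // dense 3-by-3 symbolic matrix
  Sparsity sp = A.sparsity();          // pattern of structural nonzeros
  SX B = SX::sym("B", sp);             // new variable sharing A's pattern
  std::cout << sp.nnz() << std::endl;  // prints 9 for a dense 3x3
  return 0;
}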
Example 1: init
void SymbolicQr::init() {
  // Call the base class initializer
  LinearSolverInternal::init();

  // Read options
  bool codegen = getOption("codegen");
  string compiler = getOption("compiler");

  // Make sure that a command processor is available
  if (codegen) {
#ifdef WITH_DL
    int flag = system(static_cast<const char*>(0));
    casadi_assert_message(flag!=0, "No command processor available");
#else // WITH_DL
    casadi_error("Codegen requires CasADi to be compiled with option \"WITH_DL\" enabled");
#endif // WITH_DL
  }

  // Symbolic expression for A
  SX A = SX::sym("A", input(0).sparsity());

  // Get the inverted column permutation
  std::vector<int> inv_colperm(colperm_.size());
  for (int k=0; k<colperm_.size(); ++k)
    inv_colperm[colperm_[k]] = k;

  // Get the inverted row permutation
  std::vector<int> inv_rowperm(rowperm_.size());
  for (int k=0; k<rowperm_.size(); ++k)
    inv_rowperm[rowperm_[k]] = k;

  // Permute the linear system
  SX Aperm = A(rowperm_, colperm_);

  // Generate the QR factorization function
  vector<SX> QR(2);
  qr(Aperm, QR[0], QR[1]);
  SXFunction fact_fcn(A, QR);

  // Optionally generate C code and load as a DLL
  if (codegen) {
    stringstream ss;
    ss << "symbolic_qr_fact_fcn_" << this;
    fact_fcn_ = dynamicCompilation(fact_fcn, ss.str(),
                                   "Symbolic QR factorization function", compiler);
  } else {
    fact_fcn_ = fact_fcn;
  }

  // Initialize the factorization function
  fact_fcn_.setOption("name", "QR_fact");
  fact_fcn_.init();

  // Symbolic expressions for the solve function
  SX Q = SX::sym("Q", QR[0].sparsity());
  SX R = SX::sym("R", QR[1].sparsity());
  SX b = SX::sym("b", input(1).size1(), 1);

  // Solve non-transposed:
  // We have Pb' * Q * R * Px * x = b  <=>  x = Px' * inv(R) * Q' * Pb * b

  // Permute the right hand sides
  SX bperm = b(rowperm_, ALL);

  // Solve the factorized system
  SX xperm = casadi::solve(R, mul(Q.T(), bperm));

  // Permute the solution back
  SX x = xperm(inv_colperm, ALL);

  // Generate the QR solve function
  vector<SX> solv_in(3);
  solv_in[0] = Q;
  solv_in[1] = R;
  solv_in[2] = b;
  SXFunction solv_fcn(solv_in, x);

  // Optionally generate C code and load as a DLL
  if (codegen) {
    stringstream ss;
    ss << "symbolic_qr_solv_fcn_N_" << this;
    solv_fcn_N_ = dynamicCompilation(solv_fcn, ss.str(), "QR_solv_N", compiler);
  } else {
    solv_fcn_N_ = solv_fcn;
  }

  // Initialize the solve function
  solv_fcn_N_.setOption("name", "QR_solv");
  solv_fcn_N_.init();

  // Solve transposed:
  // We have (Pb' * Q * R * Px)' * x = b
  //   <=>  Px' * R' * Q' * Pb * x = b
  //   <=>  x = Pb' * Q * inv(R') * Px * b

  // Permute the right hand side
  bperm = b(colperm_, ALL);

  // Solve the factorized system
  xperm = mul(Q, casadi::solve(R.T(), bperm));
//......... (remaining code omitted) .........
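Rewriting the comments above as equations: with $P_b$ and $P_x$ denoting the row and column permutations as permutation matrices, the factorization computed here is of the permuted matrix, i.e. $A = P_b^T Q R P_x$, and the two solves implement

$$Ax = b \;\Longleftrightarrow\; x = P_x^T R^{-1} Q^T P_b\, b, \qquad A^T x = b \;\Longleftrightarrow\; x = P_b^T Q\, R^{-T} P_x\, b.$$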
Example 2: value
DM value(const SX& x, const std::vector<MX>& values=std::vector<MX>()) const {
  return DM::nan(x.sparsity());
}
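A brief hypothetical call site (obj stands for an instance of the enclosing class, which the excerpt does not show): the method ignores values and returns a NaN-filled DM whose pattern is copied from x via SX::sparsity, so the shape information survives even when no numeric value is available.

SX x = SX::sym("x", 2, 2);
DM v = obj.value(x);  // every entry NaN, but v.sparsity() == x.sparsity()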
Example 3: init
void Sqpmethod::init() {
  // Call the init method of the base class
  NlpSolverInternal::init();

  // Read options
  max_iter_ = getOption("max_iter");
  max_iter_ls_ = getOption("max_iter_ls");
  c1_ = getOption("c1");
  beta_ = getOption("beta");
  merit_memsize_ = getOption("merit_memory");
  lbfgs_memory_ = getOption("lbfgs_memory");
  tol_pr_ = getOption("tol_pr");
  tol_du_ = getOption("tol_du");
  regularize_ = getOption("regularize");
  exact_hessian_ = getOption("hessian_approximation")=="exact";
  min_step_size_ = getOption("min_step_size");

  // Get/generate required functions
  gradF();
  jacG();
  if (exact_hessian_) {
    hessLag();
  }

  // Sparsity patterns for the QP
  Sparsity H_sparsity = exact_hessian_ ? hessLag().output().sparsity()
                                       : Sparsity::dense(nx_, nx_);
  H_sparsity = H_sparsity + Sparsity::diag(nx_);
  Sparsity A_sparsity = jacG().isNull() ? Sparsity(0, nx_)
                                        : jacG().output().sparsity();

  // QP solver options
  Dict qp_solver_options;
  if (hasSetOption("qp_solver_options")) {
    qp_solver_options = getOption("qp_solver_options");
  }

  // Allocate a QP solver
  qp_solver_ = QpSolver("qp_solver", getOption("qp_solver"),
                        make_map("h", H_sparsity, "a", A_sparsity),
                        qp_solver_options);

  // Lagrange multipliers of the NLP
  mu_.resize(ng_);
  mu_x_.resize(nx_);

  // Lagrange gradient in the next iterate
  gLag_.resize(nx_);
  gLag_old_.resize(nx_);

  // Current linearization point
  x_.resize(nx_);
  x_cand_.resize(nx_);
  x_old_.resize(nx_);

  // Constraint function value
  gk_.resize(ng_);
  gk_cand_.resize(ng_);

  // Hessian approximation
  Bk_ = DMatrix::zeros(H_sparsity);

  // Jacobian
  Jk_ = DMatrix::zeros(A_sparsity);

  // Bounds of the QP
  qp_LBA_.resize(ng_);
  qp_UBA_.resize(ng_);
  qp_LBX_.resize(nx_);
  qp_UBX_.resize(nx_);

  // QP solution
  dx_.resize(nx_);
  qp_DUAL_X_.resize(nx_);
  qp_DUAL_A_.resize(ng_);

  // Gradient of the objective
  gf_.resize(nx_);

  // Create Hessian update function
  if (!exact_hessian_) {
    // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
    SX Bk = SX::sym("Bk", H_sparsity);
    SX x = SX::sym("x", input(NLP_SOLVER_X0).sparsity());
    SX x_old = SX::sym("x", x.sparsity());
    SX gLag = SX::sym("gLag", x.sparsity());
    SX gLag_old = SX::sym("gLag_old", x.sparsity());

    SX sk = x - x_old;
    SX yk = gLag - gLag_old;
    SX qk = mul(Bk, sk);

    // Calculating theta
    SX skBksk = inner_prod(sk, qk);
    SX omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                       0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                       1);
    yk = omega * yk + (1 - omega) * qk;
    SX theta = 1. / inner_prod(sk, yk);
    SX phi = 1. / inner_prod(qk, sk);
//......... (remaining code omitted) .........
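The omitted tail of this example presumably assembles the Powell-damped BFGS update that the expressions above prepare. In standard notation (a sketch; the final update line is not shown in the source), with $s = x - x_{\text{old}}$, $y = g_{\text{Lag}} - g_{\text{Lag,old}}$ and $q = Bs$:

$$\omega = \begin{cases} \dfrac{0.8\, s^T B s}{s^T B s - s^T y}, & y^T s < 0.2\, s^T B s,\\[1ex] 1, & \text{otherwise,} \end{cases} \qquad \bar y = \omega\, y + (1 - \omega)\, q,$$

$$B_{+} = B - \phi\, q q^T + \theta\, \bar y \bar y^T, \qquad \theta = \frac{1}{s^T \bar y}, \quad \phi = \frac{1}{q^T s},$$

which matches skBksk, omega, theta and phi in the code.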
Example 4: init
//......... (preceding code omitted) .........
  SX f2_z = ex[1];
  SX f_z = ex[2];

  // Modified function Z
  enum ZIn{Z_U, Z_D, Z_LAM_X, Z_LAM_F2, Z_NUM_IN};
  SXVector zfcn_in(Z_NUM_IN);
  zfcn_in[Z_U] = u;
  zfcn_in[Z_D] = d;
  zfcn_in[Z_LAM_X] = lam_x;
  zfcn_in[Z_LAM_F2] = lam_f2;

  enum ZOut{Z_D_DEF, Z_F12, Z_NUM_OUT};
  SXVector zfcn_out(Z_NUM_OUT);
  zfcn_out[Z_D_DEF] = d_def;
  zfcn_out[Z_F12] = vertcat(f1_z, f2_z);

  SXFunction zfcn(zfcn_in, zfcn_out);
  zfcn.init();
  if (verbose_) {
    cout << "Generated reconstruction function (" << zfcn.getAlgorithmSize() << " nodes)." << endl;
  }

  // Matrices B1 and B2 in the lifted Newton method
  SX B = zfcn.jac(Z_U, Z_F12);
  SX B1 = B(Slice(0, nf1), Slice(0, B.size2()));
  SX B2 = B(Slice(nf1, B.size1()), Slice(0, B.size2()));
  if (verbose_) {
    cout << "Formed B1 (dimension " << B1.size1() << "-by-" << B1.size2() << ", " << B1.size() << " nonzeros) "
         << "and B2 (dimension " << B2.size1() << "-by-" << B2.size2() << ", " << B2.size() << " nonzeros)." << endl;
  }

  // Step in u
  SX du = ssym("du", nu);
  SX dlam_f2 = ssym("dlam_f2", lam_f2.sparsity());

  SX b1 = f1_z;
  SX b2 = f2_z;
  SX e;
  if (nv > 0) {
    // Directional derivatives of Z
    vector<vector<SX> > Z_fwdSeed(2, zfcn_in);
    vector<vector<SX> > Z_fwdSens(2, zfcn_out);
    vector<vector<SX> > Z_adjSeed;
    vector<vector<SX> > Z_adjSens;

    Z_fwdSeed[0][Z_U].setZero();
    Z_fwdSeed[0][Z_D] = -d;
    Z_fwdSeed[0][Z_LAM_X].setZero();
    Z_fwdSeed[0][Z_LAM_F2].setZero();

    Z_fwdSeed[1][Z_U] = du;
    Z_fwdSeed[1][Z_D] = -d;
    Z_fwdSeed[1][Z_LAM_X].setZero();
    Z_fwdSeed[1][Z_LAM_F2] = dlam_f2;

    zfcn.eval(zfcn_in, zfcn_out, Z_fwdSeed, Z_fwdSens, Z_adjSeed, Z_adjSens);

    b1 += Z_fwdSens[0][Z_F12](Slice(0, nf1));
    b2 += Z_fwdSens[0][Z_F12](Slice(nf1, B.size1()));
    e = Z_fwdSens[1][Z_D_DEF];
  }
  if (verbose_) {
    cout << "Formed b1 (dimension " << b1.size1() << "-by-" << b1.size2() << ", " << b1.size() << " nonzeros) "
         << "and b2 (dimension " << b2.size1() << "-by-" << b2.size2() << ", " << b2.size() << " nonzeros)." << endl;
  }