本文整理汇总了C++中BlockMatrix::addDense方法的典型用法代码示例。如果您正苦于以下问题:C++ BlockMatrix::addDense方法的具体用法?C++ BlockMatrix::addDense怎么用?C++ BlockMatrix::addDense使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BlockMatrix
的用法示例。
在下文中一共展示了BlockMatrix::addDense方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: evaluateSensitivities
/// Evaluates first- and second-order sensitivities of the two-point boundary
/// constraint. fcn[0] is the constraint function attached to the start point,
/// fcn[1] the one attached to the end point. "seed" (nc x 1) supplies the
/// backward weights, first-order derivatives go into dBackward, and (negated)
/// Hessian contributions are added into "hessian".
///
/// NOTE(review): this listing is truncated ("remainder of the code omitted"
/// in the original page); the symmetric treatment of fcn[1] and the
/// delete[] of the heap buffers below are not visible here -- confirm
/// against the full source file.
returnValue BoundaryConstraint::evaluateSensitivities( const DMatrix &seed, BlockMatrix &hessian ){
// EVALUATION OF THE SENSITIVITIES:
// --------------------------------
int run1, run2;
const int nc = getNC();
const int N = grid.getNumPoints();
// One backward-seed entry per constraint row is required.
ASSERT( (int) seed.getNumRows() == nc );
// Scratch buffers for AD seeds/results. They are presumably freed in the
// omitted tail of the function -- TODO confirm; otherwise they would leak.
double *bseed1 = new double[nc];
double *bseed2 = new double[nc];
double *R = new double[nc];
double *J1 = new double[fcn[0].getNumberOfVariables() +1];
double *H1 = new double[fcn[0].getNumberOfVariables() +1];
double *fseed1 = new double[fcn[0].getNumberOfVariables() +1];
double *J2 = new double[fcn[1].getNumberOfVariables() +1];
double *H2 = new double[fcn[1].getNumberOfVariables() +1];
double *fseed2 = new double[fcn[1].getNumberOfVariables() +1];
// First-order backward seed comes from "seed"; second-order seed is zero.
for( run1 = 0; run1 < nc; run1++ ){
bseed1[run1] = seed(run1,0);
bseed2[run1] = 0.0;
}
// Clear the forward seed vectors of both constraint functions.
for( run1 = 0; run1 < fcn[0].getNumberOfVariables()+1; run1++ )
fseed1[run1] = 0.0;
for( run1 = 0; run1 < fcn[1].getNumberOfVariables()+1; run1++ )
fseed2[run1] = 0.0;
// One block row, 5*N block columns (per grid point: x, xa, p, u, w).
dBackward.init( 1, 5*N );
DMatrix Dx ( nc, nx );
DMatrix Dxa( nc, na );
DMatrix Dp ( nc, np );
DMatrix Du ( nc, nu );
DMatrix Dw ( nc, nw );
DMatrix Hx ( nx, nx );
DMatrix Hxa( nx, na );
DMatrix Hp ( nx, np );
DMatrix Hu ( nx, nu );
DMatrix Hw ( nx, nw );
// --- Start-point function fcn[0]: differentiate w.r.t. each state ---
for( run2 = 0; run2 < nx; run2++ ){
// FIRST ORDER DERIVATIVES:
// ------------------------
// Forward AD with a unit seed in the run2-th state yields column run2 of Dx.
fseed1[y_index[0][run2]] = 1.0;
fcn[0].AD_forward( 0, fseed1, R );
for( run1 = 0; run1 < nc; run1++ )
Dx( run1, run2 ) = R[run1];
fseed1[y_index[0][run2]] = 0.0;
// SECOND ORDER DERIVATIVES:
// -------------------------
// Reset Jacobian/Hessian accumulators before the second-order sweep.
for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
J1[run1] = 0.0;
H1[run1] = 0.0;
}
fcn[0].AD_backward2( 0, bseed1, bseed2, J1, H1 );
// Scatter row run2 of the (negated -- sign convention) Hessian into the
// per-class blocks; the variable vector is ordered [x | xa | p | u | w].
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H1[y_index[0][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H1[y_index[0][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H1[y_index[0][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H1[y_index[0][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
}
// Start point lives in block column 0 (x), N (xa), 2*N (p), 3*N (u), 4*N (w).
if( nx > 0 ){
dBackward.setDense( 0, 0, Dx );
if( nx > 0 ) hessian.addDense( 0, 0, Hx );
if( na > 0 ) hessian.addDense( 0, N, Hxa );
if( np > 0 ) hessian.addDense( 0, 2*N, Hp );
if( nu > 0 ) hessian.addDense( 0, 3*N, Hu );
if( nw > 0 ) hessian.addDense( 0, 4*N, Hw );
}
// Re-initialise the Hessian blocks for the end-point function fcn[1].
Hx.init ( nx, nx );
Hxa.init( nx, na );
Hp.init ( nx, np );
Hu.init ( nx, nu );
Hw.init ( nx, nw );
// --- End-point function fcn[1]: same scheme as for fcn[0] above ---
for( run2 = 0; run2 < nx; run2++ ){
// FIRST ORDER DERIVATIVES:
// ------------------------
fseed2[y_index[1][run2]] = 1.0;
fcn[1].AD_forward( 0, fseed2, R );
for( run1 = 0; run1 < nc; run1++ )
//......... remainder of the code omitted in the original listing .........
示例2: evaluateSensitivities
/// Evaluates first- and second-order sensitivities of a point constraint
/// located at grid point "point_index". First-order derivatives go into
/// dBackward; (negated) Hessian contributions weighted by the backward seed
/// are added into "hessian".
///
/// NOTE(review): this listing is truncated ("remainder of the code omitted"
/// in the original page); the algebraic-state loop's tail, any remaining
/// variable classes, and the delete[] of the heap buffers below are not
/// visible here -- confirm against the full source file.
returnValue PointConstraint::evaluateSensitivities( const DMatrix &seed, BlockMatrix &hessian ){
// EVALUATION OF THE SENSITIVITIES:
// --------------------------------
int run1, run2;
// Guard against use before the constraint function was set up.
if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
const int nc = fcn[0].getDim();
const int N = grid.getNumPoints();
// One backward-seed entry per constraint row is required.
ASSERT( (int) seed.getNumRows() == nc );
// Scratch buffers for AD seeds/results. They are presumably freed in the
// omitted tail of the function -- TODO confirm; otherwise they would leak.
double *bseed1 = new double[nc];
double *bseed2 = new double[nc];
double *R = new double[nc];
double *J = new double[fcn[0].getNumberOfVariables() +1];
double *H = new double[fcn[0].getNumberOfVariables() +1];
double *fseed = new double[fcn[0].getNumberOfVariables() +1];
// First-order backward seed comes from "seed"; second-order seed is zero.
for( run1 = 0; run1 < nc; run1++ ){
bseed1[run1] = seed(run1,0);
bseed2[run1] = 0.0;
}
// Clear the forward seed vector.
for( run1 = 0; run1 < fcn[0].getNumberOfVariables()+1; run1++ )
fseed[run1] = 0.0;
// One block row, 5*N block columns (per grid point: x, xa, p, u, w).
dBackward.init( 1, 5*N );
DMatrix Dx ( nc, nx );
DMatrix Dxa( nc, na );
DMatrix Dp ( nc, np );
DMatrix Du ( nc, nu );
DMatrix Dw ( nc, nw );
DMatrix Hx ( nx, nx );
DMatrix Hxa( nx, na );
DMatrix Hp ( nx, np );
DMatrix Hu ( nx, nu );
DMatrix Hw ( nx, nw );
// --- Differentiate w.r.t. each differential state ---
for( run2 = 0; run2 < nx; run2++ ){
// FIRST ORDER DERIVATIVES:
// ------------------------
// Forward AD with a unit seed in the run2-th state yields column run2 of Dx.
fseed[y_index[0][run2]] = 1.0;
fcn[0].AD_forward( 0, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Dx( run1, run2 ) = R[run1];
fseed[y_index[0][run2]] = 0.0;
// SECOND ORDER DERIVATIVES:
// -------------------------
// Reset Jacobian/Hessian accumulators before the second-order sweep.
for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
J[run1] = 0.0;
H[run1] = 0.0;
}
fcn[0].AD_backward2( 0, bseed1, bseed2, J, H );
// Scatter row run2 of the (negated -- sign convention) Hessian into the
// per-class blocks; the variable vector is ordered [x | xa | p | u | w].
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H[y_index[0][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H[y_index[0][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H[y_index[0][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H[y_index[0][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H[y_index[0][run1]];
}
// The point's blocks sit at column offsets point_index, N+point_index, ...
if( nx > 0 ){
dBackward.setDense( 0, point_index, Dx );
if( nx > 0 ) hessian.addDense( point_index, point_index, Hx );
if( na > 0 ) hessian.addDense( point_index, N + point_index, Hxa );
if( np > 0 ) hessian.addDense( point_index, 2*N + point_index, Hp );
if( nu > 0 ) hessian.addDense( point_index, 3*N + point_index, Hu );
if( nw > 0 ) hessian.addDense( point_index, 4*N + point_index, Hw );
}
// Re-initialise the Hessian blocks, now with na rows, for the algebraic states.
Hx.init ( na, nx );
Hxa.init( na, na );
Hp.init ( na, np );
Hu.init ( na, nu );
Hw.init ( na, nw );
// --- Differentiate w.r.t. each algebraic state (indices nx .. nx+na-1) ---
for( run2 = nx; run2 < nx+na; run2++ ){
// FIRST ORDER DERIVATIVES:
// ------------------------
fseed[y_index[0][run2]] = 1.0;
fcn[0].AD_forward( 0, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Dxa( run1, run2-nx ) = R[run1];
fseed[y_index[0][run2]] = 0.0;
// SECOND ORDER DERIVATIVES:
// -------------------------
for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
J[run1] = 0.0;
//......... remainder of the code omitted in the original listing .........
示例3: evaluateSensitivities
/// Evaluates first- and second-order sensitivities over all shooting
/// intervals. For each interval the forward seeds (x, p, u, w) are pushed
/// through differentiateForwardBackward together with the backward seed S;
/// first-order results go into dForward, second-order contributions are
/// added into the block Hessian at offsets 0 / 2*NN / 3*NN / 4*NN for the
/// x / p / u / w variable classes respectively.
returnValue ShootingMethod::evaluateSensitivities( const BlockMatrix &seed, BlockMatrix &hessian ){

    const int NN = N + 1;   // block-row/column stride per variable class

    dForward.init( N, 5 );

    for( int run = 0; run < N; run++ ){

        Matrix X, P, U, W, D, E, HX, HP, HU, HW, S;

        // Collect the forward seeds of this interval; a seed that was never
        // set stays an empty matrix.
        if( xSeed.isEmpty() == BT_FALSE ) xSeed.getSubBlock( run, 0, X );
        if( pSeed.isEmpty() == BT_FALSE ) pSeed.getSubBlock( run, 0, P );
        if( uSeed.isEmpty() == BT_FALSE ) uSeed.getSubBlock( run, 0, U );
        if( wSeed.isEmpty() == BT_FALSE ) wSeed.getSubBlock( run, 0, W );

        // Backward seed of this interval (nx x 1).
        seed.getSubBlock( run, 0, S, nx, 1 );

        // Direction 1: differential states (E passed for the unused slots).
        if( nx > 0 ){
            ACADO_TRY( differentiateForwardBackward( run, X, E, E, E, S, D, HX, HP, HU, HW ) );
            dForward.setDense( run, 0, D );
            if( nx > 0 ) hessian.addDense( run, run, HX );
            if( np > 0 ) hessian.addDense( run, 2*NN + run, HP );
            if( nu > 0 ) hessian.addDense( run, 3*NN + run, HU );
            if( nw > 0 ) hessian.addDense( run, 4*NN + run, HW );
        }

        // Direction 2: parameters.
        if( np > 0 ){
            ACADO_TRY( differentiateForwardBackward( run, E, P, E, E, S, D, HX, HP, HU, HW ) );
            dForward.setDense( run, 2, D );
            if( nx > 0 ) hessian.addDense( 2*NN + run, run, HX );
            if( np > 0 ) hessian.addDense( 2*NN + run, 2*NN + run, HP );
            if( nu > 0 ) hessian.addDense( 2*NN + run, 3*NN + run, HU );
            if( nw > 0 ) hessian.addDense( 2*NN + run, 4*NN + run, HW );
        }

        // Direction 3: controls.
        if( nu > 0 ){
            ACADO_TRY( differentiateForwardBackward( run, E, E, U, E, S, D, HX, HP, HU, HW ) );
            dForward.setDense( run, 3, D );
            if( nx > 0 ) hessian.addDense( 3*NN + run, run, HX );
            if( np > 0 ) hessian.addDense( 3*NN + run, 2*NN + run, HP );
            if( nu > 0 ) hessian.addDense( 3*NN + run, 3*NN + run, HU );
            if( nw > 0 ) hessian.addDense( 3*NN + run, 4*NN + run, HW );
        }

        // Direction 4: disturbances.
        if( nw > 0 ){
            ACADO_TRY( differentiateForwardBackward( run, E, E, E, W, S, D, HX, HP, HU, HW ) );
            dForward.setDense( run, 4, D );
            if( nx > 0 ) hessian.addDense( 4*NN + run, run, HX );
            if( np > 0 ) hessian.addDense( 4*NN + run, 2*NN + run, HP );
            if( nu > 0 ) hessian.addDense( 4*NN + run, 3*NN + run, HU );
            if( nw > 0 ) hessian.addDense( 4*NN + run, 4*NN + run, HW );
        }
    }
    return SUCCESSFUL_RETURN;
}