This article collects typical usage examples of the C++ method AbstractLinAlgPack::Mp_StMtMtM. If you are wondering how to call AbstractLinAlgPack::Mp_StMtMtM, or what it is useful for, the selected code example below may help. You can also read further about the enclosing namespace AbstractLinAlgPack in which the method is defined.
One code example of the AbstractLinAlgPack::Mp_StMtMtM method is shown below.
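Before the full example, here is a minimal hedged sketch of the call pattern as it appears in it. This is not a standalone program: it assumes the MOOCHO/AbstractLinAlgPack types used in the example (a DMatrixSliceSym target rHL_sym, a symmetric operator reached through HL_sym_op, and a null-space basis matrix Z), and the reading of the call as the congruence product rHL = Z' * HL * Z is inferred from how the result is used as the reduced Hessian in the example.

// Hedged sketch of the call pattern used in Example 1 below (not standalone).
// It overwrites the symmetric dense view rHL_sym with the product formed from
// the symmetric operator *HL_sym_op and the basis matrix Z, i.e. (as used there)
//   rHL_sym = 1.0 * Z' * HL * Z + 0.0 * rHL_sym
// MatrixSymOp::DUMMY_ARG appears to be a placeholder argument that selects this
// overload, and BLAS_Cpp::no_trans selects op(Z) = Z.
using AbstractLinAlgPack::Mp_StMtMtM;
Mp_StMtMtM( &rHL_sym, 1.0, MatrixSymOp::DUMMY_ARG, *HL_sym_op
  , Z, BLAS_Cpp::no_trans, 0.0 );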
Example 1: do_step
bool ReducedHessianExactStd_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  , poss_type assoc_step_poss)
{
  using Teuchos::dyn_cast;
  using DenseLinAlgPack::nonconst_sym;
  using AbstractLinAlgPack::Mp_StMtMtM;
  typedef AbstractLinAlgPack::MatrixSymDenseInitialize  MatrixSymDenseInitialize;
  typedef AbstractLinAlgPack::MatrixSymOp               MatrixSymOp;
  using ConstrainedOptPack::NLPSecondOrder;

  NLPAlgo       &algo = rsqp_algo(_algo);
  NLPAlgoState  &s    = algo.rsqp_state();
  NLPSecondOrder
#ifdef _WINDOWS
    &nlp = dynamic_cast<NLPSecondOrder&>(algo.nlp());
#else
    &nlp = dyn_cast<NLPSecondOrder>(algo.nlp());
#endif
  MatrixSymOp
    *HL_sym_op = dynamic_cast<MatrixSymOp*>(&s.HL().get_k(0));

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream&       out    = algo.track().journal_out();

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // problem size
  size_type n    = nlp.n(),
            r    = nlp.r(),
            nind = n - r;
  // Compute HL first (you may want to move this into its own step later)
  if( !s.lambda().updated_k(-1) ) {
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "Initializing lambda_km1 = nlp.get_init_lagrange_mult(...) ...\n";
    }
    nlp.get_init_lagrange_mult( &s.lambda().set_k(-1).v(), NULL );
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "||lambda_km1||inf = " << s.lambda().get_k(-1).norm_inf() << std::endl;
    }
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
      out << "lambda_km1 = \n" << s.lambda().get_k(-1)();
    }
  }
  nlp.set_HL( HL_sym_op );
  nlp.calc_HL( s.x().get_k(0)(), s.lambda().get_k(-1)(), false );
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
    s.HL().get_k(0).output( out << "\nHL_k = \n" );
  }
  // If rHL has already been updated for this iteration then just leave it.
  if( !s.rHL().updated_k(0) ) {

    if( !HL_sym_op ) {
      std::ostringstream omsg;
      omsg
        << "ReducedHessianExactStd_Step::do_step(...) : Error, "
        << "The matrix HL with the concrete type "
        << typeName(s.HL().get_k(0)) << " does not support the "
        << "MatrixSymOp interface";
      throw std::logic_error( omsg.str() );
    }

    MatrixSymDenseInitialize
      *rHL_sym_init = dynamic_cast<MatrixSymDenseInitialize*>(&s.rHL().set_k(0));
    if( !rHL_sym_init ) {
      std::ostringstream omsg;
      omsg
        << "ReducedHessianExactStd_Step::do_step(...) : Error, "
        << "The matrix rHL with the concrete type "
        << typeName(s.rHL().get_k(0)) << " does not support the "
        << "MatrixSymDenseInitialize interface";
      throw std::logic_error( omsg.str() );
    }

    // Compute the dense reduced Hessian rHL = Z' * HL * Z
    DMatrix rHL_sym_store(nind,nind);
    DMatrixSliceSym rHL_sym( rHL_sym_store(), BLAS_Cpp::lower );
    Mp_StMtMtM( &rHL_sym, 1.0, MatrixSymOp::DUMMY_ARG, *HL_sym_op
      , s.Z().get_k(0), BLAS_Cpp::no_trans, 0.0 );

    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
      out << "\nLower triangular portion of the dense reduced Hessian "
          << "(ignore nonzeros above the diagonal):\nrHL_dense = \n"
          << rHL_sym_store();
    }

    // Set the reduced Hessian
    rHL_sym_init->initialize( rHL_sym );

    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
      s.rHL().get_k(0).output( out << "\nrHL_k = \n" );
    }
  }
//......... part of the code is omitted here .........