本文整理汇总了C++中Eigen::VectorXd::cwiseMax方法的典型用法代码示例。如果您正苦于以下问题:C++ VectorXd::cwiseMax方法的具体用法?C++ VectorXd::cwiseMax怎么用?C++ VectorXd::cwiseMax使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Eigen::VectorXd
的用法示例。
在下文中一共展示了VectorXd::cwiseMax方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: omxSD
// Gradient-descent optimizer with a backtracking line search.
// Each major iteration evaluates the gradient at the current point, steps
// along the normalized negative gradient, clamps the candidate to the box
// bounds (solLB/solUB) with cwiseMax/cwiseMin, and shrinks the step size
// until a lower fit is found. Converges when the gradient is exactly zero
// or no improving step can be found.
// NOTE(review): this chunk ends with an elision marker; the tail of the
// major-iteration loop and the function epilogue are not shown here.
void omxSD(GradientOptimizerContext &rf)
{
int maxIter = rf.maxMajorIterations;
if (maxIter == -1) maxIter = 50000;  // -1 means "unlimited"; use a large default cap
Eigen::VectorXd currEst(rf.numFree);
rf.copyToOptimizer(currEst.data());  // pull starting free-parameter values into currEst
int iter = 0;
// priorSpeed carries the successful step size across major iterations;
// shrinkage is the backtracking factor applied when a step fails to improve.
double priorSpeed = 1.0, shrinkage = 0.7;
rf.setupSimpleBounds();
rf.informOut = INFORM_UNINITIALIZED;
{
// Evaluate the fit once at the starting values; mode == -1 signals an
// infeasible/failed evaluation, in which case we abort immediately.
int mode = 0;
rf.solFun(currEst.data(), &mode);
if (mode == -1) {
rf.informOut = INFORM_STARTING_VALUES_INFEASIBLE;
return;
}
}
double refFit = rf.getFit();  // best (reference) fit seen so far
rf.grad.resize(rf.numFree);
fit_functional ff(rf);
Eigen::VectorXd majorEst = currEst;  // current accepted point
while(++iter < maxIter && !isErrorRaised()) {
// Numerically estimate the gradient at majorEst into rf.grad.
gradient_with_ref(rf.gradientAlgo, 1, rf.gradientIterations, rf.gradientStepSize,
ff, refFit, majorEst, rf.grad);
if (rf.verbose >= 3) mxPrintMat("grad", rf.grad);
if(rf.grad.norm() == 0)
{
// Exact-zero gradient: treat as converged at an optimum.
rf.informOut = INFORM_CONVERGED_OPTIMUM;
if(rf.verbose >= 2) mxLog("After %i iterations, gradient achieves zero!", iter);
break;
}
// Backtracking line search: up to ~300 step-size reductions.
// NOTE(review): with the pre-decrement test, the loop body runs at most
// 299 times — presumably intentional or harmless; confirm if exact count matters.
int retries = 300;
double speed = std::min(priorSpeed, 1.0);  // never start faster than unit step
double bestSpeed = speed;
bool foundBetter = false;
Eigen::VectorXd bestEst(majorEst.size());
Eigen::VectorXd prevEst(majorEst.size());
// Normalized descent direction (negative of it is applied below).
Eigen::VectorXd searchDir = rf.grad;
searchDir /= searchDir.norm();
prevEst.setConstant(nan("uninit"));  // sentinel: first comparison below can't match
while (--retries > 0 && !isErrorRaised()){
Eigen::VectorXd nextEst = majorEst - speed * searchDir;
// Project the candidate onto the box constraints [solLB, solUB].
nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
// If clamping pinned us to the same point as last time, shrinking
// further cannot change anything — give up this line search.
if (nextEst == prevEst) break;
prevEst = nextEst;
rf.checkActiveBoxConstraints(nextEst);
int mode = 0;
// NOTE(review): mode is not inspected after this call (unlike the
// startup check above) — presumably a failed evaluation yields a fit
// that is not < refFit; confirm against solFun's contract.
double fit = rf.solFun(nextEst.data(), &mode);
if (fit < refFit) {
// Accept the first improving step (Armijo-style sufficient decrease
// is not required — any decrease wins).
foundBetter = true;
refFit = rf.getFit();
bestSpeed = speed;
bestEst = nextEst;
break;
}
speed *= shrinkage;  // backtrack: try a smaller step
}
// Disabled experiment: after a success, probe slightly larger steps.
if (false && foundBetter) {
// In some tests, this did not help so it is not enabled.
// It might be worth testing more.
mxLog("trying larger step size");
retries = 3;
while (--retries > 0 && !isErrorRaised()){
speed *= 1.01;
Eigen::VectorXd nextEst = majorEst - speed * searchDir;
nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
rf.checkActiveBoxConstraints(nextEst);
int mode = 0;
double fit = rf.solFun(nextEst.data(), &mode);
if (fit < refFit) {
foundBetter = true;
refFit = rf.getFit();
bestSpeed = speed;
bestEst = nextEst;
}
}
}
if (!foundBetter) {
// Line search exhausted without improvement: report convergence.
rf.informOut = INFORM_CONVERGED_OPTIMUM;
if(rf.verbose >= 2) mxLog("After %i iterations, cannot find better estimation along the gradient direction", iter);
break;
}
if (rf.verbose >= 2) mxLog("major fit %f bestSpeed %g", refFit, bestSpeed);
majorEst = bestEst;  // commit the accepted step
//......... part of the code is omitted here .........