本文整理匯總了C++中Eigen::VectorXd::cwiseMax方法的典型用法代碼示例。如果您正苦於以下問題:C++ VectorXd::cwiseMax方法的具體用法?C++ VectorXd::cwiseMax怎麽用?C++ VectorXd::cwiseMax使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類Eigen::VectorXd的用法示例。
在下文中一共展示了VectorXd::cwiseMax方法的1個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的C++代碼示例。
示例1: omxSD
// Steepest-descent optimizer with a backtracking (step-shrinking) line search.
// Each major iteration computes the gradient at the current estimate, then
// searches along the normalized negative-gradient direction, shrinking the
// step geometrically until the fit strictly improves.  Every candidate point
// is projected onto the box constraints via cwiseMax/cwiseMin.
// NOTE(review): this excerpt is truncated (see the "code omitted" marker at
// the end), so the tail of the major-iteration loop and the function's
// closing brace are not visible here.
void omxSD(GradientOptimizerContext &rf)
{
// -1 means "no limit requested"; fall back to a large default.
int maxIter = rf.maxMajorIterations;
if (maxIter == -1) maxIter = 50000;
Eigen::VectorXd currEst(rf.numFree);
rf.copyToOptimizer(currEst.data());
int iter = 0;
// shrinkage: geometric step-reduction factor used by the line search.
// priorSpeed: initial step length; presumably carried across major
// iterations in the omitted tail of the loop — TODO confirm.
double priorSpeed = 1.0, shrinkage = 0.7;
rf.setupSimpleBounds();
rf.informOut = INFORM_UNINITIALIZED;
{
// Evaluate the fit once at the starting values; mode == -1 signals
// that the starting point is infeasible, in which case we bail out.
int mode = 0;
rf.solFun(currEst.data(), &mode);
if (mode == -1) {
rf.informOut = INFORM_STARTING_VALUES_INFEASIBLE;
return;
}
}
// refFit is the best fit value found so far; the line search accepts a
// step only when it strictly beats this reference.
double refFit = rf.getFit();
rf.grad.resize(rf.numFree);
fit_functional ff(rf);
Eigen::VectorXd majorEst = currEst;
// Major iteration loop: one gradient evaluation + one line search per pass.
while(++iter < maxIter && !isErrorRaised()) {
gradient_with_ref(rf.gradientAlgo, 1, rf.gradientIterations, rf.gradientStepSize,
ff, refFit, majorEst, rf.grad);
if (rf.verbose >= 3) mxPrintMat("grad", rf.grad);
// An exactly-zero gradient norm means a stationary point: declare
// convergence and stop.
if(rf.grad.norm() == 0)
{
rf.informOut = INFORM_CONVERGED_OPTIMUM;
if(rf.verbose >= 2) mxLog("After %i iterations, gradient achieves zero!", iter);
break;
}
// Backtracking line search: up to 300 progressively smaller steps
// along the unit-length gradient direction.
int retries = 300;
double speed = std::min(priorSpeed, 1.0);
double bestSpeed = speed;
bool foundBetter = false;
Eigen::VectorXd bestEst(majorEst.size());
Eigen::VectorXd prevEst(majorEst.size());
Eigen::VectorXd searchDir = rf.grad;
searchDir /= searchDir.norm();
// Seed prevEst with NaN so the first candidate can never compare equal
// to it (NaN != anything), guaranteeing at least one trial step.
prevEst.setConstant(nan("uninit"));
while (--retries > 0 && !isErrorRaised()){
Eigen::VectorXd nextEst = majorEst - speed * searchDir;
// Clamp the candidate into [solLB, solUB] elementwise.
nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
// If clamping made this step identical to the previous one, further
// shrinking cannot produce a new point — give up on this direction.
if (nextEst == prevEst) break;
prevEst = nextEst;
rf.checkActiveBoxConstraints(nextEst);
int mode = 0;
double fit = rf.solFun(nextEst.data(), &mode);
if (fit < refFit) {
// Accept the first strictly-improving step (simple strict decrease,
// no sufficient-decrease condition).
foundBetter = true;
refFit = rf.getFit();
bestSpeed = speed;
bestEst = nextEst;
break;
}
speed *= shrinkage;
}
// Disabled experiment (note the `false &&`): after an improving step,
// keep growing the step by 1% at a time to see if a larger step helps.
if (false && foundBetter) {
// In some tests, this did not help so it is not enabled.
// It might be worth testing more.
mxLog("trying larger step size");
retries = 3;
while (--retries > 0 && !isErrorRaised()){
speed *= 1.01;
Eigen::VectorXd nextEst = majorEst - speed * searchDir;
nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
rf.checkActiveBoxConstraints(nextEst);
int mode = 0;
double fit = rf.solFun(nextEst.data(), &mode);
if (fit < refFit) {
foundBetter = true;
refFit = rf.getFit();
bestSpeed = speed;
bestEst = nextEst;
}
}
}
// No improving step exists along the gradient direction at any tried
// step length: treat the current point as a (local) optimum.
if (!foundBetter) {
rf.informOut = INFORM_CONVERGED_OPTIMUM;
if(rf.verbose >= 2) mxLog("After %i iterations, cannot find better estimation along the gradient direction", iter);
break;
}
if (rf.verbose >= 2) mxLog("major fit %f bestSpeed %g", refFit, bestSpeed);
// Commit the accepted step as the new major estimate.
majorEst = bestEst;
//......... remainder of the function omitted in this excerpt .........