This article collects typical usage examples of the C++ method TimeSeries::cbegin_values. If you are wondering what TimeSeries::cbegin_values does or how to use it in practice, the selected example below should help. You can also explore further usage of the containing class, TimeSeries.
The following shows 1 code example of the TimeSeries::cbegin_values method.
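Before the full example, here is a minimal sketch (not from the original test) of what cbegin_values and cend_values provide: const iterators over the stored values of a QuantLib TimeSeries, without their associated dates, which can be passed directly to standard algorithms. The dates and values below are made up for illustration, and the sketch assumes QuantLib is available.

#include <ql/timeseries.hpp>
#include <ql/time/date.hpp>
#include <iostream>
#include <numeric>

using namespace QuantLib;

int main() {
    // A small, hypothetical series of volatilities keyed by date
    TimeSeries<Volatility> ts;
    ts[Date(7, July, 1962)] = 0.20;
    ts[Date(8, July, 1962)] = 0.30;
    ts[Date(9, July, 1962)] = 0.25;

    // cbegin_values()/cend_values() iterate over the values only,
    // so they can feed standard algorithms such as std::accumulate
    Volatility sum = std::accumulate(ts.cbegin_values(), ts.cend_values(), 0.0);
    std::cout << "mean value = " << sum / ts.size() << std::endl;
    return 0;
}

In the test below, the same pair of iterators is passed to the model's costFunction to evaluate the calibration objective over the whole series.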
Example 1: testCalibration
void GARCHTest::testCalibration() {
    BOOST_TEST_MESSAGE("Testing GARCH model calibration ...");
    Date start(7, July, 1962), d = start;
    TimeSeries<Volatility> ts;
    Garch11 garch(0.2, 0.3, 0.4);
    GaussianGenerator rng(MersenneTwisterUniformRng(48));

    // Simulate a daily return series from the known GARCH(1,1) model above
    Volatility r = 0.0, v = 0.0;
    for (std::size_t i = 0; i < 50000; ++i, d += 1) {
        v = garch.forecast(r, v);
        r = rng.next().value * std::sqrt(v);
        ts[d] = r;
    }
    // Default calibration; works fine in most cases
    Garch11 cgarch1(ts);

    // The cost function takes the value range of the series via
    // cbegin_values()/cend_values()
    Real f1 = cgarch1.logLikelihood();
    Real f2 = -cgarch1.costFunction(ts.cbegin_values(), ts.cend_values(),
                                    garch.alpha(), garch.beta(), garch.omega());

    Results calibrated = { 0.207592, 0.281979, 0.204647, -0.0217413 };
    CHECK(calibrated, cgarch1, alpha, tolerance);
    CHECK(calibrated, cgarch1, beta, tolerance);
    CHECK(calibrated, cgarch1, omega, tolerance);
    CHECK(calibrated, cgarch1, logLikelihood, tolerance);
    // Type 1 initial guess - no further optimization
    Garch11 cgarch2(ts, Garch11::MomentMatchingGuess);
    DummyOptimizationMethod m;
    cgarch2.calibrate(ts, m, EndCriteria(3, 2, 0.0, 0.0, 0.0));
    Results expected1 = { 0.265749, 0.156956, 0.230964, -0.0227179 };
    CHECK(expected1, cgarch2, alpha, tolerance);
    CHECK(expected1, cgarch2, beta, tolerance);
    CHECK(expected1, cgarch2, omega, tolerance);
    CHECK(expected1, cgarch2, logLikelihood, tolerance);

    // Optimization from this initial guess
    cgarch2.calibrate(ts);
    CHECK(calibrated, cgarch2, alpha, tolerance);
    CHECK(calibrated, cgarch2, beta, tolerance);
    CHECK(calibrated, cgarch2, omega, tolerance);
    CHECK(calibrated, cgarch2, logLikelihood, tolerance);

    // Type 2 initial guess - no further optimization
    Garch11 cgarch3(ts, Garch11::GammaGuess);
    cgarch3.calibrate(ts, m, EndCriteria(3, 2, 0.0, 0.0, 0.0));
    Results expected2 = { 0.269896, 0.211373, 0.207534, -0.022798 };
    CHECK(expected2, cgarch3, alpha, tolerance);
    CHECK(expected2, cgarch3, beta, tolerance);
    CHECK(expected2, cgarch3, omega, tolerance);
    CHECK(expected2, cgarch3, logLikelihood, tolerance);

    // Optimization from this initial guess
    cgarch3.calibrate(ts);
    CHECK(calibrated, cgarch3, alpha, tolerance);
    CHECK(calibrated, cgarch3, beta, tolerance);
    CHECK(calibrated, cgarch3, omega, tolerance);
    CHECK(calibrated, cgarch3, logLikelihood, tolerance);

    // Double optimization using type 1 and 2 initial guesses
    Garch11 cgarch4(ts, Garch11::DoubleOptimization);
    cgarch4.calibrate(ts);
    CHECK(calibrated, cgarch4, alpha, tolerance);
    CHECK(calibrated, cgarch4, beta, tolerance);
    CHECK(calibrated, cgarch4, omega, tolerance);
    CHECK(calibrated, cgarch4, logLikelihood, tolerance);

    // Alternative, gradient based optimization - usually gives worse
    // results than simplex
    LevenbergMarquardt lm;
    cgarch4.calibrate(ts, lm, EndCriteria(100000, 500, 1e-8, 1e-8, 1e-8));
    Results expected3 = { 0.265196, 0.277364, 0.678812, -0.216313 };
    CHECK(expected3, cgarch4, alpha, tolerance);
    CHECK(expected3, cgarch4, beta, tolerance);
    CHECK(expected3, cgarch4, omega, tolerance);
    CHECK(expected3, cgarch4, logLikelihood, tolerance);
}