本文整理汇总了C++中BaseLearner::copyState方法的典型用法代码示例。如果您正苦于以下问题:C++ BaseLearner::copyState方法的具体用法?C++ BaseLearner::copyState怎么用?C++ BaseLearner::copyState使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BaseLearner
的用法示例。
在下文中一共展示了BaseLearner::copyState方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: run
// Trains a product of _numBaseLearners base learners by coordinate descent:
// each base learner is retrained in turn while the others are held fixed,
// folding the fixed learners' outputs into (temporarily modified) example
// labels. Iterates round-robin until the energy stops decreasing.
// Returns the energy of the product learner (lower is better).
// NOTE(review): this snippet is truncated below (restore of saved labels and
// the final return are not visible here).
AlphaReal ProductLearner::run()
{
const int numClasses = _pTrainingData->getNumClasses();
const int numExamples = _pTrainingData->getNumExamples();
// Backup original labels
// (labels are mutated in place during training and must be restorable later)
for (int i = 0; i < numExamples; ++i) {
const vector<Label>& labels = _pTrainingData->getLabels(i);
vector<char> exampleLabels;
for (int l = 0; l < numClasses; ++l)
exampleLabels.push_back(labels[l].y);
_savedLabels.push_back(exampleLabels);
}
// Give every base learner access to the shared training data.
for(int ib = 0; ib < _numBaseLearners; ++ib)
_baseLearners[ib]->setTrainingData(_pTrainingData);
// Start at +infinity so the first trained learner always counts as an improvement.
AlphaReal energy = numeric_limits<AlphaReal>::max();
AlphaReal previousEnergy, hx, previousAlpha;
BaseLearner* pPreviousBaseLearner = 0;
bool firstLoop = true;
int ib = -1;
// Round-robin over the base learners until energy stops improving.
while (1) {
ib += 1;
if (ib >= _numBaseLearners) {
// Wrap around: from now on every learner has been trained at least once.
ib = 0;
firstLoop = false;
}
previousEnergy = energy;
previousAlpha = _alpha;
// Discard the snapshot of the previously retrained learner.
if (pPreviousBaseLearner)
delete pPreviousBaseLearner;
if ( !firstLoop ) {
// take the old learner off the labels
// (divide out learner ib's sign so it can be retrained against the rest)
for (int i = 0; i < numExamples; ++i) {
vector<Label>& labels = _pTrainingData->getLabels(i);
for (int l = 0; l < numClasses; ++l) {
// Here we could have the option of using confidence rated setting so the
// real valued output of classify instead of its sign
hx = _baseLearners[ib]->classify(_pTrainingData,i,l);
if ( hx < 0 )
labels[l].y *= -1;
else if ( hx == 0 ) { // have to redo the multiplications, haven't been tested
// hx == 0 zeroed the label earlier, so the sign cannot be recovered by
// multiplication: rebuild it from the product of all OTHER learners.
for(int ib1 = 0; ib1 < _numBaseLearners && labels[l].y != 0; ++ib1) {
if (ib != ib1) {
hx = _baseLearners[ib1]->classify(_pTrainingData,i,l);
if (hx < 0)
labels[l].y *= -1;
else if (hx == 0)
labels[l].y = 0;
}
}
}
}
}
}
// Snapshot the learner so it can be restored if retraining worsens the energy.
pPreviousBaseLearner = _baseLearners[ib]->copyState();
energy = _baseLearners[ib]->run();
_alpha = _baseLearners[ib]->getAlpha();
if (_verbose > 2) {
cout << "E[" << (ib+1) << "] = " << energy << endl << flush;
cout << "alpha[" << (ib+1) << "] = " << _alpha << endl << flush;
}
// Fold the freshly trained learner's sign back into the labels.
for (int i = 0; i < numExamples; ++i) {
vector<Label>& labels = _pTrainingData->getLabels(i);
for (int l = 0; l < numClasses; ++l) {
// Here we could have the option of using confidence rated setting so the
// real valued output of classify instead of its sign
if (labels[l].y != 0) { // perhaps replace it by nor_utils::is_zero(labels[l].y)
hx = _baseLearners[ib]->classify(_pTrainingData,i,l);
if ( hx < 0 )
labels[l].y *= -1;
else if ( hx == 0 )
labels[l].y = 0;
}
}
}
// We have to do at least one full iteration. For real it's not guaranteed
// Alternatively we could initialize all of them to constant
// if ( !firstLoop && energy >= previousEnergy ) {
// if (energy > previousEnergy) {
// _baseLearners[ib] = pPreviousBaseLearner->copyState();
// delete pPreviousBaseLearner;
// energy = previousEnergy;
// _alpha = _baseLearners[ib]->getAlpha();
// }
// break;
// }
// Stopping rule: retraining did not improve the energy.
if ( energy >= previousEnergy ) {
_alpha = previousAlpha;
energy = previousEnergy;
if (firstLoop) {
// Still in the first pass: shrink the product to the learners trained so far.
for(int ib2 = ib; ib2 < _numBaseLearners; ++ib2)
delete _baseLearners[ib2];
_numBaseLearners = ib;
}
else {
// Roll back learner ib to its pre-retraining state.
_baseLearners[ib] = pPreviousBaseLearner->copyState();
//......... remainder of this example omitted by the source page .........
示例2: run
// Bandit-driven variant of ProductLearner::run(): a bandit algorithm first
// selects the subset of features (arms) the base learners may use, then the
// base learners are retrained round-robin exactly as in ProductLearner::run(),
// with a ConstantLearner fallback when no weak hypothesis can be found.
// Returns the energy of the product learner (lower is better).
// NOTE(review): this snippet is truncated below (stopping logic, label restore
// and the final return are not visible here).
float BanditProductLearner::run()
{
if ( ! this->_banditAlgo->isInitialized() ) {
init();
}
// the bandit algorithm selects the subset the tree learner is allowed to use
// the armindexes will be stored in _armsForPulling
getArms();
const int numClasses = _pTrainingData->getNumClasses();
const int numExamples = _pTrainingData->getNumExamples();
// Backup original labels
// (labels are mutated in place during training and must be restorable later)
for (int i = 0; i < numExamples; ++i) {
const vector<Label>& labels = _pTrainingData->getLabels(i);
vector<char> exampleLabels;
for (int l = 0; l < numClasses; ++l)
exampleLabels.push_back(labels[l].y);
_savedLabels.push_back(exampleLabels);
}
// Give every base learner access to the shared training data.
for(int ib = 0; ib < _numBaseLearners; ++ib)
_baseLearners[ib]->setTrainingData(_pTrainingData);
// Start at +infinity so the first trained learner always counts as an improvement.
float energy = numeric_limits<float>::max();
float previousEnergy, hx, previousAlpha;
BaseLearner* pPreviousBaseLearner = 0;
bool firstLoop = true;
int ib = -1;
// Round-robin over the base learners until energy stops improving.
while (1) {
ib += 1;
if (ib >= _numBaseLearners) {
// Wrap around: from now on every learner has been trained at least once.
ib = 0;
firstLoop = false;
}
previousEnergy = energy;
previousAlpha = _alpha;
// Discard the snapshot of the previously retrained learner.
if (pPreviousBaseLearner)
delete pPreviousBaseLearner;
if ( !firstLoop ) {
// take the old learner off the labels
// (divide out learner ib's sign so it can be retrained against the rest)
for (int i = 0; i < numExamples; ++i) {
vector<Label>& labels = _pTrainingData->getLabels(i);
for (int l = 0; l < numClasses; ++l) {
// Here we could have the option of using confidence rated setting so the
// real valued output of classify instead of its sign
hx = _baseLearners[ib]->classify(_pTrainingData,i,l);
if ( hx < 0 )
labels[l].y *= -1;
else if ( hx == 0 ) { // have to redo the multiplications, haven't been tested
// hx == 0 zeroed the label earlier, so the sign cannot be recovered by
// multiplication: rebuild it from the product of all OTHER learners.
for(int ib1 = 0; ib1 < _numBaseLearners && labels[l].y != 0; ++ib1) {
if (ib != ib1) {
hx = _baseLearners[ib1]->classify(_pTrainingData,i,l);
if (hx < 0)
labels[l].y *= -1;
else if (hx == 0)
labels[l].y = 0;
}
}
}
}
}
}
// Snapshot the learner so it can be restored if retraining worsens the energy.
pPreviousBaseLearner = _baseLearners[ib]->copyState();
// Retrain restricted to the bandit-selected feature subset (arms).
energy = dynamic_cast< FeaturewiseLearner* >(_baseLearners[ib])->run(_armsForPulling );
// energy != energy detects NaN (signaling NaN): the learner failed to
// produce a usable weak hypothesis on the selected arms.
if ( energy != energy )
{
if (_verbose > 2) {
cout << "Cannot find weak hypothesis, constant learner is used!!" << endl;
}
// Fall back to a ConstantLearner so the product always has a valid factor.
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create() ;
pConstantWeakHypothesis->setTrainingData( _pTrainingData );
energy = pConstantWeakHypothesis->run();
delete _baseLearners[ib];
_baseLearners[ib] = pConstantWeakHypothesis;
}
_alpha = _baseLearners[ib]->getAlpha();
if (_verbose > 2) {
cout << "E[" << (ib+1) << "] = " << energy << endl << flush;
cout << "alpha[" << (ib+1) << "] = " << _alpha << endl << flush;
}
// Fold the freshly trained learner's sign back into the labels.
for (int i = 0; i < numExamples; ++i) {
vector<Label>& labels = _pTrainingData->getLabels(i);
for (int l = 0; l < numClasses; ++l) {
// Here we could have the option of using confidence rated setting so the
// real valued output of classify instead of its sign
if (labels[l].y != 0) { // perhaps replace it by nor_utils::is_zero(labels[l].y)
hx = _baseLearners[ib]->classify(_pTrainingData,i,l);
if ( hx < 0 )
labels[l].y *= -1;
else if ( hx == 0 )
labels[l].y = 0;
}
}
//......... remainder of this example omitted by the source page .........