This article collects typical usage examples of the C++ method BaseLearner::setTrainingData. If you are wondering what exactly BaseLearner::setTrainingData does, how to call it, or what it looks like in real code, the selected examples below may help. You can also explore further how the enclosing class BaseLearner is used.
The following shows 7 code examples of BaseLearner::setTrainingData; by default they are ordered by popularity.
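Before the examples, a minimal usage sketch of the typical call sequence around setTrainingData may be helpful: obtain a registered weak learner from the learner registry, create a fresh instance, attach the training data with setTrainingData, and then run it. This sketch is not taken from the examples below; the learner name "SingleStumpLearner", the file name "train.arff", and the already-parsed args object are illustrative assumptions, and error handling is omitted.
// Minimal sketch, assuming the usual MultiBoost headers and a parsed nor_utils::Args object named args;
// the learner name and file name are placeholders.
BaseLearner* pSource =
    BaseLearner::RegisteredLearners().getLearner("SingleStumpLearner");
BaseLearner* pWeakHypothesis = pSource->create();        // fresh learner instance
pWeakHypothesis->initLearningOptions(args);               // read learner-specific options
InputData* pTrainingData = pSource->createInputData();    // data object matching the learner type
pTrainingData->initOptions(args);
pTrainingData->load("train.arff", IT_TRAIN, 1);           // 1 = verbose level
pWeakHypothesis->setTrainingData(pTrainingData);          // bind the data before run()
AlphaReal energy = pWeakHypothesis->run();                // train on the attached data
AlphaReal alpha = pWeakHypothesis->getAlpha();            // weight of the trained hypothesis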
Example 1: loadHypothesis
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
void UnSerialization::loadHypothesis(nor_utils::StreamTokenizer& st,
vector<BaseLearner*>& weakHypotheses,
InputData* pTrainingData, int verbose)
{
string basicLearnerName = seekAndParseEnclosedValue<string>(st, "weakLearner");
// Check if the weak learner exists
if ( !BaseLearner::RegisteredLearners().hasLearner(basicLearnerName) ) {
cerr << "ERROR: Weak learner <" << basicLearnerName << "> not registered!!" << endl;
exit(1);
}
// allocate the weak learner object
BaseLearner* pWeakHypothesis =
BaseLearner::RegisteredLearners().getLearner(basicLearnerName)->create();
pWeakHypothesis->setTrainingData(pTrainingData);
// load it
pWeakHypothesis->load(st);
// at the very least a closing </weakhyp> tag should still follow;
// if no tokens are left, this weak hypothesis entry is broken
if ( !st.has_token() ) {
cerr << "WARNING: Incomplete weak hypothesis file found. Check the shyp file!" << endl;
delete pWeakHypothesis;
return;
}
// store it in the vector
weakHypotheses.push_back(pWeakHypothesis);
// show some progress while loading on verbose > 1
if (verbose > 1 && weakHypotheses.size() % 1000 == 0)
cout << "." << flush;
}
Example 2: run
void FilterBoostLearner::run(const nor_utils::Args& args)
{
// load the arguments
this->getArgs(args);
time_t startTime, currentTime;
time(&startTime);
// get the registered weak learner (type from name)
BaseLearner* pWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner(_baseLearnerName);
// initialize the learning options; normally this is done in the strong learner's loop,
// but we also do it here for Product learners, so that the input data can be created
pWeakHypothesisSource->initLearningOptions(args);
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
// get the training input data, and load it
InputData* pTrainingData = pWeakHypothesisSource->createInputData();
pTrainingData->initOptions(args);
pTrainingData->load(_trainFileName, IT_TRAIN, _verbose);
const int numClasses = pTrainingData->getNumClasses();
const int numExamples = pTrainingData->getNumExamples();
// initialize the margins variable
_margins.resize( numExamples );
for( int i=0; i<numExamples; i++ )
{
_margins[i].resize( numClasses );
fill( _margins[i].begin(), _margins[i].end(), 0.0 );
}
// get the testing input data, and load it
InputData* pTestData = NULL;
if ( !_testFileName.empty() )
{
pTestData = pWeakHypothesisSource->createInputData();
pTestData->initOptions(args);
pTestData->load(_testFileName, IT_TEST, _verbose);
}
// The output information object
OutputInfo* pOutInfo = NULL;
if ( !_outputInfoFile.empty() )
{
// Baseline: constant classifier - goes into 0th iteration
BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create() ;
pConstantWeakHypothesis->initLearningOptions(args);
pConstantWeakHypothesis->setTrainingData(pTrainingData);
float constantEnergy = pConstantWeakHypothesis->run();
pOutInfo = new OutputInfo(_outputInfoFile);
pOutInfo->initialize(pTrainingData);
updateMargins( pTrainingData, pConstantWeakHypothesis );
if (pTestData)
pOutInfo->initialize(pTestData);
pOutInfo->outputHeader();
pOutInfo->outputIteration(-1);
pOutInfo->outputError(pTrainingData, pConstantWeakHypothesis);
if (pTestData)
pOutInfo->outputError(pTestData, pConstantWeakHypothesis);
/*
pOutInfo->outputMargins(pTrainingData, pConstantWeakHypothesis);
pOutInfo->outputEdge(pTrainingData, pConstantWeakHypothesis);
if (pTestData)
pOutInfo->outputMargins(pTestData, pConstantWeakHypothesis);
pOutInfo->outputMAE(pTrainingData);
if (pTestData)
pOutInfo->outputMAE(pTestData);
*/
pOutInfo->outputCurrentTime();
pOutInfo->endLine();
pOutInfo->initialize(pTrainingData);
if (pTestData)
pOutInfo->initialize(pTestData);
}
// reload the previously found weak learners if -resume is set.
// otherwise just return 0
int startingIteration = resumeWeakLearners(pTrainingData);
Serialization ss(_shypFileName, _isShypCompressed );
ss.writeHeader(_baseLearnerName); // this must go after resumeProcess has been called
//......... part of the code is omitted here .........
Example 3: run
void FilterBoostLearner::run(const nor_utils::Args& args)
{
// load the arguments
this->getArgs(args);
time_t startTime, currentTime;
time(&startTime);
// get the registered weak learner (type from name)
BaseLearner* pWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner(_baseLearnerName);
// initialize the learning options; normally this is done in the strong learner's loop,
// but we also do it here for Product learners, so that the input data can be created
pWeakHypothesisSource->initLearningOptions(args);
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
// get the training input data, and load it
InputData* pTrainingData = pWeakHypothesisSource->createInputData();
pTrainingData->initOptions(args);
pTrainingData->load(_trainFileName, IT_TRAIN, _verbose);
const int numClasses = pTrainingData->getNumClasses();
const int numExamples = pTrainingData->getNumExamples();
// initialize the margins variable
_margins.resize( numExamples );
for( int i=0; i<numExamples; i++ )
{
_margins[i].resize( numClasses );
fill( _margins[i].begin(), _margins[i].end(), 0.0 );
}
// get the testing input data, and load it
InputData* pTestData = NULL;
if ( !_testFileName.empty() )
{
pTestData = pWeakHypothesisSource->createInputData();
pTestData->initOptions(args);
pTestData->load(_testFileName, IT_TEST, _verbose);
}
// The output information object
OutputInfo* pOutInfo = NULL;
if ( !_outputInfoFile.empty() )
{
// Baseline: constant classifier - goes into 0th iteration
BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create() ;
pConstantWeakHypothesis->initLearningOptions(args);
pConstantWeakHypothesis->setTrainingData(pTrainingData);
AlphaReal constantEnergy = pConstantWeakHypothesis->run();
pOutInfo = new OutputInfo(args);
pOutInfo->initialize(pTrainingData);
updateMargins( pTrainingData, pConstantWeakHypothesis );
if (pTestData)
pOutInfo->initialize(pTestData);
pOutInfo->outputHeader(pTrainingData->getClassMap() );
pOutInfo->outputIteration(-1);
pOutInfo->outputCustom(pTrainingData, pConstantWeakHypothesis);
if (pTestData)
{
pOutInfo->separator();
pOutInfo->outputCustom(pTestData, pConstantWeakHypothesis);
}
pOutInfo->outputCurrentTime();
pOutInfo->endLine();
pOutInfo->initialize(pTrainingData);
if (pTestData)
pOutInfo->initialize(pTestData);
}
// reload the previously found weak learners if -resume is set.
// otherwise just return 0
int startingIteration = resumeWeakLearners(pTrainingData);
Serialization ss(_shypFileName, _isShypCompressed );
ss.writeHeader(_baseLearnerName); // this must go after resumeProcess has been called
// perform the resuming if necessary. If not it will just return
resumeProcess(ss, pTrainingData, pTestData, pOutInfo);
if (_verbose == 1)
cout << "Learning in progress..." << endl;
///////////////////////////////////////////////////////////////////////
// Starting the AdaBoost main loop
//......... part of the code is omitted here .........
Example 4: calculateChildrenAndEnergies
void TreeLearnerUCT::calculateChildrenAndEnergies( NodePointUCT& bLearner, int depthIndex ) {
bLearner._extended = true;
_pTrainingData->loadIndexSet( bLearner._learnerIdxSet );
//separate the dataset
set< int > idxPos, idxNeg;
idxPos.clear();
idxNeg.clear();
float phix;
for (int i = 0; i < _pTrainingData->getNumExamples(); ++i) {
// this returns the phi value of the classifier
phix = bLearner._learner->classify(_pTrainingData,i,0);
if ( phix < 0 )
idxNeg.insert( _pTrainingData->getRawIndex( i ) );
else if ( phix > 0 ) { // the multiplications have to be redone; this has not been tested
idxPos.insert( _pTrainingData->getRawIndex( i ) );
}
}
if ( (idxPos.size() < 1 ) || (idxNeg.size() < 1 ) ) {
//retval.clear();
bLearner._extended = false;
//return retval;
}
_pTrainingData->loadIndexSet( idxPos );
if ( ! _pTrainingData->isSamplesFromOneClass() ) {
BaseLearner* posLearner = _baseLearners[0]->copyState();
//posLearner->run();
dynamic_cast<FeaturewiseLearner*>(posLearner)->run( depthIndex );
//
//float posEdge = getEdge( posLearner, _pTrainingData );
posLearner->setTrainingData( _pTrainingData );
bLearner._leftEdge = posLearner->getEdge();
//tmpPair.first = posEdge;
//tmpPair.second.first.first = posLearner;
bLearner._leftChild = posLearner;
//set the parent idx to zero
//tmpPair.second.first.second.first = 0;
//this means that it will be a left child in the tree
//tmpPair.second.first.second.second = 0;
//tmpPair.second.second = idxPos;
bLearner._leftChildIdxSet = idxPos;
} else {
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
BaseLearner* posLearner = pConstantWeakHypothesisSource->create();
posLearner->setTrainingData(_pTrainingData);
//float constantEnergy = posLearner->run();
dynamic_cast<FeaturewiseLearner*>(posLearner)->run( depthIndex );
//BaseLearner* posLearner = _baseLearners[0]->copyState();
//float posEdge = getEdge( posLearner, _pTrainingData );
posLearner->setTrainingData( _pTrainingData );
bLearner._leftEdge = posLearner->getEdge();
//tmpPair.first = posEdge;
//tmpPair.second.first.first = posLearner;
bLearner._leftChild = posLearner;
//set the parent idx to zero
//tmpPair.second.first.second.first = 0;
//this means that it will be a left child in the tree
//tmpPair.second.first.second.second = 0;
//tmpPair.second.second = idxPos;
bLearner._leftChildIdxSet = idxPos;
}
//retval.push_back( tmpPair );
_pTrainingData->loadIndexSet( idxNeg );
if ( ! _pTrainingData->isSamplesFromOneClass() ) {
BaseLearner* negLearner = _baseLearners[0]->copyState();
//negLearner->run();
dynamic_cast<FeaturewiseLearner*>(negLearner)->run( depthIndex );
//float negEdge = getEdge( negLearner, _pTrainingData );
negLearner->setTrainingData( _pTrainingData );
bLearner._rightEdge = negLearner->getEdge();
//tmpPair.first = negEdge;
//tmpPair.second.first.first = negLearner;
bLearner._rightChild = negLearner;
//set the parent idx to zero
//tmpPair.second.first.second.first = 0;
//this means that it will be a right child in the tree
//tmpPair.second.first.second.second = 1;
//tmpPair.second.second = idxNeg;
bLearner._rightChildIdxSet = idxNeg;
} else {
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
BaseLearner* negLearner = pConstantWeakHypothesisSource->create();
//......... part of the code is omitted here .........
Example 5: run
AlphaReal TreeLearnerUCT::run()
{
if ( _numOfCalling == 0 ) {
if (_verbose > 0) {
cout << "Initializing tree..." << endl;
}
InnerNodeUCTSparse::setDepth( _numBaseLearners );
InnerNodeUCTSparse::setBranchOrder( _pTrainingData->getNumAttributes() );
_root.setChildrenNum();
//createUCTTree();
}
_numOfCalling++;
set< int > tmpIdx, idxPos, idxNeg;
_pTrainingData->clearIndexSet();
for( int i = 0; i < _pTrainingData->getNumExamples(); i++ ) tmpIdx.insert( i );
vector< int > trajectory(0);
_root.getBestTrajectory( trajectory );
// for UCT
for(int ib = 0; ib < _numBaseLearners; ++ib)
_baseLearners[ib]->setTrainingData(_pTrainingData);
AlphaReal edge = numeric_limits<AlphaReal>::max();
BaseLearner* pPreviousBaseLearner = 0;
//floatBaseLearner tmpPair, tmpPairPos, tmpPairNeg;
// for storing the inner points (learners) which will be extended
//vector< floatBaseLearner > bLearnerVector;
InnerNodeType innerNode;
priority_queue<InnerNodeType, deque<InnerNodeType>, greater_first<InnerNodeType> > pq;
//train the first learner
//_baseLearners[0]->run();
pPreviousBaseLearner = _baseLearners[0]->copyState();
dynamic_cast<FeaturewiseLearner*>(pPreviousBaseLearner)->run( trajectory[0] );
// index of the base learner currently being built
int ib = 0;
NodePointUCT tmpNodePoint, nodeLeft, nodeRight;
////////////////////////////////////////////////////////
//set the edge
//tmpPair.first = getEdge( pPreviousBaseLearner, _pTrainingData );
//tmpPair.second.first.first = pPreviousBaseLearner;
// set the pointer of the parent
//tmpPair.second.first.second.first = 0;
// set that this is a neg child
//tmpPair.second.first.second.second = 0;
//tmpPair.second.second = tmpIdx;
//bLearnerVector = calculateChildrenAndEnergies( tmpPair );
///
pPreviousBaseLearner->setTrainingData( _pTrainingData );
tmpNodePoint._edge = pPreviousBaseLearner->getEdge();
tmpNodePoint._learner = pPreviousBaseLearner;
tmpNodePoint._idx = 0;
tmpNodePoint._depth = 0;
tmpNodePoint._learnerIdxSet = tmpIdx;
calculateChildrenAndEnergies( tmpNodePoint, trajectory[1] );
////////////////////////////////////////////////////////
//insert the root into the priority queue
if ( tmpNodePoint._extended )
{
if (_verbose > 2) {
//cout << "Edges: (parent, pos, neg): " << bLearnerVector[0].first << " " << bLearnerVector[1].first << " " << bLearnerVector[2].first << endl << flush;
//cout << "alpha[" << (ib) << "] = " << _alpha << endl << flush;
cout << "Edges: (parent, pos, neg): " << tmpNodePoint._edge << " " << tmpNodePoint._leftEdge << " " << tmpNodePoint._rightEdge << endl << flush;
}
// if the combined edge of the children is higher than the parent's edge, push the node into the priority queue
if ( tmpNodePoint._edge < ( tmpNodePoint._leftEdge + tmpNodePoint._rightEdge ) ) {
float deltaEdge = abs( tmpNodePoint._edge - ( tmpNodePoint._leftEdge + tmpNodePoint._rightEdge ) );
innerNode.first = deltaEdge;
innerNode.second = tmpNodePoint;
pq.push( innerNode );
} else {
//delete bLearnerVector[0].second.first.first;
delete tmpNodePoint._leftChild;
delete tmpNodePoint._rightChild;
}
}
if ( pq.empty() ) {
//......... part of the code is omitted here .........
Example 6: run
// -------------------------------------------------------------------------
void AdaBoostMHLearner::run( const nor_utils::Args& args, InputData* pTrainingData, const string baseLearnerName, const int numIterations, vector<BaseLearner*>& foundHypotheses )
{
// get the registered weak learner (type from name)
BaseLearner* pWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner(baseLearnerName);
// initialize the learning options; normally this is done in the strong learner's loop,
// but we also do it here for Product learners, so that the input data can be created
pWeakHypothesisSource->initLearningOptions(args);
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
if (_verbose == 1)
cout << "Learning in progress..." << endl;
///////////////////////////////////////////////////////////////////////
// Starting the AdaBoost main loop
///////////////////////////////////////////////////////////////////////
for (int t = 0; t < numIterations; ++t)
{
if ((_verbose > 0)&&((t%100)==0))
cout << "--------------[ Boosting iteration " << (t+1) << " ]--------------" << endl;
BaseLearner* pWeakHypothesis = pWeakHypothesisSource->create();
pWeakHypothesis->initLearningOptions(args);
//pTrainingData->clearIndexSet();
pWeakHypothesis->setTrainingData(pTrainingData);
AlphaReal energy = pWeakHypothesis->run();
//float gamma = pWeakHypothesis->getEdge();
//cout << gamma << endl;
if ( (_withConstantLearner) || ( energy != energy ) ) // check the constant learner if the user wants it (if the energy is NaN, we choose the constant learner)
{
BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create() ;
pConstantWeakHypothesis->initLearningOptions(args);
pConstantWeakHypothesis->setTrainingData(pTrainingData);
AlphaReal constantEnergy = pConstantWeakHypothesis->run();
if ( (constantEnergy <= energy) || ( energy != energy ) ) {
delete pWeakHypothesis;
pWeakHypothesis = pConstantWeakHypothesis;
}
}
if (_verbose > 1)
cout << "Weak learner: " << pWeakHypothesis->getName()<< endl;
// Updates the weights and returns the edge
AlphaReal gamma = updateWeights(pTrainingData, pWeakHypothesis);
if (_verbose > 1)
{
cout << setprecision(5)
<< "--> Alpha = " << pWeakHypothesis->getAlpha() << endl
<< "--> Edge = " << gamma << endl
<< "--> Energy = " << energy << endl
// << "--> ConstantEnergy = " << constantEnergy << endl
// << "--> difference = " << (energy - constantEnergy) << endl
;
}
// If gamma <= theta the algorithm must stop.
// If theta == 0 and gamma is 0, it means that the weak learner is no better than chance
// and no further training is possible.
if (gamma <= _theta)
{
if (_verbose > 0)
{
cout << "Can't train any further: edge = " << gamma
<< " (with and edge offset (theta)=" << _theta << ")" << endl;
}
// delete pWeakHypothesis;
// break;
}
// Add it to the internal list of weak hypotheses
foundHypotheses.push_back(pWeakHypothesis);
} // loop on iterations
/////////////////////////////////////////////////////////
if (_verbose > 0)
cout << "--------------[ AdaBoost Learning completed. ]--------------" << endl;
}
Example 7: run
float BanditProductLearner::run()
{
if ( ! this->_banditAlgo->isInitialized() ) {
init();
}
// the bandit algorithm selects the subset of arms (features) the learner is allowed to use;
// the arm indexes will be stored in _armsForPulling
getArms();
const int numClasses = _pTrainingData->getNumClasses();
const int numExamples = _pTrainingData->getNumExamples();
// Backup original labels
for (int i = 0; i < numExamples; ++i) {
const vector<Label>& labels = _pTrainingData->getLabels(i);
vector<char> exampleLabels;
for (int l = 0; l < numClasses; ++l)
exampleLabels.push_back(labels[l].y);
_savedLabels.push_back(exampleLabels);
}
for(int ib = 0; ib < _numBaseLearners; ++ib)
_baseLearners[ib]->setTrainingData(_pTrainingData);
float energy = numeric_limits<float>::max();
float previousEnergy, hx, previousAlpha;
BaseLearner* pPreviousBaseLearner = 0;
bool firstLoop = true;
int ib = -1;
while (1) {
ib += 1;
if (ib >= _numBaseLearners) {
ib = 0;
firstLoop = false;
}
previousEnergy = energy;
previousAlpha = _alpha;
if (pPreviousBaseLearner)
delete pPreviousBaseLearner;
if ( !firstLoop ) {
// take the old learner off the labels
for (int i = 0; i < numExamples; ++i) {
vector<Label>& labels = _pTrainingData->getLabels(i);
for (int l = 0; l < numClasses; ++l) {
// Here we could optionally use the confidence-rated setting, i.e. the
// real-valued output of classify() instead of its sign
hx = _baseLearners[ib]->classify(_pTrainingData,i,l);
if ( hx < 0 )
labels[l].y *= -1;
else if ( hx == 0 ) { // the multiplications have to be redone; this has not been tested
for(int ib1 = 0; ib1 < _numBaseLearners && labels[l].y != 0; ++ib1) {
if (ib != ib1) {
hx = _baseLearners[ib1]->classify(_pTrainingData,i,l);
if (hx < 0)
labels[l].y *= -1;
else if (hx == 0)
labels[l].y = 0;
}
}
}
}
}
}
pPreviousBaseLearner = _baseLearners[ib]->copyState();
energy = dynamic_cast< FeaturewiseLearner* >(_baseLearners[ib])->run(_armsForPulling );
// check if it is a signaling NaN
if ( energy != energy )
{
if (_verbose > 2) {
cout << "Cannot find weak hypothesis, constant learner is used!!" << endl;
}
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create() ;
pConstantWeakHypothesis->setTrainingData( _pTrainingData );
energy = pConstantWeakHypothesis->run();
delete _baseLearners[ib];
_baseLearners[ib] = pConstantWeakHypothesis;
}
_alpha = _baseLearners[ib]->getAlpha();
if (_verbose > 2) {
cout << "E[" << (ib+1) << "] = " << energy << endl << flush;
cout << "alpha[" << (ib+1) << "] = " << _alpha << endl << flush;
}
for (int i = 0; i < numExamples; ++i) {
vector<Label>& labels = _pTrainingData->getLabels(i);
for (int l = 0; l < numClasses; ++l) {
// Here we could optionally use the confidence-rated setting, i.e. the
// real-valued output of classify() instead of its sign
if (labels[l].y != 0) { // perhaps replace it by nor_utils::is_zero(labels[l].y)
hx = _baseLearners[ib]->classify(_pTrainingData,i,l);
if ( hx < 0 )
labels[l].y *= -1;
else if ( hx == 0 )
labels[l].y = 0;
}
}
//......... part of the code is omitted here .........