本文整理汇总了C++中BaseLearner::getEdge方法的典型用法代码示例。如果您正苦于以下问题:C++ BaseLearner::getEdge方法的具体用法?C++ BaseLearner::getEdge怎么用?C++ BaseLearner::getEdge使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BaseLearner
的用法示例。
在下文中一共展示了BaseLearner::getEdge方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: run
//......... part of the code omitted here (snippet truncated by the example site) .........
{
// Stochastic training path: feed one uniformly random example per step
// to both the stochastic weak learner and the constant baseline.
int randomIndex = (rand() % pTrainingData->getNumExamples());
//int randomIndex = getRandomIndex();
pStochasticLearner->update(randomIndex);
pStochasticConstantWeakHypothesis->update(randomIndex);
}
pStochasticLearner->finishLearning();
pStochasticConstantWeakHypothesis->finishLearning();
}
catch (bad_cast& e) {
// The stochastic path requires the weak learner to implement the
// StochasticLearner interface; abort the whole program otherwise.
cerr << "The weak learner must be a StochasticLearner!!!" << endl;
exit(-1);
}
}
else
{
// Batch training path: draw a filtered subsample of the training data.
// If the accepted sample is degenerate (< 2 examples), refilter with the
// third argument set to false.
// NOTE(review): the meaning of the third filter() argument is not visible
// here — presumably it disables rejection sampling; confirm against the
// declaration of filter().
filter( pTrainingData, currentNumberOfUsedData );
if ( pTrainingData->getNumExamples() < 2 )
{
filter( pTrainingData, currentNumberOfUsedData, false );
}
if (_verbose > 1)
{
cout << "--> Size of training data = " << pTrainingData->getNumExamples() << endl;
}
energy = pWeakHypothesis->run();
pConstantWeakHypothesis->run();
}
//estimate edge
// Re-filter a fresh sample, then measure both learners' edges on it.
// Edges are halved here; the alpha formula below compensates by using
// (1 +/- edge) rather than (1/2 +/- edge).
filter( pTrainingData, currentNumberOfUsedData, false );
edge = pWeakHypothesis->getEdge(true) / 2.0;
constantEdge = pConstantWeakHypothesis->getEdge() / 2.0;
// Keep whichever hypothesis (trained or constant) has the larger edge
// and free the other one — pWeakHypothesis always owns the winner.
if ( constantEdge > edge )
{
delete pWeakHypothesis;
pWeakHypothesis = pConstantWeakHypothesis;
edge = constantEdge;
} else {
delete pConstantWeakHypothesis;
}
// calculate alpha
// Confidence-rated boosting vote weight: alpha = 1/2 ln((1+edge)/(1-edge)).
AlphaReal alpha = 0.0;
alpha = 0.5 * log( ( 1 + edge ) / ( 1 - edge ) );
pWeakHypothesis->setAlpha( alpha );
_sumAlpha += alpha;
if (_verbose > 1)
cout << "Weak learner: " << pWeakHypothesis->getName()<< endl;
// Output the step-by-step information
pTrainingData->clearIndexSet();
printOutputInfo(pOutInfo, t, pTrainingData, pTestData, pWeakHypothesis);
// Updates the weights and returns the edge
//AlphaReal gamma = updateWeights(pTrainingData, pWeakHypothesis);
if (_verbose > 1)
{
cout << setprecision(5)
<< "--> Alpha = " << pWeakHypothesis->getAlpha() << endl
<< "--> Edge = " << edge << endl
示例2: calculateChildrenAndEnergies
// Splits the examples reaching bLearner's node into a positive and a
// negative branch according to the sign of the node classifier's output,
// then trains one child learner per branch at the given tree depth.
// When a branch contains samples of only one class, a ConstantLearner is
// used for that child instead of copying the base learner.
// Side effects: overwrites the data object's active index set and fills
// bLearner's _leftChild/_rightChild, their edges and index sets.
// NOTE: this example is truncated — the constant-learner case of the
// negative branch is cut off at the end of the snippet.
void TreeLearnerUCT::calculateChildrenAndEnergies( NodePointUCT& bLearner, int depthIndex ) {
bLearner._extended = true;
_pTrainingData->loadIndexSet( bLearner._learnerIdxSet );
//separate the dataset
set< int > idxPos, idxNeg;
idxPos.clear();
idxNeg.clear();
float phix;
for (int i = 0; i < _pTrainingData->getNumExamples(); ++i) {
// this returns the phi value of classifier
phix = bLearner._learner->classify(_pTrainingData,i,0);
if ( phix < 0 )
idxNeg.insert( _pTrainingData->getRawIndex( i ) );
else if ( phix > 0 ) { // have to redo the multiplications, haven't been tested
idxPos.insert( _pTrainingData->getRawIndex( i ) );
}
// NOTE(review): examples with phix == 0 fall into neither branch.
}
// If either branch is empty the node cannot be split; mark it
// non-extended but still fall through and train the children below.
if ( (idxPos.size() < 1 ) || (idxNeg.size() < 1 ) ) {
//retval.clear();
bLearner._extended = false;
//return retval;
}
// ---- positive (left) child ----
_pTrainingData->loadIndexSet( idxPos );
if ( ! _pTrainingData->isSamplesFromOneClass() ) {
BaseLearner* posLearner = _baseLearners[0]->copyState();
//posLearner->run();
// Run restricted to the feature chosen by the UCT trajectory.
dynamic_cast<FeaturewiseLearner*>(posLearner)->run( depthIndex );
//
//float posEdge = getEdge( posLearner, _pTrainingData );
posLearner->setTrainingData( _pTrainingData );
bLearner._leftEdge = posLearner->getEdge();
//tmpPair.first = posEdge;
//tmpPair.second.first.first = posLearner;
bLearner._leftChild = posLearner;
//set the parent idx to zero
//tmpPair.second.first.second.first = 0;
//this means that it will be a left child in the tree
//tmpPair.second.first.second.second = 0;
//tmpPair.second.second = idxPos;
bLearner._leftChildIdxSet = idxPos;
} else {
// All positive-branch samples share one class: fall back to a
// ConstantLearner fetched from the learner registry.
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
BaseLearner* posLearner = pConstantWeakHypothesisSource->create();
posLearner->setTrainingData(_pTrainingData);
//float constantEnergy = posLearner->run();
dynamic_cast<FeaturewiseLearner*>(posLearner)->run( depthIndex );
//BaseLearner* posLearner = _baseLearners[0]->copyState();
//float posEdge = getEdge( posLearner, _pTrainingData );
posLearner->setTrainingData( _pTrainingData );
bLearner._leftEdge = posLearner->getEdge();
//tmpPair.first = posEdge;
//tmpPair.second.first.first = posLearner;
bLearner._leftChild = posLearner;
//set the parent idx to zero
//tmpPair.second.first.second.first = 0;
//this means that it will be a left child in the tree
//tmpPair.second.first.second.second = 0;
//tmpPair.second.second = idxPos;
bLearner._leftChildIdxSet = idxPos;
}
//retval.push_back( tmpPair );
// ---- negative (right) child ----
_pTrainingData->loadIndexSet( idxNeg );
if ( ! _pTrainingData->isSamplesFromOneClass() ) {
BaseLearner* negLearner = _baseLearners[0]->copyState();
//negLearner->run();
dynamic_cast<FeaturewiseLearner*>(negLearner)->run( depthIndex );
//float negEdge = getEdge( negLearner, _pTrainingData );
negLearner->setTrainingData( _pTrainingData );
bLearner._rightEdge = negLearner->getEdge();
//tmpPair.first = negEdge;
//tmpPair.second.first.first = negLearner;
bLearner._rightChild = negLearner;
//set the parent idx to zero
//tmpPair.second.first.second.first = 0;
//this means that it will be a right child in the tree
//tmpPair.second.first.second.second = 1;
//tmpPair.second.second = idxNeg;
bLearner._rightChildIdxSet = idxNeg;
} else {
BaseLearner* pConstantWeakHypothesisSource =
BaseLearner::RegisteredLearners().getLearner("ConstantLearner");
BaseLearner* negLearner = pConstantWeakHypothesisSource->create();
//......... part of the code omitted here (snippet truncated by the example site) .........
示例3: run
//......... part of the code omitted here (snippet truncated by the example site) .........
// Main boosting loop: one weak hypothesis is trained per iteration on a
// filtered subsample whose size grows as _Cn * log(t + 2).
for (int t = startingIteration; t < _numIterations; ++t)
{
if (_verbose > 1)
cout << "------- WORKING ON ITERATION " << (t+1) << " -------" << endl;
// Draw the training subsample; refilter if it is degenerate (< 2 examples).
filter( pTrainingData, (int)(_Cn * log(t+2.0)) );
if ( pTrainingData->getNumExamples() < 2 )
{
filter( pTrainingData, (int)(_Cn * log(t+2.0)), false );
}
if (_verbose > 1)
{
cout << "--> Size of training data = " << pTrainingData->getNumExamples() << endl;
}
BaseLearner* pWeakHypothesis = pWeakHypothesisSource->create();
pWeakHypothesis->initLearningOptions(args);
//pTrainingData->clearIndexSet();
pWeakHypothesis->setTrainingData(pTrainingData);
float energy = pWeakHypothesis->run();
// NOTE(review): pConstantWeakHypothesis is only initialized when
// _withConstantLearner is true; all later uses are guarded by the same
// flag, so it is never read uninitialized.
BaseLearner* pConstantWeakHypothesis;
if (_withConstantLearner) // check constant learner if user wants it
{
pConstantWeakHypothesis = pConstantWeakHypothesisSource->create() ;
pConstantWeakHypothesis->initLearningOptions(args);
pConstantWeakHypothesis->setTrainingData(pTrainingData);
float constantEnergy = pConstantWeakHypothesis->run();
}
//estimate edge
// Measure edges on a freshly filtered sample (not the training sample).
filter( pTrainingData, (int)(_Cn * log(t+2.0)), false );
float edge = pWeakHypothesis->getEdge() / 2.0;
if (_withConstantLearner) // check constant learner if user wants it
{
// Keep whichever hypothesis has the larger edge; free the loser.
float constantEdge = pConstantWeakHypothesis->getEdge() / 2.0;
if ( constantEdge > edge )
{
delete pWeakHypothesis;
pWeakHypothesis = pConstantWeakHypothesis;
edge = constantEdge;
} else {
delete pConstantWeakHypothesis;
}
}
// calculate alpha
// Vote weight: alpha = 1/2 ln((1/2 + edge)/(1/2 - edge)) — note this
// variant uses 0.5 +/- edge (edge was halved above), unlike the
// (1 +/- edge) form used elsewhere in the codebase.
float alpha = 0.0;
alpha = 0.5 * log( ( 0.5 + edge ) / ( 0.5 - edge ) );
pWeakHypothesis->setAlpha( alpha );
if (_verbose > 1)
cout << "Weak learner: " << pWeakHypothesis->getName()<< endl;
// Output the step-by-step information
pTrainingData->clearIndexSet();
printOutputInfo(pOutInfo, t, pTrainingData, pTestData, pWeakHypothesis);
// Updates the weights and returns the edge
float gamma = updateWeights(pTrainingData, pWeakHypothesis);
if (_verbose > 1)
{
cout << setprecision(5)
<< "--> Alpha = " << pWeakHypothesis->getAlpha() << endl
示例4: run
// Builds one decision-tree weak hypothesis guided by a UCT (bandit) search
// over feature choices. On the first call it initializes the UCT tree; on
// every call it asks the tree for the most promising trajectory of features,
// trains the root learner on the first feature of that trajectory, extends
// the root via calculateChildrenAndEnergies(), and grows the tree with a
// best-first priority queue ordered by edge improvement.
// NOTE: this example is truncated — the queue-driven growth loop continues
// past the end of the snippet.
AlphaReal TreeLearnerUCT::run()
{
if ( _numOfCalling == 0 ) {
if (_verbose > 0) {
cout << "Initializing tree..." << endl;
}
// One-time setup: tree depth equals the number of base learners, and
// the branching order covers every attribute of the training data.
InnerNodeUCTSparse::setDepth( _numBaseLearners );
InnerNodeUCTSparse::setBranchOrder( _pTrainingData->getNumAttributes() );
_root.setChildrenNum();
//createUCTTree();
}
_numOfCalling++;
set< int > tmpIdx, idxPos, idxNeg;
// Start from the full dataset: tmpIdx collects every example index.
_pTrainingData->clearIndexSet();
for( int i = 0; i < _pTrainingData->getNumExamples(); i++ ) tmpIdx.insert( i );
// Ask the UCT tree for the best sequence of feature indices to try.
vector< int > trajectory(0);
_root.getBestTrajectory( trajectory );
// for UCT
for(int ib = 0; ib < _numBaseLearners; ++ib)
_baseLearners[ib]->setTrainingData(_pTrainingData);
AlphaReal edge = numeric_limits<AlphaReal>::max();
BaseLearner* pPreviousBaseLearner = 0;
//floatBaseLearner tmpPair, tmpPairPos, tmpPairNeg;
// for storing the inner point (learneres) which will be extended
//vector< floatBaseLearner > bLearnerVector;
InnerNodeType innerNode;
// Best-first expansion queue, ordered by edge gain (greater_first).
priority_queue<InnerNodeType, deque<InnerNodeType>, greater_first<InnerNodeType> > pq;
//train the first learner
//_baseLearners[0]->run();
// Root learner is trained on the first feature of the UCT trajectory.
pPreviousBaseLearner = _baseLearners[0]->copyState();
dynamic_cast<FeaturewiseLearner*>(pPreviousBaseLearner)->run( trajectory[0] );
//this contains the number of baselearners
int ib = 0;
NodePointUCT tmpNodePoint, nodeLeft, nodeRight;
////////////////////////////////////////////////////////
//set the edge
//tmpPair.first = getEdge( pPreviousBaseLearner, _pTrainingData );
//tmpPair.second.first.first = pPreviousBaseLearner;
// set the pointer of the parent
//tmpPair.second.first.second.first = 0;
// set that this is a neg child
//tmpPair.second.first.second.second = 0;
//tmpPair.second.second = tmpIdx;
//bLearnerVector = calculateChildrenAndEnergies( tmpPair );
///
// Build the root node point and extend it with its two children, using
// the second feature of the trajectory for the children.
pPreviousBaseLearner->setTrainingData( _pTrainingData );
tmpNodePoint._edge = pPreviousBaseLearner->getEdge();
tmpNodePoint._learner = pPreviousBaseLearner;
tmpNodePoint._idx = 0;
tmpNodePoint._depth = 0;
tmpNodePoint._learnerIdxSet = tmpIdx;
calculateChildrenAndEnergies( tmpNodePoint, trajectory[1] );
////////////////////////////////////////////////////////
//insert the root into the priority queue
if ( tmpNodePoint._extended )
{
if (_verbose > 2) {
//cout << "Edges: (parent, pos, neg): " << bLearnerVector[0].first << " " << bLearnerVector[1].first << " " << bLearnerVector[2].first << endl << flush;
//cout << "alpha[" << (ib) << "] = " << _alpha << endl << flush;
cout << "Edges: (parent, pos, neg): " << tmpNodePoint._edge << " " << tmpNodePoint._leftEdge << " " << tmpNodePoint._rightEdge << endl << flush;
}
// if the energy is getting higher then we push it into the priority queue
// Only enqueue the split if the children's combined edge beats the
// parent's edge; otherwise discard the freshly trained children.
if ( tmpNodePoint._edge < ( tmpNodePoint._leftEdge + tmpNodePoint._rightEdge ) ) {
float deltaEdge = abs( tmpNodePoint._edge - ( tmpNodePoint._leftEdge + tmpNodePoint._rightEdge ) );
innerNode.first = deltaEdge;
innerNode.second = tmpNodePoint;
pq.push( innerNode );
} else {
//delete bLearnerVector[0].second.first.first;
delete tmpNodePoint._leftChild;
delete tmpNodePoint._rightChild;
}
}
if ( pq.empty() ) {
//......... part of the code omitted here (snippet truncated by the example site) .........