本文整理汇总了C++中nor_utils::Args::getValue方法的典型用法代码示例。如果您正苦于以下问题:C++ Args::getValue方法的具体用法?C++ Args::getValue怎么用?C++ Args::getValue使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nor_utils::Args
的用法示例。
在下文中一共展示了Args::getValue方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: initLearningOptions
/// Parses the command-line options for the tree learner: the base learner
/// type/count and the options of the underlying scalar weak learner.
/// Exits the program if the base learner is not a ScalarLearner.
void TreeLearner::initLearningOptions(const nor_utils::Args& args)
{
    BaseLearner::initLearningOptions(args);

    // -baselearnertype <name> <numBaseLearners>
    string baseLearnerName;
    args.getValue("baselearnertype", 0, baseLearnerName);
    args.getValue("baselearnertype", 1, _numBaseLearners);

    // Get the registered weak learner (type from name).
    BaseLearner* pWeakHypothesisSource = BaseLearner::RegisteredLearners().getLearner(baseLearnerName);

    // BUGFIX: dynamic_cast on a POINTER never throws std::bad_cast -- it
    // yields a null pointer on failure -- so the original try/catch could
    // never fire. Check the result explicitly instead. This also catches a
    // null lookup result (unknown learner name).
    _pScalaWeakHypothesisSource = dynamic_cast<ScalarLearner*>(pWeakHypothesisSource);
    if (_pScalaWeakHypothesisSource == NULL) {
        cerr << "The weak hypothesis must be a ScalarLearner!!!" << endl;
        exit(-1);
    }

    _pScalaWeakHypothesisSource->initLearningOptions(args);
    /*
    for( int ib = 0; ib < _numBaseLearners; ++ib ) {
    vector< int > tmpVector( 2, -1 );
    _idxPairs.push_back( tmpVector );
    }
    */
}
示例2: initLearningOptions
/// Reads the options shared by all base learners: the verbosity level and
/// the edge offset (theta). Both are optional on the command line.
void BaseLearner::initLearningOptions(const nor_utils::Args& args)
{
    // Verbosity level, if supplied.
    if (args.hasArgument("verbose")) {
        args.getValue("verbose", 0, _verbose);
    }

    // Edge offset (theta), if supplied.
    if (args.hasArgument("edgeoffset")) {
        args.getValue("edgeoffset", 0, _theta);
    }
}
示例3: initLearningOptions
/// Parses the parasite-learner options: the pool file and the number of
/// base learners, plus the optional "closed" flag.
void ParasiteLearner::initLearningOptions(const nor_utils::Args& args)
{
    // Let the base class consume its shared options first.
    BaseLearner::initLearningOptions(args);

    // -pool <baseLearnerFile> <numBaseLearners>
    args.getValue("pool", 0, _nameBaseLearnerFile);
    args.getValue("pool", 1, _numBaseLearners);

    // Optional flag: switch the learner into "closed" mode.
    if (args.hasArgument("closed")) {
        _closed = 1;
    }
}
示例4:
/// Constructs the Viola-Jones cascade classifier from command-line options.
/// Reads the optional step-by-step output file and the mandatory positive
/// label name; exits if the positive label is missing.
VJCascadeClassifier::VJCascadeClassifier(const nor_utils::Args &args, int verbose)
    : _verbose(verbose), _args(args), _positiveLabelIndex(-1)
{
    // The file with the step-by-step information.
    if ( args.hasArgument("outputinfo") )
        args.getValue("outputinfo", 0, _outputInfoFile);

    // The positive label is mandatory for a cascade.
    if ( args.hasArgument("positivelabel") )
    {
        args.getValue("positivelabel", 0, _positiveLabelName);
    } else {
        // BUGFIX: fatal diagnostics belong on stderr, not stdout.
        cerr << "The name of positive label has to be given!!!" << endl;
        exit(-1);
    }
}
示例5:
/// Constructs the MDDAG classifier, storing the verbosity and the argument
/// set, and picking up the optional step-by-step output file name.
MDDAGClassifier::MDDAGClassifier(const nor_utils::Args &args, int verbose)
    : _verbose(verbose), _args(args)
{
    // Optional file that receives the step-by-step information.
    const bool hasInfoFile = args.hasArgument("outputinfo");
    if (hasInfoFile) {
        args.getValue("outputinfo", 0, _outputInfoFile);
    }
}
示例6: initLearningOptions
/// Parses the stochastic-learner options: initial gamma, gamma division
/// period, maximal gradient iterations, the gradient method (sgd/bgd) and
/// the target function (exploss/edge). Exits on an unknown method/function.
void StochasticLearner::initLearningOptions(const nor_utils::Args& args)
{
    BaseLearner::initLearningOptions(args);

    // Initial gradient step size (gamma_0).
    if (args.hasArgument("initgamma"))
        args.getValue("initgamma", 0, _initialGammat);

    // Period (in iterations) after which gamma is divided.
    if (args.hasArgument("gammdivperiod"))
        args.getValue("gammdivperiod", 0, _gammdivperiod);

    // Maximal number of gradient iterations.
    if (args.hasArgument("graditer"))
        args.getValue("graditer", 0, _maxIter);

    // Gradient-descent flavour: stochastic (sgd) or batch (bgd).
    if (args.hasArgument("gradmethod"))
    {
        string gradMethod;
        args.getValue("gradmethod", 0, gradMethod);

        if ( gradMethod.compare( "sgd" ) == 0 )
            _gMethod = OPT_SGD;
        else if ( gradMethod.compare( "bgd" ) == 0 )
            _gMethod = OPT_BGD;
        else {
            // BUGFIX: message previously named SigmoidSingleStumpLearner
            // (copy-paste); this is StochasticLearner.
            cerr << "StochasticLearner::Unknown update gradient method" << endl;
            exit( -1 );
        }
    }

    // Target function the gradient steps optimize.
    if (args.hasArgument("tfunc"))
    {
        string targetFunction;
        args.getValue("tfunc", 0, targetFunction);

        if ( targetFunction.compare( "exploss" ) == 0 )
            _tFunction = TF_EXPLOSS;
        else if ( targetFunction.compare( "edge" ) == 0 )
            _tFunction = TF_EDGE;
        else {
            // BUGFIX: same copy-paste class name as above.
            cerr << "StochasticLearner::Unknown target function" << endl;
            exit( -1 );
        }
    }
}
示例7: initLearningOptions
/// Reads the EnumLearnerSA-specific options: the optional u-offset value,
/// after delegating the shared options to the base class.
void EnumLearnerSA::initLearningOptions(const nor_utils::Args& args)
{
    BaseLearner::initLearningOptions(args);

    // Optional offset applied to the u values.
    const bool hasOffset = args.hasArgument("uoffset");
    if (hasOffset) {
        args.getValue("uoffset", 0, _uOffset);
    }
}
示例8: getArgs
/// Parses the multi-MDDAG options: delegates the shared MDDAG options to
/// the parent, then reads the optional random-update percentage.
void MultiMDDAGLearner::getArgs(const nor_utils::Args& args)
{
    // Parent class consumes the shared MDDAG options.
    MDDAGLearner::getArgs(args);

    // Optional: percentage of randomly updated elements.
    if (args.hasArgument("updateperc")) {
        args.getValue("updateperc", 0, _randomNPercent);
    }
}
示例9: initLearningOptions
/// Parses the product-learner options: the base learner type/count, then
/// instantiates and configures each base learner from the registered
/// prototype. Exits if the base learner name is not registered.
void ProductLearner::initLearningOptions(const nor_utils::Args& args)
{
    BaseLearner::initLearningOptions(args);

    // -baselearnertype <name> <numBaseLearners>
    string baseLearnerName;
    args.getValue("baselearnertype", 0, baseLearnerName);
    args.getValue("baselearnertype", 1, _numBaseLearners);

    // Get the registered weak learner (type from name).
    BaseLearner* pWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner(baseLearnerName);

    // BUGFIX: guard against an unknown learner name -- the original
    // dereferenced the lookup result unconditionally (undefined behavior
    // when the registry returns null).
    if (pWeakHypothesisSource == NULL) {
        cerr << "ERROR: ProductLearner: unknown base learner type <"
             << baseLearnerName << ">!" << endl;
        exit(-1);
    }

    pWeakHypothesisSource->initLearningOptions(args);

    // Create one configured clone per requested base learner.
    for( int ib = 0; ib < _numBaseLearners; ++ib ) {
        _baseLearners.push_back(pWeakHypothesisSource->create());
        _baseLearners[ib]->initLearningOptions(args);
    }
}
示例10: getArgs
/// Parses the FilterBoost options: delegates the shared options to
/// AdaBoostMHLearner, then reads the optional resampling size Cn.
void FilterBoostLearner::getArgs(const nor_utils::Args& args)
{
    AdaBoostMHLearner::getArgs( args );

    // Set the value of the sample (resampling) size.
    if ( args.hasArgument("Cn") )
    {
        // BUGFIX: the guard checks "Cn" but the value was read from "C" --
        // read the same argument the guard tested.
        args.getValue("Cn", 0, _Cn);
        if (_verbose > 1)
            cout << "--> Resampling size: " << _Cn << endl;
    }
}
示例11: resumeProcess
/// Resumes the learning process: reads the optional policy alpha and
/// allocates a fresh policy array. Returns the number of resumed policies
/// (currently always 0).
int MultiMDDAGLearner::resumeProcess(const nor_utils::Args& args, InputData* pTestData)
{
    int numPolicies = 0;

    // NOTE(review): this value is parsed but never used afterwards --
    // presumably consumed by a later revision; the read is kept so the
    // command-line contract is unchanged.
    AlphaReal policyAlpha = 0.0;
    if (args.hasArgument("policyalpha")) {
        args.getValue("policyalpha", 0, policyAlpha);
    }

    _policy = new AdaBoostArrayOfPolicyArray(args, _actionNumber);

    return numPolicies;
}
示例12: classify
/// Runs classification with a soft-cascade classifier built from the
/// command line: -test <dataFile> <shypFile> <numIterations> [<outFile>].
void SoftCascadeLearner::classify(const nor_utils::Args& args)
{
    SoftCascadeClassifier classifier(args, _verbose);

    // Mandatory positional values of -test.
    const string dataFileName = args.getValue<string>("test", 0);
    const string hypothesisFileName = args.getValue<string>("test", 1);
    const int iterationCount = args.getValue<int>("test", 2);

    // Optional fourth value: file receiving the results.
    string resultFileName = "";
    if (args.getNumValues("test") > 3) {
        args.getValue("test", 3, resultFileName);
    }

    classifier.run(dataFileName, hypothesisFileName, iterationCount, resultFileName);
}
示例13: classify
/// Runs classification with a FilterBoost classifier built from the
/// command line: -test <dataFile> <shypFile> <numIterations> [<outFile>].
void FilterBoostLearner::classify(const nor_utils::Args& args)
{
    FilterBoostClassifier classifier(args, _verbose);

    // -test <dataFile> <shypFile> <numIterations> [<outFile>]
    const string dataFileName = args.getValue<string>("test", 0);
    const string hypothesisFileName = args.getValue<string>("test", 1);
    const int iterationCount = args.getValue<int>("test", 2);

    // Optional fourth value: file receiving the results.
    string resultFileName;
    if (args.getNumValues("test") > 3) {
        args.getValue("test", 3, resultFileName);
    }

    classifier.run(dataFileName, hypothesisFileName, iterationCount, resultFileName);
}
示例14: initLearningOptions
/// Parses the UCT tree-learner options: base learner type/count, creates
/// the base learners and their index pairs, and selects the UCT update
/// rule (edge / alphas / edgesquare; defaults to edge).
void TreeLearnerUCT::initLearningOptions(const nor_utils::Args& args)
{
    BaseLearner::initLearningOptions(args);

    // -baselearnertype <name> <numBaseLearners>
    string baseLearnerName;
    args.getValue("baselearnertype", 0, baseLearnerName);
    args.getValue("baselearnertype", 1, _numBaseLearners);

    // Get the registered weak learner (type from name).
    BaseLearner* pWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner(baseLearnerName);

    // One configured clone plus an (unset) index pair per base learner.
    for( int ib = 0; ib < _numBaseLearners; ++ib ) {
        _baseLearners.push_back(pWeakHypothesisSource->create());
        _baseLearners[ib]->initLearningOptions(args);
        vector< int > tmpVector( 2, -1 );
        _idxPairs.push_back( tmpVector );
    }

    // UCT update rule selection.
    string updateRule = "";
    if ( args.hasArgument( "updaterule" ) )
        args.getValue("updaterule", 0, updateRule );

    if ( updateRule.compare( "edge" ) == 0 )
        _updateRule = EDGE_SQUARE;
    else if ( updateRule.compare( "alphas" ) == 0 )
        _updateRule = ALPHAS;
    else if ( updateRule.compare( "edgesquare" ) == 0 )
        _updateRule = ESQUARE;
    else {
        // BUGFIX: the message previously named ProductLearnerUCT
        // (copy-paste) and was missing its closing parenthesis.
        cerr << "Unknown update rule in TreeLearnerUCT (set to default [edge])" << endl;
        _updateRule = EDGE_SQUARE;
    }
}
示例15: initLearningOptions
// -----------------------------------------------------------------------
// Parses the command-line options configuring this bandit-based learner:
// the update rule, the number of sampled arms (rsample), and the bandit
// algorithm that drives arm selection. Exits only when the (unreachable
// by construction) default branch of the algorithm switch is hit.
void BanditLearner::initLearningOptions(const nor_utils::Args& args)
{
// Shared base-learner options (verbosity, edge offset) first.
BaseLearner::initLearningOptions(args);
// ---- update rule ------------------------------------------------------
string updateRule = "";
if ( args.hasArgument( "updaterule" ) )
args.getValue("updaterule", 0, updateRule );
if ( updateRule.compare( "edge" ) == 0 )
_updateRule = EDGE_SQUARE;
else if ( updateRule.compare( "logedge" ) == 0 )
_updateRule = LOGEDGE;
else if ( updateRule.compare( "alphas" ) == 0 )
_updateRule = ALPHAS;
else if ( updateRule.compare( "edgesquare" ) == 0 )
_updateRule = ESQUARE;
else {
// Unknown/missing rule: silently fall back to LOGEDGE (the warning
// below was deliberately disabled).
//cerr << "Unknown update rule in ProductLearnerUCT (set to default [edge]" << endl;
_updateRule = LOGEDGE;
}
// ---- number of sampled arms ------------------------------------------
if ( args.hasArgument( "rsample" ) ){
_K = args.getValue<int>("rsample", 0);
}
// ---- bandit algorithm selection --------------------------------------
// NOTE(review): the fallback message names BanditSingleStumpLearner --
// presumably copied from that class; confirm the intended class name.
string banditAlgoName = "";
if ( args.hasArgument( "banditalgo" ) )
args.getValue("banditalgo", 0, banditAlgoName );
if ( banditAlgoName.compare( "Random" ) == 0 )
_banditAlgoName = BA_RANDOM_LS;
else if ( banditAlgoName.compare( "UCBK" ) == 0 )
_banditAlgoName = BA_UCBK_LS;
else if ( banditAlgoName.compare( "UCBKR" ) == 0 )
_banditAlgoName = BA_UCBKR_LS;
else if ( banditAlgoName.compare( "UCBKV" ) == 0 )
_banditAlgoName = BA_UCBKV_LS;
else if ( banditAlgoName.compare( "EXP3" ) == 0 )
_banditAlgoName = BA_EXP3_LS;
else if ( banditAlgoName.compare( "EXP3G" ) == 0 )
_banditAlgoName = BA_EXP3G_LS;
else if ( banditAlgoName.compare( "UCT" ) == 0 )
_banditAlgoName = BA_UCT_LS;
else {
// Unknown/missing algorithm: warn and default to EXP3.
cerr << "Unknown bandit algo (BanditSingleStumpLearner)" << endl;
_banditAlgoName = BA_EXP3_LS;
}
// ---- bandit algorithm instantiation ----------------------------------
// Only construct when no algorithm was injected beforehand.
// NOTE(review): the RANDOM/UCBK/UCBKV/UCBKR constructors are commented
// out, so those selections leave _banditAlgo NULL here -- presumably
// instantiated elsewhere, or unsupported; verify before relying on them.
if ( _banditAlgo == NULL ) {
switch ( _banditAlgoName )
{
case BA_RANDOM_LS:
//_banditAlgo = new Random();
break;
case BA_UCBK_LS:
//_banditAlgo = new UCBK();
break;
case BA_UCBKV_LS:
//_banditAlgo = new UCBKV();
break;
case BA_UCBKR_LS:
//_banditAlgo = new UCBKRandomized();
break;
case BA_EXP3_LS:
_banditAlgo = dynamic_cast<GenericBanditAlgorithmLS<double,string>*>( new Exp3LS<double,string>());
break;
case BA_EXP3G_LS:
_banditAlgo = dynamic_cast<GenericBanditAlgorithmLS<double,string>*>(new Exp3GLS<double,string>());
break;
case BA_UCT_LS:
_banditAlgo = dynamic_cast<GenericBanditAlgorithmLS<double,string>*>(new UCT<double,string>());
break;
default:
// Unreachable: every enum value above has a case; kept defensively.
cerr << "There is no bandit algorithm to be given!" << endl;
exit( -1 );
}
}
}