本文整理汇总了C++中nor_utils::Args::declareArgument方法的典型用法代码示例。如果您正苦于以下问题:C++ Args::declareArgument方法的具体用法?C++ Args::declareArgument怎么用?C++ Args::declareArgument使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nor_utils::Args
的用法示例。
在下文中一共展示了Args::declareArgument方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: declareArguments
void StochasticLearner::declareArguments(nor_utils::Args& args)
{
    // Register the options shared by all base learners first.
    BaseLearner::declareArguments(args);

    // --graditer: number of sampled instances (SGD) or iterations (batch GD).
    // NOTE: the original adjacent literals concatenated without separators
    // ("...for SGDwhereas...", "...Descend size <num>..."), producing garbled
    // help text; the strings below add the missing separators and fix the
    // "Gradiend Descend" misspelling.
    args.declareArgument("graditer",
        "Declares the number of randomly drawn training instances for SGD, "
        "whereas it declares the number of iterations for Batch Gradient Descent. "
        "Example: --graditer 50 -> Uses only 50 randomly chosen training instances",
        1, "<num>");

    // --gradmethod: which gradient optimizer to use.
    args.declareArgument("gradmethod",
        "Declares the gradient method: "
        "(sgd) Stochastic Gradient Descent, (bgd) Batch Gradient Descent. "
        "Example: --gradmethod sgd -> Uses stochastic gradient method",
        1, "<method>");

    // --tfunc: objective function minimized by the gradient method.
    // Fixes the "exponantial" misspelling in the original help text.
    args.declareArgument("tfunc",
        "Target function: "
        "exploss: Exponential Loss, edge: max. edge. "
        "Example: --tfunc exploss -> Uses exponential loss for minimizing",
        1, "<function>");

    // --initgamma: initial learning rate for gradient descent.
    args.declareArgument("initgamma",
        "The initial learning rate in gradient descent. "
        "Default value is 10.0",
        1, "<gamma>");

    // --gammdivperiod: how often (in iterations) the learning rate is decreased.
    args.declareArgument("gammdivperiod",
        "The periodicity of decreasing the learning rate \\gamma. "
        "Default value is 1",
        1, "<period>");
}
示例2: declareArguments
void BanditSingleStumpLearner::declareArguments(nor_utils::Args& args)
{
    // Pull in the options of the featurewise base class first.
    FeaturewiseLearner::declareArguments(args);

    // Weight-update rule used inside the UCT machinery.
    args.declareArgument("updaterule",
        "The update weights in the UCT can be the "
        "1-sqrt( 1- edge^2 ) [edge]\n or the alpha [alphas]\n"
        " Default is the first one\n",
        1, "<type>");

    // Number of features considered per round.
    args.declareArgument("rsample",
        "Number of features to be considered\n Default is one\n",
        1, "<K>");

    // Which multi-armed bandit algorithm drives the selection.
    args.declareArgument("banditalgo",
        "The bandit algorithm (UCBK, UCBKRandomized, EXP3 )\nDefault is UCBK\n",
        1, "<algoname>");

    // Share of the database used for payoff estimation (EXP3G).
    args.declareArgument("percent",
        "The percent of database will be used for estimating the payoffs(EXP3G)\n"
        " Default is 10%\n",
        1, "<p>");
}
示例3: declareArguments
void ParasiteLearner::declareArguments(nor_utils::Args& args)
{
    // Inherited base-learner options come first.
    BaseLearner::declareArguments(args);

    // Pool file of weak learners plus how many of them to use (-1 == all).
    args.declareArgument("pool",
        "The name of the shyp file containing the pool of\n"
        " weak learners, followed by the number of desired\n"
        " weak learners. If -1 or more than the number of \n weak learners, we use all of them",
        2, "<fileName> <nBaseLearners>");

    // Flag-style option: takes no value.
    args.declareArgument("closed",
        "Include negatives of weak learners (default = false).");
}
示例4: declareArguments
void TreeLearnerUCT::declareArguments(nor_utils::Args& args)
{
    // Common base-learner options first.
    BaseLearner::declareArguments(args);

    // Underlying base-learner type and how many of them are combined.
    args.declareArgument("baselearnertype",
        "The name of the learner that serves as a basis for the product\n"
        " and the number of base learners to be multiplied\n Don't forget to add its parameters\n",
        2, "<baseLearnerType> <numBaseLearners>");

    // Weight-update rule for the UCT search.
    args.declareArgument("updaterule",
        "The update weights in the UCT can be the 1-sqrt( 1- edge^2 ) [edge]\n"
        " or the alpha [alphas]\n or edgesquare [edgesquare]\n"
        " Default is the first one\n",
        1, "<type>");
}
示例5: declareArguments
void EnumLearnerSA::declareArguments(nor_utils::Args& args)
{
    // Register the inherited options before adding our own.
    BaseLearner::declareArguments(args);

    // Single extra option: the offset applied to u.
    args.declareArgument("uoffset", "The offset of u\n", 1, "<offset>");
}
示例6: declareBaseArguments
void BaseLearner::declareBaseArguments(nor_utils::Args& args)
{
    // Output file holding the strong hypothesis.
    args.declareArgument("shypname",
        "The name of output strong hypothesis (default: " + string(SHYP_NAME)
            + "." + string(SHYP_EXTENSION) + ").",
        1, "<filename>");

    // Whether the shyp output should be compressed (0 or 1).
    args.declareArgument("shypcomp",
        "The shyp file will be compressed", 1, "<flag 0-1>");

    // The remaining options are listed under their own help group.
    args.setGroup("Basic Algorithm Options");

    // Continue a previous training run from a saved strong hypothesis.
    args.declareArgument("resume",
        "Resumes a training process using the strong hypothesis file.",
        1, "<shypFile>");

    // Edge offset (theta); no offset by default.
    args.declareArgument("edgeoffset",
        "Defines the value of the edge offset (theta) (default: no edge offset).",
        1, "<val>");
}
示例7: declareArguments
void BanditTreeLearner::declareArguments(nor_utils::Args& args)
{
    // Inherit all bandit-learner options.
    BanditLearner::declareArguments(args);

    // Base-learner type plus the number of learners combined in the product.
    args.declareArgument("baselearnertype",
        "The name of the learner that serves as a basis for the product\n"
        " and the number of base learners to be multiplied\n Don't forget to add its parameters\n",
        2, "<baseLearnerType> <numBaseLearners>");
}
示例8: declareArguments
void FeaturewiseLearner::declareArguments(nor_utils::Args& args)
{
    // Register the inherited abstention-related options first.
    AbstainableLearner::declareArguments(args);

    // --rsample: search only <num> randomly chosen dimensions instead of all.
    // NOTE: the original literal concatenation produced "set of  size"
    // (double space) and "dimensions(Turned" (missing space) in the help
    // text; the separators are fixed below.
    args.declareArgument("rsample",
        "Instead of searching for a featurewise in all the possible dimensions (features), "
        "select a set of size <num> of random dimensions. "
        "Example: -rsample 50 -> Search over only 50 dimensions "
        "(Turned off for Haar: use -csample instead)",
        1, "<num>");
}
示例9: declareArguments
void AbstainableLearner::declareArguments(nor_utils::Args& args)
{
    // Common base-learner options come first.
    BaseLearner::declareArguments(args);

    // Abstention strategy: which search/abstention variant to activate.
    args.declareArgument("abstention",
        "Activate the abstention. Available types are:\n"
        " greedy: sorting and checking in O(k^2)\n full: the O(2^k) full search\n"
        " real: use the AdaBoost.MH with real valued predictions\n"
        " classwise: abstain if classwise edge <= theta",
        1, "<type>");
}