This article collects typical usage examples of the Java method weka.core.Utils.getFlag. If you are wondering what Utils.getFlag does, how to use it, or where to find examples of it in practice, the curated code samples below should help. You can also explore further usage examples of its containing class, weka.core.Utils.

Below are 15 code examples of Utils.getFlag, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
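Before diving into the examples, a minimal sketch of what Utils.getFlag actually does may be useful: it scans the options array for the given flag, returns whether the flag was present, and "consumes" it by blanking the matching entry, so that later calls such as Utils.checkForRemainingOptions no longer see it. The demo class and option values below are illustrative assumptions, not Weka code.

import weka.core.Utils;

public class GetFlagDemo { // hypothetical demo class
  public static void main(String[] args) throws Exception {
    String[] options = { "-h", "-t", "data.arff" }; // made-up option values

    // returns true when the flag is present, and blanks the matching entry
    boolean help = Utils.getFlag('h', options);
    System.out.println(help);       // true
    System.out.println(options[0]); // "" -- the flag has been consumed

    // a second lookup fails, because the entry is now empty
    System.out.println(Utils.getFlag('h', options)); // false
  }
}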
Example 1: run
import weka.core.Utils; // import the package/class the method depends on

@Override
public void run(Object toRun, String[] options) throws IllegalArgumentException {
  if (!(toRun instanceof ArffHeaderHadoopJob)) {
    throw new IllegalArgumentException("Object to run is not an ArffHeaderHadoopJob!");
  }

  try {
    ArffHeaderHadoopJob ahhj = (ArffHeaderHadoopJob) toRun;

    if (Utils.getFlag('h', options)) {
      String help = DistributedJob.makeOptionsStr(ahhj);
      System.err.println(help);
      System.exit(1);
    }

    ahhj.setOptions(options);
    ahhj.runJob();
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
Example 2: run
import weka.core.Utils; // import the package/class the method depends on

@Override
public void run(Object toRun, String[] args) throws IllegalArgumentException {
  if (!(toRun instanceof WekaClassifierHadoopJob)) {
    throw new IllegalArgumentException("Object to run is not a WekaClassifierHadoopJob!");
  }

  try {
    WekaClassifierHadoopJob wchj = (WekaClassifierHadoopJob) toRun;

    if (Utils.getFlag('h', args)) {
      String help = DistributedJob.makeOptionsStr(wchj);
      System.err.println(help);
      System.exit(1);
    }

    wchj.setOptions(args);
    wchj.runJob();
    System.out.println(wchj.getClassifier());
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
Example 3: main
import weka.core.Utils; // import the package/class the method depends on

/**
 * Executes the tests; use "-h" to list the command-line options.
 *
 * @param args the command-line parameters
 * @throws Exception if something goes wrong
 */
public static void main(String[] args) throws Exception {
  CheckSource check;
  StringBuffer text;
  Enumeration<Option> enm;

  check = new CheckSource();
  if (Utils.getFlag('h', args)) {
    text = new StringBuffer();
    text.append("\nHelp requested:\n\n");
    enm = check.listOptions();
    while (enm.hasMoreElements()) {
      Option option = enm.nextElement();
      text.append(option.synopsis() + "\n");
      text.append(option.description() + "\n");
    }
    System.out.println("\n" + text + "\n");
  } else {
    check.setOptions(args);
    if (check.execute()) {
      System.out.println("Tests OK!");
    } else {
      System.out.println("Tests failed!");
    }
  }
}
Example 4: main
import weka.core.Utils; // import the package/class the method depends on

/**
 * Main method.
 *
 * @param args the command-line parameters
 */
public static void main(String[] args) {
  BayesNetGenerator b = new BayesNetGenerator();
  try {
    if ((args.length == 0) || (Utils.getFlag('h', args))) {
      printOptions(b);
      return;
    }
    b.setOptions(args);

    b.generateRandomNetwork();
    if (!b.m_bGenerateNet) { // skip instance generation if not required
      b.generateInstances();
    }
    System.out.println(b.toString());
  } catch (Exception e) {
    e.printStackTrace();
    printOptions(b);
  }
}
Example 5: run
import weka.core.Utils; // import the package/class the method depends on

@Override
public void run(Object toRun, String[] args) {
  if (!(toRun instanceof KMeansClustererHadoopJob)) {
    throw new IllegalArgumentException("Object to run is not a KMeansClustererHadoopJob!");
  }

  try {
    KMeansClustererHadoopJob job = (KMeansClustererHadoopJob) toRun;

    if (Utils.getFlag('h', args)) {
      String help = DistributedJob.makeOptionsStr(job);
      System.err.println(help);
      System.exit(1);
    }

    job.setOptions(args);
    job.runJob();

    // if (!DistributedJobConfig.isEmpty(getText())) {
    //   System.out.println(getText());
    // }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
Example 6: runScript
import weka.core.Utils; // import the package/class the method depends on

/**
 * Runs the specified script. All options that weren't "consumed" (like "-s"
 * for the script filename) will be used as command-line arguments for the
 * actual script.
 *
 * @param script the script object to use
 * @param args the command-line arguments
 * @throws Exception if execution fails
 */
public static void runScript(Script script, String[] args) throws Exception {
  String tmpStr;
  File scriptFile;
  Vector<String> options;
  int i;

  if (Utils.getFlag('h', args) || Utils.getFlag("help", args)) {
    System.out.println(makeOptionString(script));
  } else {
    // process options
    tmpStr = Utils.getOption('s', args);
    if (tmpStr.length() == 0) {
      throw new WekaException("No script supplied!");
    } else {
      scriptFile = new File(tmpStr);
    }
    script.setOptions(args);

    // remove empty (i.e. already consumed) elements from the array
    options = new Vector<String>();
    for (i = 0; i < args.length; i++) {
      if (args[i].length() > 0) {
        options.add(args[i]);
      }
    }

    // run script
    script.run(scriptFile, options.toArray(new String[options.size()]));
  }
}
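runScript above relies on the consuming behavior of Utils.getOption: it returns the value following the flag and blanks both array entries, which is why the remaining non-empty elements can be passed straight through to the script. A minimal sketch of that behavior, with made-up option values:

import weka.core.Utils;

public class GetOptionDemo { // hypothetical demo class
  public static void main(String[] args) throws Exception {
    String[] options = { "-s", "myscript.py", "-x" }; // illustrative values

    String script = Utils.getOption('s', options);
    System.out.println(script);     // "myscript.py"
    System.out.println(options[0]); // "" -- the flag was consumed
    System.out.println(options[1]); // "" -- its value was consumed
    System.out.println(options[2]); // "-x" -- left for later processing
  }
}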
Example 7: run
import weka.core.Utils; // import the package/class the method depends on

@Override
public void run(Object toRun, String[] args) {
  if (!(toRun instanceof CorrelationMatrixHadoopJob)) {
    throw new IllegalArgumentException("Object to run is not a CorrelationMatrixHadoopJob!");
  }

  try {
    CorrelationMatrixHadoopJob job = (CorrelationMatrixHadoopJob) toRun;

    if (Utils.getFlag('h', args)) {
      String help = DistributedJob.makeOptionsStr(job);
      System.err.println(help);
      System.exit(1);
    }

    job.setOptions(args);
    job.runJob();

    if (!DistributedJobConfig.isEmpty(getText())) {
      System.out.println(getText());
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
Example 8: setOptions
import weka.core.Utils; // import the package/class the method depends on

@Override
public void setOptions(String[] options) throws Exception {
  m_encodeMissingAsZero = Utils.getFlag('M', options);
  m_insertDummyNominalFirstValue = Utils.getFlag('F', options);

  Utils.checkForRemainingOptions(options);
}
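Example 8 shows the canonical setOptions pattern: consume every recognized flag with getFlag, then let Utils.checkForRemainingOptions throw if any non-empty entries are left over. A minimal sketch of that failure mode, with a made-up unknown flag:

import weka.core.Utils;

public class RemainingOptionsDemo { // hypothetical demo class
  public static void main(String[] args) {
    String[] options = { "-M", "-F", "-bogus" }; // "-bogus" is an illustrative unknown flag
    try {
      Utils.getFlag('M', options); // consumed (entry blanked)
      Utils.getFlag('F', options); // consumed (entry blanked)
      Utils.checkForRemainingOptions(options); // throws: "-bogus" was never consumed
    } catch (Exception e) {
      System.err.println(e.getMessage()); // reports the illegal leftover option
    }
  }
}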
Example 9: runLoader
import weka.core.Utils; // import the package/class the method depends on

/**
 * Runs the supplied loader. Parses options and prints help if necessary.
 *
 * @param loader the loader to run
 * @param options the options to set on the loader
 */
public static void runLoader(Loader loader, String[] options) {
  // help requested?
  try {
    String[] tmpOptions = options.clone();
    if (Utils.getFlag('h', tmpOptions)) {
      System.err.println("\nHelp requested\n" + makeOptionStr(loader));
      return;
    }
  } catch (Exception e) {
    // ignore it
  }

  try {
    boolean incremental = Utils.getFlag("incremental", options)
      && (loader instanceof IncrementalConverter);

    if (loader instanceof OptionHandler) {
      ((OptionHandler) loader).setOptions(options);
    }

    if (incremental) {
      Instances structure = loader.getStructure();
      System.out.println(structure);
      Instance temp;
      do {
        temp = loader.getNextInstance(structure);
        if (temp != null) {
          System.out.println(temp);
        }
      } while (temp != null);
    } else {
      System.out.println(loader.getDataSet());
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
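The incremental branch in Example 9 only fires for loaders that implement IncrementalConverter. The same streaming pattern can be driven directly with weka.core.converters.ArffLoader; the file name below is a made-up assumption:

import java.io.File;

import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ArffLoader;

public class IncrementalLoadDemo { // hypothetical demo class
  public static void main(String[] args) throws Exception {
    ArffLoader loader = new ArffLoader(); // implements IncrementalConverter
    loader.setFile(new File("data.arff")); // illustrative file name

    Instances structure = loader.getStructure(); // header only, no instances yet
    Instance inst;
    while ((inst = loader.getNextInstance(structure)) != null) {
      System.out.println(inst); // instances are streamed one at a time
    }
  }
}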
Example 10: removeBlacklist
import weka.core.Utils; // import the package/class the method depends on

/**
 * Removes all blacklisted options from the options array.
 *
 * @param options the options array to remove blacklisted options from
 * @return the processed options array
 */
protected String[] removeBlacklist(String[] options) {
  Hashtable<String, Option> pool;
  Option option;

  // retrieve the options that are on the blacklist
  Enumeration<Option> enm = listOptions();
  pool = new Hashtable<String, Option>();
  while (enm.hasMoreElements()) {
    option = enm.nextElement();
    if (isOnBlacklist(option.name())) {
      pool.put(option.name(), option);
    }
  }

  // consume (and thereby remove) those options
  Enumeration<String> enm2 = pool.keys();
  while (enm2.hasMoreElements()) {
    option = pool.get(enm2.nextElement());
    try {
      if (option.numArguments() == 0) {
        Utils.getFlag(option.name(), options);
      } else {
        Utils.getOption(option.name(), options);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

  return options;
}
Example 11: main
import weka.core.Utils; // import the package/class the method depends on

/**
 * Main method.
 *
 * @param args the command-line options
 */
public static void main(String[] args) {
  try {
    String[] argsCopy = args.clone();
    if (Utils.getFlag('h', argsCopy) || Utils.getFlag("help", argsCopy)) {
      runAssociator(new FPGrowth(), args);
      System.out.println("-disk\n\tProcess data off of disk instead of loading\n\t"
        + "into main memory. This is a command line only option.");
      return;
    }

    if (!Utils.getFlag("disk", args)) {
      runAssociator(new FPGrowth(), args);
    } else {
      String filename = Utils.getOption('t', args);
      weka.core.converters.ArffLoader loader = null;
      if (filename.length() != 0) {
        loader = new weka.core.converters.ArffLoader();
        loader.setFile(new java.io.File(filename));
      } else {
        throw new Exception("No training file specified!");
      }

      FPGrowth fpGrowth = new FPGrowth();
      fpGrowth.setOptions(args);
      Utils.checkForRemainingOptions(args);
      fpGrowth.buildAssociations(loader);
      System.out.print(fpGrowth.toString());
    }
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
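Since Example 11 is itself a main method, the off-disk code path can be exercised programmatically as well as from the command line. A minimal sketch, assuming a made-up ARFF file name:

public class FPGrowthDiskDemo { // hypothetical demo class
  public static void main(String[] args) {
    // equivalent to: java weka.associations.FPGrowth -t transactions.arff -disk
    weka.associations.FPGrowth.main(new String[] { "-t", "transactions.arff", "-disk" });
  }
}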
Example 12: SelectAttributes
import weka.core.Utils; // import the package/class the method depends on

/**
 * Performs attribute selection with a particular evaluator and a set of
 * options specifying the search method, the input file, etc.
 *
 * @param ASEvaluator an evaluator object
 * @param options an array of options, not only for the evaluator but also for
 *          the search method (if any) and an input data file
 * @return the results of attribute selection as a String
 * @exception Exception if no training file is set
 */
public static String SelectAttributes(ASEvaluation ASEvaluator, String[] options)
  throws Exception {
  String trainFileName, searchName;
  Instances train = null;
  ASSearch searchMethod = null;
  String[] optionsTmp = options.clone();
  boolean helpRequested = false;

  try {
    // get the basic options (options that are the same for all attribute selectors)
    trainFileName = Utils.getOption('i', options);
    helpRequested = Utils.getFlag('h', optionsTmp);

    if (helpRequested || (trainFileName.length() == 0)) {
      searchName = Utils.getOption('s', optionsTmp);
      if (searchName.length() != 0) {
        String[] searchOptions = Utils.splitOptions(searchName);
        searchMethod = (ASSearch) Class.forName(searchOptions[0]).newInstance();
      }

      if (helpRequested) {
        throw new Exception("Help requested.");
      } else {
        throw new Exception("No training file given.");
      }
    }
  } catch (Exception e) {
    throw new Exception('\n' + e.getMessage()
      + makeOptionString(ASEvaluator, searchMethod));
  }

  DataSource source = new DataSource(trainFileName);
  train = source.getDataSet();

  return SelectAttributes(ASEvaluator, options, train);
}
Example 13: setOptions
import weka.core.Utils; // import the package/class the method depends on

/**
 * Parses a given list of options.
 * <p/>
 *
 * <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -E <expr>
 *  The expression to use for filtering
 *  (default: true).</pre>
 *
 * <pre> -F
 *  Apply the filter to instances that arrive after the first
 *  (training) batch. The default is to not apply the filter (i.e.
 *  always return the instance).</pre>
 *
 * <pre> -output-debug-info
 *  If set, the filter is run in debug mode and
 *  may output additional info to the console.</pre>
 *
 * <pre> -do-not-check-capabilities
 *  If set, the filter's capabilities are not checked when the input format is set
 *  (use with caution).</pre>
 *
 * <!-- options-end -->
 *
 * @param options the list of options as an array of strings
 * @throws Exception if an option is not supported
 */
@Override
public void setOptions(String[] options) throws Exception {
  String tmpStr = Utils.getOption('E', options);
  if (tmpStr.length() != 0) {
    setExpression(tmpStr);
  } else {
    setExpression("true");
  }

  m_filterAfterFirstBatch = Utils.getFlag('F', options);

  if (getInputFormat() != null) {
    setInputFormat(getInputFormat());
  }

  super.setOptions(options);

  Utils.checkForRemainingOptions(options);
}
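The option arrays documented above are typically produced from a single command-line string; Utils.splitOptions turns such a string into the array that setOptions expects, honoring quoting. A minimal sketch (the expression itself is illustrative):

import weka.core.Utils;

public class SplitOptionsDemo { // hypothetical demo class
  public static void main(String[] args) throws Exception {
    // splitOptions honors quoting, so the expression survives as a single token
    String[] options = Utils.splitOptions("-E \"ATT1 > 5.0\" -F");
    for (String o : options) {
      System.out.println("[" + o + "]"); // [-E] [ATT1 > 5.0] [-F]
    }
    // the resulting array can then be handed to the filter's setOptions(options)
  }
}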
Example 14: setup
import weka.core.Utils; // import the package/class the method depends on

@Override
public void setup(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  m_task = new CorrelationMatrixRowReduceTask();

  String sMapTaskOpts =
    conf.get(CorrelationMatrixHadoopMapper.CORRELATION_MATRIX_MAP_TASK_OPTIONS);

  if (!DistributedJobConfig.isEmpty(sMapTaskOpts)) {
    try {
      String[] opts = Utils.splitOptions(sMapTaskOpts);

      m_missingsWereReplacedWithMeans = !Utils.getFlag("ignore-missing", opts);
      m_covariance = Utils.getFlag("covariance", opts);
      m_deleteClassIfSet = !Utils.getFlag("keep-class", opts);

      // name of the training ARFF header file
      String arffHeaderFileName = Utils.getOption("arff-header", opts);
      if (DistributedJobConfig.isEmpty(arffHeaderFileName)) {
        throw new IOException("Can't continue without the name of the ARFF header file!");
      }

      m_headerWithSummaryAtts =
        WekaClassifierHadoopMapper.loadTrainingHeader(arffHeaderFileName);
      Instances trainingHeader =
        CSVToARFFHeaderReduceTask.stripSummaryAtts(m_headerWithSummaryAtts);
      WekaClassifierHadoopMapper.setClassIndex(opts, trainingHeader, false);

      // Set any class index in the header with summary attributes. Summary
      // atts always come after the regular atts, so the class index in the
      // stripped version will be the same as in the version with summary atts.
      if (trainingHeader.classIndex() >= 0) {
        m_headerWithSummaryAtts.setClassIndex(trainingHeader.classIndex());
      }
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}

Developer: mydzigear | Project: repo.kmeanspp.silhouette_score | Lines: 45 | Source: CorrelationMatrixRowHadoopReducer.java
Example 15: setup
import weka.core.Utils; // import the package/class the method depends on

@Override
public void setup(Context context) throws IOException {
  Configuration conf = context.getConfiguration();

  m_outputDestination = conf.get(SKETCH_WRITE_PATH);
  if (DistributedJobConfig.isEmpty(m_outputDestination)) {
    throw new IOException("No output path for centroid sketches supplied!");
  }

  String taskOptsS =
    conf.get(KMeansCentroidSketchHadoopMapper.CENTROID_SKETCH_MAP_TASK_OPTIONS);

  // Determine whether this is the first iteration; if so, we need to deal
  // with the distance function with respect to the global priming data.
  try {
    if (!DistributedJobConfig.isEmpty(taskOptsS)) {
      String[] taskOpts = Utils.splitOptions(taskOptsS);

      // name of the training ARFF header file
      String arffHeaderFileName = Utils.getOption("arff-header", taskOpts);
      if (DistributedJobConfig.isEmpty(arffHeaderFileName)) {
        throw new IOException("Can't continue without the name of the ARFF header file!");
      }

      Instances headerWithSummary =
        WekaClassifierHadoopMapper.loadTrainingHeader(arffHeaderFileName);

      // first iteration?
      m_isFirstIteration = Utils.getFlag("first-iteration", taskOpts);

      KMeansMapTask forFilteringOnly = new KMeansMapTask();
      forFilteringOnly.setOptions(taskOpts);

      // Gets us the header (sans summary attributes) after it has passed
      // through any filters that the user may have specified (including the
      // replace-missing-values filter).
      m_transformedHeaderNoSummary = forFilteringOnly.init(headerWithSummary);
    }
  } catch (Exception ex) {
    throw new IOException(ex);
  }
}

Developer: mydzigear | Project: repo.kmeanspp.silhouette_score | Lines: 44 | Source: KMeansCentroidSketchHadoopReducer.java