This page collects and summarizes typical usage of the Java method org.apache.hadoop.util.ToolRunner.printGenericCommandUsage. If you have been wondering what ToolRunner.printGenericCommandUsage does and how to use it, the hand-picked method code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.util.ToolRunner.
The following presents 15 code examples of ToolRunner.printGenericCommandUsage, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
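Before the examples, here is a minimal, self-contained sketch of the pattern they all share: a tool prints its own usage line and then calls ToolRunner.printGenericCommandUsage to append help for the generic Hadoop options. The class name UsageDemo and its usage string are illustrative placeholders, not taken from any Hadoop source; only the Tool/ToolRunner calls are real Hadoop API.

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical minimal Tool; only the ToolRunner calls are real Hadoop API.
public class UsageDemo extends Configured implements Tool {

  private static int printUsage() {
    System.err.println("Usage: UsageDemo <input> <output>");
    // Appends the standard help text for generic options such as
    // -conf, -D, -fs, -files, -libjars and -archives.
    ToolRunner.printGenericCommandUsage(System.err);
    return -1;
  }

  @Override
  public int run(String[] args) throws Exception {
    if (args.length != 2) {
      return printUsage(); // wrong arity: print usage and signal failure
    }
    // ... actual work would go here ...
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner.run parses the generic options before invoking run().
    System.exit(ToolRunner.run(new UsageDemo(), args));
  }
}

Because main delegates to ToolRunner.run, generic options such as -D key=value are parsed and applied to the Configuration before run() is invoked, which is why the tools below advertise them in their usage text.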
Example 1: init
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try { // initialize file system handle
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: "
        + ioe.getLocalizedMessage());
    return -1;
  }

  for (int i = 0; i < args.length; i++) { // parse command line
    if (args[i].equals("-root")) {
      root = new Path(args[++i]);
    } else if (args[i].equals("-inDir")) {
      inDir = new File(args[++i]);
    } else {
      System.err.println(USAGE);
      ToolRunner.printGenericCommandUsage(System.err);
      System.exit(-1);
    }
  }
  return 0;
}
Example 2: printHelp
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
private static void printHelp(String cmd) {
  String summary = "scmadmin is the command to execute shared cache manager " +
      "administrative commands.\n" +
      "The full syntax is: \n\n" +
      "hadoop scmadmin" +
      " [-runCleanerTask]" +
      " [-help [cmd]]\n";
  String runCleanerTask =
      "-runCleanerTask: Run cleaner task right away.\n";
  String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
      "\t\tis specified.\n";
  if ("runCleanerTask".equals(cmd)) {
    System.out.println(runCleanerTask);
  } else if ("help".equals(cmd)) {
    System.out.println(help);
  } else {
    System.out.println(summary);
    System.out.println(runCleanerTask);
    System.out.println(help);
    System.out.println();
    ToolRunner.printGenericCommandUsage(System.out);
  }
}
Example 3: printUsage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
static int printUsage() {
  ToolRunner.printGenericCommandUsage(System.out);
  System.out.println(
      "Usage: Task list: -[no]r -[no]w\n" +
      " Format: -[no]seq -[no]txt\n" +
      " CompressionCodec: -[no]zip -[no]pln\n" +
      " CompressionType: -[no]blk -[no]rec\n" +
      " Required: -dir <working dir>\n" +
      "All valid combinations are implicitly enabled, unless an option is enabled\n" +
      "explicitly. For example, specifying \"-zip\" excludes -pln,\n" +
      "unless they are also explicitly included, as in \"-pln -zip\"\n" +
      "Note that CompressionType params only apply to SequenceFiles\n\n" +
      "Useful options to set:\n" +
      "-D fs.defaultFS=\"file:///\" \\\n" +
      "-D fs.file.impl=org.apache.hadoop.fs.RawLocalFileSystem \\\n" +
      "-D filebench.file.bytes=$((10*1024*1024*1024)) \\\n" +
      "-D filebench.key.words=5 \\\n" +
      "-D filebench.val.words=20\n");
  return -1;
}
Example 4: printUsage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
protected void printUsage(PrintStream errOut) {
  errOut.println(getUsageString());
  for (Map.Entry<String, UsageInfo> e : USAGE.entrySet()) {
    String cmd = e.getKey();
    UsageInfo usage = e.getValue();
    errOut.println(" [" + cmd + " " + usage.args + "]");
  }
  errOut.println();
  ToolRunner.printGenericCommandUsage(errOut);
}
Example 5: printHelp
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
/**
 * Print the help message for this tool.
 * @param opts the configured tool options
 */
public void printHelp(ToolOptions opts) {
  System.out.println("usage: sqoop " + getToolName()
      + " [GENERIC-ARGS] [TOOL-ARGS]");
  System.out.println("");
  opts.printHelp();
  System.out.println("");
  System.out.println("Generic Hadoop command-line arguments:");
  System.out.println("(must precede any tool-specific arguments)");
  ToolRunner.printGenericCommandUsage(System.out);
}
Example 6: printHelp
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
@Override
/** {@inheritDoc} */
public void printHelp(ToolOptions opts) {
  System.out.println("usage: sqoop " + getToolName()
      + " [GENERIC-ARGS] [JOB-ARGS] [-- [<tool-name>] [TOOL-ARGS]]");
  System.out.println("");
  opts.printHelp();
  System.out.println("");
  System.out.println("Generic Hadoop command-line arguments:");
  System.out.println("(must precede any tool-specific arguments)");
  ToolRunner.printGenericCommandUsage(System.out);
}
Example 7: printUsage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
/**
 * Displays format of commands.
 *
 * @param cmd
 *          The command that is being executed.
 */
private static void printUsage(String cmd) {
  if ("-refreshUserToGroupsMappings".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin [-refreshUserToGroupsMappings]");
  } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin [-refreshSuperUserGroupsConfiguration]");
  } else if ("-refreshAdminAcls".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin [-refreshAdminAcls]");
  } else if ("-refreshLoadedJobCache".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin [-refreshLoadedJobCache]");
  } else if ("-refreshJobRetentionSettings".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin [-refreshJobRetentionSettings]");
  } else if ("-refreshLogRetentionSettings".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin [-refreshLogRetentionSettings]");
  } else if ("-getGroups".equals(cmd)) {
    System.err.println("Usage: mapred hsadmin" + " [-getGroups [username]]");
  } else {
    System.err.println("Usage: mapred hsadmin");
    System.err.println(" [-refreshUserToGroupsMappings]");
    System.err.println(" [-refreshSuperUserGroupsConfiguration]");
    System.err.println(" [-refreshAdminAcls]");
    System.err.println(" [-refreshLoadedJobCache]");
    System.err.println(" [-refreshJobRetentionSettings]");
    System.err.println(" [-refreshLogRetentionSettings]");
    System.err.println(" [-getGroups [username]]");
    System.err.println(" [-help [cmd]]");
    System.err.println();
    ToolRunner.printGenericCommandUsage(System.err);
  }
}
Example 8: main
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  // -files option is also used by GenericOptionsParser
  // Make sure that is not the first argument for fsck
  int res = -1;
  if ((args.length == 0) || ("-files".equals(args[0]))) {
    printUsage(System.err);
    ToolRunner.printGenericCommandUsage(System.err);
  } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    res = 0;
  } else {
    res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
  }
  System.exit(res);
}
Example 9: run
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
public int run(String[] args) throws Exception {
  if (args.length < 1) {
    System.err.println("FailJob " +
        " (-failMappers|-failReducers)");
    ToolRunner.printGenericCommandUsage(System.err);
    return 2;
  }

  boolean failMappers = false, failReducers = false;
  for (int i = 0; i < args.length; i++) {
    if (args[i].equals("-failMappers")) {
      failMappers = true;
    } else if (args[i].equals("-failReducers")) {
      failReducers = true;
    }
  }
  if (!(failMappers ^ failReducers)) {
    System.err.println("Exactly one of -failMappers or -failReducers must be specified.");
    return 3;
  }

  // Write a file with one line per mapper.
  final FileSystem fs = FileSystem.get(getConf());
  Path inputDir = new Path(FailJob.class.getSimpleName() + "_in");
  fs.mkdirs(inputDir);
  for (int i = 0; i < getConf().getInt("mapred.map.tasks", 1); ++i) {
    BufferedWriter w = new BufferedWriter(new OutputStreamWriter(
        fs.create(new Path(inputDir, Integer.toString(i)))));
    w.write(Integer.toString(i) + "\n");
    w.close();
  }

  Job job = createJob(failMappers, failReducers, inputDir);
  return job.waitForCompletion(true) ? 0 : 1;
}
Example 10: printUsage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
static int printUsage() {
  System.out.println("randomtextwriter " +
      "[-outFormat <output format class>] " +
      "<output>");
  ToolRunner.printGenericCommandUsage(System.out);
  return 2;
}
Example 11: run
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
/**
 * Parses arguments and then runs a map/reduce job.
 * @return a non-zero value if there is an error. Otherwise, return 0.
 */
public int run(String[] args) throws IOException {
  if (args.length != 4) {
    System.err.println("Usage: java " + getClass().getName()
        + " <startDigit> <nDigits> <nMaps> <workingDir>");
    ToolRunner.printGenericCommandUsage(System.err);
    return -1;
  }

  final int startDigit = Integer.parseInt(args[0]);
  final int nDigits = Integer.parseInt(args[1]);
  final int nMaps = Integer.parseInt(args[2]);
  final String workingDir = args[3];

  if (startDigit <= 0) {
    throw new IllegalArgumentException("startDigit = " + startDigit + " <= 0");
  } else if (nDigits <= 0) {
    throw new IllegalArgumentException("nDigits = " + nDigits + " <= 0");
  } else if (nDigits % BBP_HEX_DIGITS != 0) {
    throw new IllegalArgumentException("nDigits = " + nDigits
        + " is not a multiple of " + BBP_HEX_DIGITS);
  } else if (nDigits - 1L + startDigit > IMPLEMENTATION_LIMIT + BBP_HEX_DIGITS) {
    throw new UnsupportedOperationException("nDigits - 1 + startDigit = "
        + (nDigits - 1L + startDigit)
        + " > IMPLEMENTATION_LIMIT + BBP_HEX_DIGITS,"
        + " where IMPLEMENTATION_LIMIT=" + IMPLEMENTATION_LIMIT
        + " and BBP_HEX_DIGITS=" + BBP_HEX_DIGITS);
  } else if (nMaps <= 0) {
    throw new IllegalArgumentException("nMaps = " + nMaps + " <= 0");
  }

  compute(startDigit, nDigits, nMaps, workingDir, getConf(), System.out);
  return 0;
}
Example 12: printUsage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
/** Print usage messages */
public static int printUsage(String[] args, String usage) {
  err.println("args = " + Arrays.asList(args));
  err.println();
  err.println("Usage: java " + usage);
  err.println();
  ToolRunner.printGenericCommandUsage(err);
  return -1;
}
Example 13: printHelp
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
/**
 * Print help.
 */
private void printHelp() {
  String summary =
      "Usage: bin/hdfs oev [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE\n" +
      "Offline edits viewer\n" +
      "Parse a Hadoop edits log file INPUT_FILE and save results\n" +
      "in OUTPUT_FILE.\n" +
      "Required command line arguments:\n" +
      "-i,--inputFile <arg> edits file to process, xml (case\n" +
      " insensitive) extension means XML format,\n" +
      " any other filename means binary format\n" +
      "-o,--outputFile <arg> Name of output file. If the specified\n" +
      " file exists, it will be overwritten,\n" +
      " format of the file is determined\n" +
      " by -p option\n" +
      "\n" +
      "Optional command line arguments:\n" +
      "-p,--processor <arg> Select which type of processor to apply\n" +
      " against image file, currently supported\n" +
      " processors are: binary (native binary format\n" +
      " that Hadoop uses), xml (default, XML\n" +
      " format), stats (prints statistics about\n" +
      " edits file)\n" +
      "-h,--help Display usage information and exit\n" +
      "-f,--fix-txids Renumber the transaction IDs in the input,\n" +
      " so that there are no gaps or invalid\n" +
      " transaction IDs.\n" +
      "-r,--recover When reading binary edit logs, use recovery\n" +
      " mode. This will give you the chance to skip\n" +
      " corrupt parts of the edit log.\n" +
      "-v,--verbose More verbose output, prints the input and\n" +
      " output filenames, for processors that write\n" +
      " to a file, also output to screen. On large\n" +
      " image files this will dramatically increase\n" +
      " processing time (default is false).\n";
  System.out.println(summary);
  System.out.println();
  ToolRunner.printGenericCommandUsage(System.out);
}
Example 14: usage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
private static void usage() {
  System.err.println("BigMapOutput -input <input-dir> -output <output-dir> " +
      "[-create <filesize in MB>]");
  ToolRunner.printGenericCommandUsage(System.err);
  System.exit(1);
}
Example 15: printUsage
import org.apache.hadoop.util.ToolRunner; // import the package/class the method depends on
protected void printUsage(PrintStream out) {
  ToolRunner.printGenericCommandUsage(out);
  out.println("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>");
  out.println(" e.g. gridmix -generate 100m foo -");
  out.println("Options:");
  out.println(" -generate <MiB> : Generate input data of size MiB under "
      + "<iopath>/input/ and generate\n\t\t distributed cache data under "
      + "<iopath>/distributedCache/.");
  out.println(" -users <usersResourceURI> : URI that contains the users list.");
  out.println("Configuration parameters:");
  out.println(" General parameters:");
  out.printf(" %-48s : Output directory%n", GRIDMIX_OUT_DIR);
  out.printf(" %-48s : Submitting threads%n", GRIDMIX_SUB_THR);
  out.printf(" %-48s : Queued job desc%n", GRIDMIX_QUE_DEP);
  out.printf(" %-48s : User resolution class%n", GRIDMIX_USR_RSV);
  out.printf(" %-48s : Job types (%s)%n", JobCreator.GRIDMIX_JOB_TYPE, getJobTypes());
  out.println(" Parameters related to job submission:");
  out.printf(" %-48s : Default queue%n",
      GridmixJob.GRIDMIX_DEFAULT_QUEUE);
  out.printf(" %-48s : Enable/disable using queues in trace%n",
      GridmixJob.GRIDMIX_USE_QUEUE_IN_TRACE);
  out.printf(" %-48s : Job submission policy (%s)%n",
      GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY, getSubmissionPolicies());
  out.println(" Parameters specific for LOADJOB:");
  out.printf(" %-48s : Key fraction of rec%n",
      AvgRecordFactory.GRIDMIX_KEY_FRC);
  out.println(" Parameters specific for SLEEPJOB:");
  out.printf(" %-48s : Whether to ignore reduce tasks%n",
      SleepJob.SLEEPJOB_MAPTASK_ONLY);
  out.printf(" %-48s : Number of fake locations for map tasks%n",
      JobCreator.SLEEPJOB_RANDOM_LOCATIONS);
  out.printf(" %-48s : Maximum map task runtime in milli-sec%n",
      SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME);
  out.printf(" %-48s : Maximum reduce task runtime in milli-sec (merge+reduce)%n",
      SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME);
  out.println(" Parameters specific for STRESS submission throttling policy:");
  out.printf(" %-48s : jobs vs task-tracker ratio%n",
      StressJobFactory.CONF_MAX_JOB_TRACKER_RATIO);
  out.printf(" %-48s : maps vs map-slot ratio%n",
      StressJobFactory.CONF_OVERLOAD_MAPTASK_MAPSLOT_RATIO);
  out.printf(" %-48s : reduces vs reduce-slot ratio%n",
      StressJobFactory.CONF_OVERLOAD_REDUCETASK_REDUCESLOT_RATIO);
  out.printf(" %-48s : map-slot share per job%n",
      StressJobFactory.CONF_MAX_MAPSLOT_SHARE_PER_JOB);
  out.printf(" %-48s : reduce-slot share per job%n",
      StressJobFactory.CONF_MAX_REDUCESLOT_SHARE_PER_JOB);
}