

Java ToolRunner Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.util.ToolRunner. If you have been wondering what ToolRunner is for, or how to use it in practice, the curated class examples below should help.


The ToolRunner class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, ordered by popularity.
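Before diving into the examples, here is a minimal sketch of the canonical pattern ToolRunner supports: a class implements Tool (typically by extending Configured), and ToolRunner.run parses the generic Hadoop options (-D, -files, -libjars, and so on) before delegating to run(). The class name WordCountTool below is a hypothetical placeholder, not taken from any of the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Minimal sketch of the standard Tool/ToolRunner pattern; WordCountTool
// is a hypothetical placeholder name.
public class WordCountTool extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    // By the time run() is called, ToolRunner has already applied any
    // generic options to the Configuration returned by getConf().
    System.out.println("fs.defaultFS = " + getConf().get("fs.defaultFS"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner wires the Configuration into the Tool, parses generic
    // options out of args, and invokes run() with what remains.
    int exitCode = ToolRunner.run(new Configuration(), new WordCountTool(), args);
    System.exit(exitCode);
  }
}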

Example 1: run

import org.apache.hadoop.util.ToolRunner; // import the required package/class
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n",
                getClass().getSimpleName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Job job = new Job(getConf(), "Text to Parquet");
    job.setJarByClass(getClass());
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setMapperClass(TextToParquetMapper.class);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(AvroParquetOutputFormat.class);
    AvroParquetOutputFormat.setSchema(job, SCHEMA);
    job.setOutputKeyClass(Void.class);
    job.setOutputValueClass(Group.class);
    return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer: mumuhadoop, Project: mumu-parquet, Lines: 22, Source: MapReduceParquetMapReducer.java
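A note on launching Example 1: run() only ever sees the application arguments, because ToolRunner strips the generic options first. Below is a hedged sketch of a matching main() method, assuming the enclosing MapReduceParquetMapReducer class extends Configured and implements Tool (the source file name above suggests this, but the full class is not shown):

// Hedged sketch: assumes the enclosing class extends Configured implements Tool.
public static void main(String[] args) throws Exception {
  // Invoked e.g. as:
  //   hadoop jar app.jar MapReduceParquetMapReducer -D dfs.replication=1 <input> <output>
  // ToolRunner consumes -D/-files/-libjars before run(args) is called.
  int exitCode = ToolRunner.run(new Configuration(), new MapReduceParquetMapReducer(), args);
  System.exit(exitCode);
}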

Example 2: makeArchiveWithRepl

import org.apache.hadoop.util.ToolRunner; // import the required package/class
private String makeArchiveWithRepl() throws Exception {
  final String inputPathStr = inputPath.toUri().getPath();
  System.out.println("inputPathStr = " + inputPathStr);

  final URI uri = fs.getUri();
  final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR;

  final String harName = "foo.har";
  final String fullHarPathStr = prefix + harName;
  final String[] args = { "-archiveName", harName, "-p", inputPathStr,
      "-r", "3", "*", archivePath.toString() }; // "-r" and its value must be separate arguments
  System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
      HADOOP_ARCHIVES_JAR);
  final HadoopArchives har = new HadoopArchives(conf);
  assertEquals(0, ToolRunner.run(har, args));
  return fullHarPathStr;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestHadoopArchives.java

Example 3: testBenchmarkWithProto

import org.apache.hadoop.util.ToolRunner; // import the required package/class
@Test(timeout=20000)
public void testBenchmarkWithProto() throws Exception {
  int rc = ToolRunner.run(new RPCCallBenchmark(),
      new String[] {
          "--clientThreads", "30",
          "--serverThreads", "30",
          "--time", "5",
          "--serverReaderThreads", "4",
          "--messageSize", "1024",
          "--engine", "protobuf"});
  assertEquals(0, rc);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestRPCCallBenchmark.java

Example 4: makeArchive

import org.apache.hadoop.util.ToolRunner; // import the required package/class
private String makeArchive(Path parentPath, String relGlob) throws Exception {
  final String parentPathStr = parentPath.toUri().getPath();
  final String relPathGlob = relGlob == null ? "*" : relGlob;
  System.out.println("parentPathStr = " + parentPathStr);

  final URI uri = fs.getUri();
  final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR;

  final String harName = "foo.har";
  final String fullHarPathStr = prefix + harName;
  final String[] args = { "-archiveName", harName, "-p", parentPathStr,
      relPathGlob, archivePath.toString() };
  System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
      HADOOP_ARCHIVES_JAR);
  final HadoopArchives har = new HadoopArchives(conf);
  assertEquals(0, ToolRunner.run(har, args));
  return fullHarPathStr;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestHadoopArchives.java
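Both archive helpers (Examples 2 and 4) return a har:// URI. Reading the archive back is transparent, since Hadoop maps the har scheme to HarFileSystem. A minimal sketch, assuming conf and a fullHarPathStr as produced by makeArchive above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch: list the archived files through the har:// scheme.
// `fullHarPathStr` is assumed to be the value returned by makeArchive above.
private void listArchive(Configuration conf, String fullHarPathStr) throws Exception {
  Path harPath = new Path(fullHarPathStr);         // e.g. har://hdfs-host:port/.../foo.har
  FileSystem harFs = harPath.getFileSystem(conf);  // resolves to HarFileSystem
  for (FileStatus status : harFs.listStatus(harPath)) {
    System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
  }
}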

Example 5: runTeraGen

import org.apache.hadoop.util.ToolRunner; // import the required package/class
private void runTeraGen(Configuration conf, Path sortInput)
    throws Exception {
  String[] genArgs = {NUM_ROWS, sortInput.toString()};

  // Run TeraGen
  assertEquals(0, ToolRunner.run(conf, new TeraGen(), genArgs)); // expected value first
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestTeraSort.java

Example 6: main

import org.apache.hadoop.util.ToolRunner; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IntegrationTestingUtility.setUseDistributedCluster(conf);
  int ret = ToolRunner.run(conf, new IntegrationTestAcidGuarantees(), args);
  System.exit(ret);
}
 
Developer: fengchen8086, Project: ditb, Lines: 7, Source: IntegrationTestAcidGuarantees.java

Example 7: confirmFormat

import org.apache.hadoop.util.ToolRunner; // import the required package/class
private boolean confirmFormat() {
  String parentZnode = getParentZnode();
  System.err.println(
      "===============================================\n" +
      "The configured parent znode " + parentZnode + " already exists.\n" +
      "Are you sure you want to clear all failover information from\n" +
      "ZooKeeper?\n" +
      "WARNING: Before proceeding, ensure that all HDFS services and\n" +
      "failover controllers are stopped!\n" +
      "===============================================");
  try {
    return ToolRunner.confirmPrompt("Proceed formatting " + parentZnode + "?");
  } catch (IOException e) {
    LOG.debug("Failed to confirm", e);
    return false;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 18, Source: ZKFailoverController.java
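Example 7 uses ToolRunner.confirmPrompt, a small utility on ToolRunner beyond run(): it prints the prompt, reads a yes/no answer from standard input, and returns the answer as a boolean. A minimal standalone sketch (the ConfirmDemo class name is a hypothetical placeholder):

import java.io.IOException;
import org.apache.hadoop.util.ToolRunner;

// Minimal sketch; ConfirmDemo is a hypothetical placeholder name.
public class ConfirmDemo {
  public static void main(String[] args) throws IOException {
    // Prints the prompt, reads y/n from System.in, returns the answer.
    boolean proceed = ToolRunner.confirmPrompt("Format the znode?");
    System.out.println(proceed ? "Formatting..." : "Aborted by user.");
  }
}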

Example 8: runTool

import org.apache.hadoop.util.ToolRunner; // import the required package/class
private String runTool(HdfsConfiguration conf, String[] args, boolean success)
    throws Exception {
  ByteArrayOutputStream o = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(o, true);
  try {
    int ret = ToolRunner.run(new GetConf(conf, out, out), args);
    out.flush();
    System.err.println("Output: " + o.toString());
    assertEquals("Expected " + (success?"success":"failure") +
        " for args: " + Joiner.on(" ").join(args) + "\n" +
        "Output: " + o.toString(),
        success, ret == 0);
    return o.toString();
  } finally {
    o.close();
    out.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestGetConf.java

Example 9: run

import org.apache.hadoop.util.ToolRunner; // import the required package/class
public int run(Path inputDir, int numMappers) throws Exception {
  getConf().set(SEARCHER_INPUTDIR_KEY, inputDir.toString());
  SortedSet<byte []> keys = readKeysToSearch(getConf());
  if (keys.isEmpty()) throw new RuntimeException("No keys to find");
  LOG.info("Count of keys to find: " + keys.size());
  for(byte [] key: keys)  LOG.info("Key: " + Bytes.toStringBinary(key));
  Path hbaseDir = new Path(getConf().get(HConstants.HBASE_DIR));
  // Now read all WALs. In two dirs. Presumes certain layout.
  Path walsDir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
  Path oldWalsDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
  LOG.info("Running Search with keys inputDir=" + inputDir +", numMappers=" + numMappers +
    " against " + getConf().get(HConstants.HBASE_DIR));
  int ret = ToolRunner.run(new WALSearcher(getConf()), new String [] {walsDir.toString(), ""});
  if (ret != 0) return ret;
  return ToolRunner.run(new WALSearcher(getConf()), new String [] {oldWalsDir.toString(), ""});
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: IntegrationTestBigLinkedList.java

Example 10: init

import org.apache.hadoop.util.ToolRunner; // import the required package/class
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try { // initialize file system handle
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return -1;
  }

  for (int i = 0; i < args.length; i++) { // parse command line
    if (args[i].equals("-root")) {
      root = new Path(args[++i]);
    } else if (args[i].equals("-inDir")) {
      inDir = new File(args[++i]);
    } else {
      System.err.println(USAGE);
      ToolRunner.printGenericCommandUsage(System.err);
      System.exit(-1);
    }
  }
  return 0;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 24, Source: DataGenerator.java

Example 11: execute

import org.apache.hadoop.util.ToolRunner; // import the required package/class
@Override
protected void execute(final String cmd) throws Exception {
  String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
  ToolRunner.run(admin, args);
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: CryptoAdminCmdExecutor.java

Example 12: exec

import org.apache.hadoop.util.ToolRunner; // import the required package/class
/**
 * Execute the command, return the result or throw an exception,
 * as appropriate.
 * @param args argument varargs.
 * @return return code
 * @throws Exception failure
 */
public static int exec(String... args) throws Exception {
  return ToolRunner.run(new S3ADiag(), args);
}
 
Developer: steveloughran, Project: cloudup, Lines: 11, Source: S3ADiag.java

Example 13: main

import org.apache.hadoop.util.ToolRunner; // import the required package/class
/** The main entry point. */
public static void main(String[] args) {
  JobConf job = new JobConf(HadoopArchives.class);

  HadoopArchives harchives = new HadoopArchives(job);
  int ret = 0;

  try{
    ret = ToolRunner.run(harchives, args);
  } catch(Exception e) {
    LOG.debug("Exception in archives  ", e);
    System.err.println(e.getClass().getSimpleName() + " in archives");
    final String s = e.getLocalizedMessage();
    if (s != null) {
      System.err.println(s);
    } else {
      e.printStackTrace(System.err);
    }
    System.exit(1);
  }
  System.exit(ret);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: HadoopArchives.java

Example 14: mapreduce

import org.apache.hadoop.util.ToolRunner; // import the required package/class
@Test
public void mapreduce() {
    MapReduceConfiguration mapReduceConfiguration = new MapReduceConfiguration();
    long start = System.currentTimeMillis();
    log.info("Starting IP-count statistics over the nginx access logs");
    try {
        String inputPath = mapReduceConfiguration.url() + "/mapreduce/nginxlog/access/input";
        String outputPath = mapReduceConfiguration.url() + "/mapreduce/nginxlog/access/output/daily" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");
        ToolRunner.run(new DailyTrafficStatisticsMapRed(), new String[]{inputPath, outputPath});
        mapReduceConfiguration.print(outputPath);
    } catch (Exception e) {
        log.error(e);
    }
    long end = System.currentTimeMillis();
    log.info("The MapReduce job took " + (end - start) / 1000 + "s");
}
 
Developer: mumuhadoop, Project: mumu-mapreduce, Lines: 17, Source: DailyTrafficStatisticsMapRedTest.java

Example 15: testCopyDuplication

import org.apache.hadoop.util.ToolRunner; // import the required package/class
public void testCopyDuplication() throws Exception {
  final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
  try {    
    MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat");
    ToolRunner.run(new DistCpV1(new Configuration()),
        new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                      "file:///"+TEST_ROOT_DIR+"/src2/srcdat"});
    assertTrue("Source and destination directories do not match.",
               checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files));

    assertEquals(DistCpV1.DuplicationException.ERROR_CODE,
        ToolRunner.run(new DistCpV1(new Configuration()),
        new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
                      "file:///"+TEST_ROOT_DIR+"/src2/srcdat",
                      "file:///"+TEST_ROOT_DIR+"/destdat",}));
  }
  finally {
    deldir(localfs, TEST_ROOT_DIR+"/destdat");
    deldir(localfs, TEST_ROOT_DIR+"/srcdat");
    deldir(localfs, TEST_ROOT_DIR+"/src2");
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestCopyFiles.java


Note: The org.apache.hadoop.util.ToolRunner class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.