

Java Tool.run Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.Tool.run. If you have been wondering how Tool.run works, how to call it, or what real-world uses of it look like, the curated code examples below may help. You can also explore further usage examples of its declaring class, org.apache.hadoop.util.Tool.


The following presents 15 code examples of the Tool.run method, sorted by popularity by default.
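For readers new to this API: Tool.run(String[]) is the single method declared by the org.apache.hadoop.util.Tool interface. It receives the tool-specific command-line arguments and returns an exit code, with 0 conventionally meaning success. Below is a minimal, hypothetical sketch of implementing and launching a Tool; the class name MyTool is illustrative and does not appear in the examples that follow. Tools are typically launched through ToolRunner, which parses generic Hadoop options (such as -D and -conf), hands the resulting Configuration to the tool via setConf, and then delegates the remaining arguments to run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// MyTool is a hypothetical name used only for this sketch.
public class MyTool extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    // By the time run is called, ToolRunner has already consumed the
    // generic Hadoop options; args holds only tool-specific arguments.
    Configuration conf = getConf();
    System.out.println("received " + args.length + " tool-specific argument(s)");
    return 0; // 0 signals success; non-zero signals failure
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner.run parses generic options, calls tool.setConf(conf),
    // then invokes tool.run with the remaining arguments.
    int exitCode = ToolRunner.run(new Configuration(), new MyTool(), args);
    System.exit(exitCode);
  }
}

Several of the examples below (for instance Example 9 and Example 15) skip ToolRunner and call setConf followed by run directly, which is equivalent when no generic options need to be parsed.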

Example 1: testSplitRamdom

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
@SuppressWarnings("unchecked")
public void testSplitRamdom() throws Exception {
  Tool tool = new InputSampler<Object,Object>(new Configuration());
  int result = tool.run(new String[] { "-r", Integer.toString(NUM_REDUCES),
      // Use 0.999 probability to reduce the flakiness of the test because
      // the test will fail if the number of samples is less than (number of reduces + 1).
      "-splitRandom", "0.999f", "20", "100",
      input1, input2, output });
  assertEquals(0, result);
  Object[] partitions = readPartitions(output);
  // must be 3 split points since NUM_REDUCES = 4:
  assertEquals(3, partitions.length);
  // check that the partition array is sorted:
  Object[] sortedPartitions = Arrays.copyOf(partitions, partitions.length);
  Arrays.sort(sortedPartitions, new LongWritable.Comparator());
  assertArrayEquals(sortedPartitions, partitions);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 19, Source: TestInputSamplerTool.java

Example 2: testRun_l2t10

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testRun_l2t10() throws Exception {
  // gen rowkeys file for later test
  Configuration conf = TEST_UTIL.getConfiguration();
  String outputPath = "/run_b2t3";
  Tool tool = new GetRandomRowsByRegions(conf);
  int status = tool.run(new String[] { "-b", "2", "-t", "3", VERTEX_TABLE, outputPath });
  Assert.assertEquals(0, status);

  // merge content
  File tf = mergeResults(conf, outputPath, "rowkeys-1");

  // run test
  File tPath = tf.getParentFile();
  tPath = new File(tPath, "performanceTestResults_" + System.currentTimeMillis());
  FileUtils.forceMkdir(tPath);

  tool = new HGraphClientPerformanceTest(conf);
  status =
      tool.run(new String[] { "-l", "2", "-t", "10", VERTEX_TABLE, EDGE_TABLE,
          tf.getAbsolutePath(), tPath.getAbsolutePath() });
  Assert.assertEquals(0, status);

  // verify test results
  outputTestResults(tPath);
}
 
Developer ID: trendmicro, Project: HGraph, Lines: 27, Source: HGraphClientPerformanceTestTest.java

Example 3: testRun_ml2t10

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testRun_ml2t10() throws Exception {
  // gen rowkeys file for later test
  Configuration conf = TEST_UTIL.getConfiguration();
  String outputPath = "/run_ml2t10";
  Tool tool = new GetRandomRowsByRegions(conf);
  int status = tool.run(new String[] { "-b", "2", "-t", "3", VERTEX_TABLE, outputPath });
  Assert.assertEquals(0, status);

  // merge content
  File tf = mergeResults(conf, outputPath, "rowkeys-2");

  // run test
  File tPath = tf.getParentFile();
  tPath = new File(tPath, "performanceTestResults_" + System.currentTimeMillis());
  FileUtils.forceMkdir(tPath);

  tool = new HGraphClientPerformanceTest(conf);
  status =
      tool.run(new String[] { "-m", "-l", "2", "-t", "10", VERTEX_TABLE, EDGE_TABLE,
          tf.getAbsolutePath(), tPath.getAbsolutePath() });
  Assert.assertEquals(0, status);

  // verify test results
  outputTestResults(tPath);
}
 
Developer ID: trendmicro, Project: HGraph, Lines: 27, Source: HGraphClientPerformanceTestTest.java

Example 4: testRun_i2000l2t10

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testRun_i2000l2t10() throws Exception {
  // gen rowkeys file for later test
  Configuration conf = TEST_UTIL.getConfiguration();
  String outputPath = "/run_i2000l2t10";
  Tool tool = new GetRandomRowsByRegions(conf);
  int status = tool.run(new String[] { "-b", "2", "-t", "3", VERTEX_TABLE, outputPath });
  Assert.assertEquals(0, status);

  // merge content
  File tf = mergeResults(conf, outputPath, "rowkeys-2");

  // run test
  File tPath = tf.getParentFile();
  tPath = new File(tPath, "performanceTestResults_" + System.currentTimeMillis());
  FileUtils.forceMkdir(tPath);

  tool = new HGraphClientPerformanceTest(conf);
  status =
      tool.run(new String[] { "-i", "2000", "-l", "2", "-t", "10", VERTEX_TABLE, EDGE_TABLE,
          tf.getAbsolutePath(), tPath.getAbsolutePath() });
  Assert.assertEquals(0, status);

  // verify test results
  outputTestResults(tPath);
}
 
Developer ID: trendmicro, Project: HGraph, Lines: 27, Source: HGraphClientPerformanceTestTest.java

Example 5: testSplitSample

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testSplitSample() throws Exception {
  Tool tool = new InputSampler<Object,Object>(new Configuration());
  int result = tool.run(new String[] { "-r", Integer.toString(NUM_REDUCES),
      "-splitSample", "10", "100",
      input1, input2, output });
  assertEquals(0, result);

  Object[] partitions = readPartitions(output);
  assertArrayEquals(
      new LongWritable[] { new LongWritable(2L), new LongWritable(7L), new LongWritable(20L),},
      partitions);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 14, Source: TestInputSamplerTool.java

Example 6: testSplitInterval

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testSplitInterval() throws Exception {
  Tool tool = new InputSampler<Object,Object>(new Configuration());
  int result = tool.run(new String[] { "-r", Integer.toString(NUM_REDUCES),
      "-splitInterval", "0.5f", "0",
      input1, input2, output });
  assertEquals(0, result);
  Object[] partitions = readPartitions(output);
  assertArrayEquals(new LongWritable[] { new LongWritable(7L), new LongWritable(9L),
    new LongWritable(35L),}, partitions);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 12, Source: TestInputSamplerTool.java

Example 7: runBalancerCli

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
private void runBalancerCli(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  final String[] args = { "-policy", "datanode" };
  final Tool tool = new Cli();    
  tool.setConf(conf);
  final int r = tool.run(args); // start rebalancing
  
  assertEquals("Tools should exit 0 on success", 0, r);
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("Rebalancing with default ctor.");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 15, Source: TestBalancer.java

Example 8: runBalancerCli

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
private void runBalancerCli(Configuration conf, long totalUsedSpace,
    long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  final String[] args = {"-policy", "datanode"};
  final Tool tool = new Cli();
  tool.setConf(conf);
  final int r = tool.run(args); // start rebalancing
  
  assertEquals("Tools should exit 0 on success", 0, r);
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("Rebalancing with default ctor.");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
}
 
Developer ID: hopshadoop, Project: hops, Lines: 15, Source: TestBalancer.java

Example 9: runMRJob

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
public static int runMRJob(Tool tool, String[] args) throws Exception {
    Configuration conf = tool.getConf();
    if (conf == null) {
        conf = new Configuration();
    }

    GenericOptionsParser parser = getParser(conf, args);
    //set the configuration back, so that Tool can configure itself
    tool.setConf(conf);

    //get the args w/o generic hadoop args
    String[] toolArgs = parser.getRemainingArgs();
    return tool.run(toolArgs);
}
 
Developer ID: apache, Project: kylin, Lines: 15, Source: MRUtil.java

Example 10: execute0

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
private static int execute0(
        Configuration conf,
        ClassLoader classLoader,
        ClassDescription clientClass,
        String executionId,
        Map<String, String> batchArguments) throws Exception {
    try (ClassLoaderContext context = new ClassLoaderContext(classLoader)) {
        Configuration copy = new Configuration(conf);
        copy.setClassLoader(classLoader);
        Tool tool = resolveClient(copy, clientClass);
        configure(copy, executionId, batchArguments);
        return tool.run(EMPTY_ARGUMENTS);
    }
}
 
Developer ID: asakusafw, Project: asakusafw-compiler, Lines: 15, Source: MapReduceRunner.java

Example 11: walToHFiles

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
protected void walToHFiles(List<String> dirPaths, TableName tableName) throws IOException {
  Tool player = new WALPlayer();

  // The player reads all files in an arbitrary directory structure and creates
  // a map task for each file. We use ';' as the separator
  // because WAL file names contain ','.
  String dirs = StringUtils.join(dirPaths, ';');
  String jobname = "Incremental_Backup-" + backupId + "-" + tableName.getNameAsString();

  Path bulkOutputPath = getBulkOutputDirForTable(tableName);
  conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
  conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
  conf.set(JOB_NAME_CONF_KEY, jobname);
  String[] playerArgs = { dirs, tableName.getNameAsString() };

  try {
    player.setConf(conf);
    int result = player.run(playerArgs);
    if(result != 0) {
      throw new IOException("WAL Player failed");
    }
    conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
    conf.unset(JOB_NAME_CONF_KEY);
  } catch (IOException e) {
    throw e;
  } catch (Exception ee) {
    throw new IOException("Can not convert from directory " + dirs
        + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
  }
}
 
Developer ID: apache, Project: hbase, Lines: 31, Source: IncrementalTableBackupClient.java

Example 12: testJoin

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
void testJoin(Tool bookAndAuthorJoin, boolean mapOnly) throws Exception {
  File authorsFile = getTestTempFile("authors.tsv");
  File booksFile = getTestTempFile("books.tsv");
  File outputDir = getTestTempDir("output");
  outputDir.delete();

  writeLines(authorsFile, readLines("/assignment1/authors.tsv"));
  writeLines(booksFile, readLines("/assignment1/books.tsv"));

  Configuration conf = new Configuration();

  bookAndAuthorJoin.setConf(conf);
  bookAndAuthorJoin.run(new String[] { "--authors", authorsFile.getAbsolutePath(),
      "--books", booksFile.getAbsolutePath(), "--output", outputDir.getAbsolutePath() });

  String outputFilename = mapOnly ? "part-m-00000" : "part-r-00000";
  
  Multimap<String, Book> booksByAuthors = readBooksByAuthors(new File(outputDir, outputFilename));

  assertTrue(booksByAuthors.containsKey("Charles Bukowski"));
  assertTrue(booksByAuthors.get("Charles Bukowski")
      .contains(new Book("Confessions of a Man Insane Enough to Live with Beasts", 1965)));
  assertTrue(booksByAuthors.get("Charles Bukowski")
      .contains(new Book("Hot Water Music", 1983)));

  assertTrue(booksByAuthors.containsKey("Fyodor Dostoyevsky"));
  assertTrue(booksByAuthors.get("Fyodor Dostoyevsky").contains(new Book("Crime and Punishment", 1866)));
  assertTrue(booksByAuthors.get("Fyodor Dostoyevsky").contains(new Book("The Brothers Karamazov", 1880)));

}
 
Developer ID: sscdotopen, Project: aim3, Lines: 31, Source: BookAndAuthorJoinTest.java

Example 13: testRun_b4

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testRun_b4() throws Exception {
  // init test table
  String tableName = "test.vertex-02";
  String outputPath = "/run_b4";
  createTestTable(tableName, "00030", "00060");

  // start test
  Configuration conf = TEST_UTIL.getConfiguration();
  Tool driver = new CalculateInputSplitMapper(conf);
  int code = driver.run(new String[] { "-b", "4", tableName, outputPath });
  Assert.assertEquals(0, code);

  // get test results
  Path path = new Path(outputPath);
  FileSystem fs = path.getFileSystem(conf);

  // FileStatus[] files = fs.listStatus(path);
  // for (int a = 0; a < files.length; a++) {
  // System.out.println(files[a].getPath());
  // }
  InputStream is = fs.open(new Path(path, "part-r-00000"));
  LineIterator it = IOUtils.lineIterator(is, "UTF-8");
  System.out.println("print out test results");
  while (it.hasNext()) {
    System.out.println(it.next());
  }
  LineIterator.closeQuietly(it);
  IOUtils.closeQuietly(is);
}
 
Developer ID: trendmicro, Project: HGraph, Lines: 31, Source: CalculateInputSplitMapperTest.java

Example 14: testRun

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
@Test
public void testRun() throws Exception {
  importData(new String[] { "-Dimporttsv.columns=HBASE_ROW_KEY," + CF[0] + ":[email protected]",
      "-Dimporttsv.separator=|" }, TABLE, CF,
    "org/trend/hgraph/mapreduce/pagerank/vertex-test-01.data");
  printTable(TABLE);
  
  Tool tool = new ResetPageRankUpdateFlag(TEST_UTIL.getConfiguration());
  int status = tool.run(new String[] { TABLE });
  Assert.assertEquals(0, status);

  printTable(TABLE);
}
 
Developer ID: trendmicro, Project: HGraph, Lines: 14, Source: ResetPageRankUpdateFlagTest.java

Example 15: run

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
public int run(Tool tool, String[] args) throws Exception {
  Configuration conf = new Configuration();
  tool.setConf(conf);

  return tool.run(args);
}
 
Developer ID: ilveroluca, Project: seal, Lines: 8, Source: SealToolRunner.java


Note: The org.apache.hadoop.util.Tool.run method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and their copyrights belong to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.