

Java Tool.setConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.Tool.setConf. If you have been wondering what exactly Tool.setConf does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.util.Tool.


The following presents 8 code examples of Tool.setConf, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
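Before diving into the examples, a quick refresher: Tool is Hadoop's standard interface for configurable command-line programs, and setConf(Configuration) injects the configuration that run(String[]) will later read via getConf(). Below is a minimal sketch of the usual pattern (the class name WordCountTool and its job setup are hypothetical, for illustration only); in practice ToolRunner.run parses generic options such as -D and calls setConf on your behalf.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical Tool implementation; Configured supplies setConf()/getConf().
public class WordCountTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = getConf(); // the Configuration injected via setConf()
    // ... build and submit a job using conf ...
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner strips generic Hadoop options (-D, -conf, -fs, ...) and
    // calls tool.setConf(conf) before invoking run().
    System.exit(ToolRunner.run(new Configuration(), new WordCountTool(), args));
  }
}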

Example 1: runBalancerCli

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
private void runBalancerCli(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  final String[] args = { "-policy", "datanode" };
  final Tool tool = new Cli();    
  tool.setConf(conf);
  final int r = tool.run(args); // start rebalancing
  
  assertEquals("Tools should exit 0 on success", 0, r);
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("Rebalancing with default ctor.");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 15, Source file: TestBalancer.java

Example 2: runBalancerCli

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
private void runBalancerCli(Configuration conf, long totalUsedSpace,
    long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  final String[] args = {"-policy", "datanode"};
  final Tool tool = new Cli();
  tool.setConf(conf);
  final int r = tool.run(args); // start rebalancing
  
  assertEquals("Tools should exit 0 on success", 0, r);
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("Rebalancing with default ctor.");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 15, Source file: TestBalancer.java

Example 3: runMRJob

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
public static int runMRJob(Tool tool, String[] args) throws Exception {
    Configuration conf = tool.getConf();
    if (conf == null) {
        conf = new Configuration();
    }

    GenericOptionsParser parser = getParser(conf, args);
    //set the configuration back, so that Tool can configure itself
    tool.setConf(conf);

    //get the args w/o generic hadoop args
    String[] toolArgs = parser.getRemainingArgs();
    return tool.run(toolArgs);
}
 
Developer ID: apache, Project: kylin, Lines of code: 15, Source file: MRUtil.java
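For context, here is a hypothetical invocation of the helper above; MyCuboidJob stands in for an assumed Tool implementation, and the leading -D pair is a generic Hadoop option that the parser consumes before the remaining args reach run():

// Hypothetical usage: the generic option is absorbed by GenericOptionsParser,
// so run() receives only { "input", "output" }.
Tool job = new MyCuboidJob(); // assumed Tool implementation
int exitCode = MRUtil.runMRJob(job,
    new String[] { "-D", "mapreduce.job.reduces=2", "input", "output" });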

Example 4: walToHFiles

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
protected void walToHFiles(List<String> dirPaths, TableName tableName) throws IOException {
  Tool player = new WALPlayer();

  // Player reads all files in an arbitrary directory structure and creates
  // a Map task for each file. We use ';' as the separator
  // because WAL file names contain ','
  String dirs = StringUtils.join(dirPaths, ';');
  String jobname = "Incremental_Backup-" + backupId + "-" + tableName.getNameAsString();

  Path bulkOutputPath = getBulkOutputDirForTable(tableName);
  conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
  conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
  conf.set(JOB_NAME_CONF_KEY, jobname);
  String[] playerArgs = { dirs, tableName.getNameAsString() };

  try {
    player.setConf(conf);
    int result = player.run(playerArgs);
    if (result != 0) {
      throw new IOException("WAL Player failed");
    }
    conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
    conf.unset(JOB_NAME_CONF_KEY);
  } catch (IOException e) {
    throw e;
  } catch (Exception ee) {
    throw new IOException("Can not convert from directory " + dirs
        + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
  }
}
 
Developer ID: apache, Project: hbase, Lines of code: 31, Source file: IncrementalTableBackupClient.java

Example 5: testJoin

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
void testJoin(Tool bookAndAuthorJoin, boolean mapOnly) throws Exception {
  File authorsFile = getTestTempFile("authors.tsv");
  File booksFile = getTestTempFile("books.tsv");
  File outputDir = getTestTempDir("output");
  outputDir.delete();

  writeLines(authorsFile, readLines("/assignment1/authors.tsv"));
  writeLines(booksFile, readLines("/assignment1/books.tsv"));

  Configuration conf = new Configuration();

  bookAndAuthorJoin.setConf(conf);
  bookAndAuthorJoin.run(new String[] { "--authors", authorsFile.getAbsolutePath(),
      "--books", booksFile.getAbsolutePath(), "--output", outputDir.getAbsolutePath() });

  String outputFilename = mapOnly ? "part-m-00000" : "part-r-00000";
  
  Multimap<String, Book> booksByAuthors = readBooksByAuthors(new File(outputDir, outputFilename));

  assertTrue(booksByAuthors.containsKey("Charles Bukowski"));
  assertTrue(booksByAuthors.get("Charles Bukowski")
      .contains(new Book("Confessions of a Man Insane Enough to Live with Beasts", 1965)));
  assertTrue(booksByAuthors.get("Charles Bukowski")
      .contains(new Book("Hot Water Music", 1983)));

  assertTrue(booksByAuthors.containsKey("Fyodor Dostoyevsky"));
  assertTrue(booksByAuthors.get("Fyodor Dostoyevsky").contains(new Book("Crime and Punishment", 1866)));
  assertTrue(booksByAuthors.get("Fyodor Dostoyevsky").contains(new Book("The Brothers Karamazov", 1880)));

}
 
Developer ID: sscdotopen, Project: aim3, Lines of code: 31, Source file: BookAndAuthorJoinTest.java

Example 6: run

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
public int run(Tool tool, String[] args) throws Exception {
  Configuration conf = new Configuration();
  tool.setConf(conf);

  return tool.run(args);
}
 
Developer ID: ilveroluca, Project: seal, Lines of code: 8, Source file: SealToolRunner.java

Example 7: testManyBalancerSimultaneously

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
/**
 * Test running many balancers simultaneously.
 *
 * Case-1: The first balancer is running. Starting a second one should fail
 * immediately with an "Another balancer is running. Exiting.." IOException.
 *
 * Case-2: When the second balancer starts, the 'balancer.id' file exists but
 * the lease doesn't exist. The second balancer should then run successfully.
 */
@Test(timeout = 100000)
public void testManyBalancerSimultaneously() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  // add an empty node with half of the capacities(4 * CAPACITY) & the same
  // rack
  long[] capacities = new long[] { 4 * CAPACITY };
  String[] racks = new String[] { RACK0 };
  long newCapacity = 2 * CAPACITY;
  String newRack = RACK0;
  LOG.info("capacities = " + long2String(capacities));
  LOG.info("racks      = " + Arrays.asList(racks));
  LOG.info("newCapacity= " + newCapacity);
  LOG.info("newRack    = " + newRack);
  LOG.info("useTool    = " + false);
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
      .racks(racks).simulatedCapacities(capacities).build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();

    long totalCapacity = sum(capacities);

    // fill up the cluster to be 30% full
    final long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
        new long[] { newCapacity });

    // Case1: Simulate first balancer by creating 'balancer.id' file. It
    // will keep this file until the balancing operation is completed.
    FileSystem fs = cluster.getFileSystem(0);
    final FSDataOutputStream out = fs
        .create(Balancer.BALANCER_ID_PATH, false);
    out.writeBytes(InetAddress.getLocalHost().getHostName());
    out.hflush();
    assertTrue("'balancer.id' file doesn't exist!",
        fs.exists(Balancer.BALANCER_ID_PATH));

    // start second balancer
    final String[] args = { "-policy", "datanode" };
    final Tool tool = new Cli();
    tool.setConf(conf);
    int exitCode = tool.run(args); // start balancing
    assertEquals("Exit status code mismatches",
        ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);

    // Case2: Release lease so that another balancer would be able to
    // perform balancing.
    out.close();
    assertTrue("'balancer.id' file doesn't exist!",
        fs.exists(Balancer.BALANCER_ID_PATH));
    exitCode = tool.run(args); // start balancing
    assertEquals("Exit status code mismatches",
        ExitStatus.SUCCESS.getExitCode(), exitCode);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 74, Source file: TestBalancer.java

Example 8: testManyBalancerSimultaneously

import org.apache.hadoop.util.Tool; // import the package/class this method depends on
/**
 * Test running many balancers simultaneously.
 *
 * Case-1: The first balancer is running. Starting a second one should fail
 * immediately with an "Another balancer is running. Exiting.." IOException.
 *
 * Case-2: When the second balancer starts, the 'balancer.id' file exists but
 * the lease doesn't exist. The second balancer should then run successfully.
 */
@Test(timeout = 100000)
public void testManyBalancerSimultaneously() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  // add an empty node with half of the capacities(4 * CAPACITY) & the same
  // rack
  long[] capacities = new long[] { 4 * CAPACITY };
  String[] racks = new String[] { RACK0 };
  long newCapacity = 2 * CAPACITY;
  String newRack = RACK0;
  LOG.info("capacities = " + long2String(capacities));
  LOG.info("racks      = " + Arrays.asList(racks));
  LOG.info("newCapacity= " + newCapacity);
  LOG.info("newRack    = " + newRack);
  LOG.info("useTool    = " + false);
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
      .racks(racks).simulatedCapacities(capacities).build();
  cluster.waitActive();
  client = NameNodeProxies.createProxy(conf,
      cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();

  long totalCapacity = sum(capacities);

  // fill up the cluster to be 30% full
  final long totalUsedSpace = totalCapacity * 3 / 10;
  createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
      (short) numOfDatanodes, 0);
  // start up an empty node with the same capacity and on the same rack
  cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
      new long[] { newCapacity });

  // Case1: Simulate first balancer by creating 'balancer.id' file. It
  // will keep this file until the balancing operation is completed.
  FileSystem fs = cluster.getFileSystem(0);
  final FSDataOutputStream out = fs
      .create(Balancer.BALANCER_ID_PATH, false);
  out.writeBytes(InetAddress.getLocalHost().getHostName());
  out.hflush();
  assertTrue("'balancer.id' file doesn't exist!",
      fs.exists(Balancer.BALANCER_ID_PATH));

  // start second balancer
  final String[] args = { "-policy", "datanode" };
  final Tool tool = new Cli();
  tool.setConf(conf);
  int exitCode = tool.run(args); // start balancing
  assertEquals("Exit status code mismatches",
      ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);

  // Case2: Release lease so that another balancer would be able to
  // perform balancing.
  out.close();
  assertTrue("'balancer.id' file doesn't exist!",
      fs.exists(Balancer.BALANCER_ID_PATH));
  exitCode = tool.run(args); // start balancing
  assertEquals("Exit status code mismatches",
      ExitStatus.SUCCESS.getExitCode(), exitCode);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 70, Source file: TestBalancer.java


Note: The org.apache.hadoop.util.Tool.setConf examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not republish without permission.