

Java TestHDFSServerPorts Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.TestHDFSServerPorts. If you are wondering what the TestHDFSServerPorts class is for and how it is used in practice, the curated examples below should help.


The TestHDFSServerPorts class belongs to the org.apache.hadoop.hdfs package. Four code examples of the class are shown below, sorted by popularity by default.

Example 1: setDataNodePorts

import org.apache.hadoop.hdfs.TestHDFSServerPorts; // import the required package/class
private void setDataNodePorts(Configuration conf) {
  conf.set("dfs.datanode.address", 
      TestHDFSServerPorts.NAME_NODE_HOST + "0");
  conf.set("dfs.datanode.http.address", 
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0");
  conf.set("dfs.datanode.ipc.address", 
      TestHDFSServerPorts.NAME_NODE_HOST + "0");
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 9; Source: TestMRServerPorts.java
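
In setDataNodePorts, each DataNode address is set to NAME_NODE_HOST + "0", i.e. a host:0 string, so the operating system assigns a free ephemeral port and the test cannot collide with a fixed port. A minimal sketch of how TestHDFSServerPorts presumably declares these host constants (an assumption; the exact values may differ between Hadoop versions, so check your version's test sources):

// Assumed shape of the constants used above; verify against your Hadoop version.
public class TestHDFSServerPorts {
  // Host prefixes to which the tests append a port number ("0" = any free port).
  public static final String NAME_NODE_HOST = "localhost:";
  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
}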

Example 2: testJobTrackerPorts

import org.apache.hadoop.hdfs.TestHDFSServerPorts; // import the required package/class
/**
 * Verify JobTracker port usage.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // start job tracker on the same port as name-node
    JobConf conf2 = new JobConf(hdfs.getConfig());
    conf2.set("mapred.job.tracker",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.job.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartJobTracker(conf2);
    assertFalse(started); // should fail

    // bind http server to the same port as name-node
    conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.job.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartJobTracker(conf2);
    assertFalse(started); // should fail again

    // both ports are different from the name-node ones
    conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.job.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartJobTracker(conf2);
    assertTrue(started); // should start now

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 40; Source: TestMRServerPorts.java
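
Example 2 calls a canStartJobTracker helper that is not part of this excerpt. A plausible sketch under the classic MapReduce 1.x API, where JobTracker.startTracker(JobConf) creates the tracker and a java.net.BindException indicates that a required port is already taken (hypothetical code, not the project's exact implementation):

import java.net.BindException;

// Hypothetical helper: returns false when the JobTracker cannot bind its ports.
private boolean canStartJobTracker(JobConf conf) throws Exception {
  JobTracker jt;
  try {
    jt = JobTracker.startTracker(conf);   // classic MR1 factory method
  } catch (BindException e) {
    return false;                         // RPC or HTTP port already in use
  }
  jt.stopTracker();                       // shut it down again if it did start
  return true;
}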

Example 3: testTaskTrackerPorts

import org.apache.hadoop.hdfs.TestHDFSServerPorts; // import the required package/class
/**
 * Verify TaskTracker port usage.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // start job tracker on the same port as name-node
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // bind http server to the same port as name-node
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both ports are different from the name-node ones
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } catch (IOException ioe) {
    // HACK!  we know this message isn't a problem, but it's polluting our
    // daily build test results.  Just ignore it for now...
    if (ioe.getMessage().matches("Cannot delete.*because it's outside of.*")) {
      System.out.println("Ignoring: " + ioe.getMessage());
    } else {
      throw ioe;
    }
  } finally {
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
 
Developer: rhli; Project: hadoop-EAR; Lines: 60; Source: TestMRServerPorts.java
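
The TaskTracker variant depends on a similar canStartTaskTracker helper, also omitted from this excerpt. A rough sketch under the same MR1 assumptions, using the TaskTracker(JobConf) constructor and shutdown() (hypothetical, for illustration only):

import java.net.BindException;

// Hypothetical helper: returns false when the TaskTracker cannot bind its ports.
private boolean canStartTaskTracker(JobConf conf) throws Exception {
  TaskTracker tt;
  try {
    tt = new TaskTracker(conf);           // binds the report and HTTP addresses
  } catch (BindException e) {
    return false;                         // one of the ports is already in use
  }
  tt.shutdown();                          // stop it again if it did come up
  return true;
}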

Example 4: testTaskTrackerPorts

import org.apache.hadoop.hdfs.TestHDFSServerPorts; // import the required package/class
/**
 * Verify TaskTracker port usage.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // start job tracker on the same port as name-node
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // bind http server to the same port as name-node
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both ports are different from the name-node ones
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } finally {
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
 
Developer: Seagate; Project: hadoop-on-lustre; Lines: 52; Source: TestMRServerPorts.java
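
Examples 3 and 4 also assume a small JTRunner thread that keeps the JobTracker serving in the background (via startJobTracker) while the TaskTracker ports are probed. One way such a runner could look, assuming the MR1 blocking call JobTracker.offerService() (hypothetical sketch):

// Hypothetical background thread driving an already-created JobTracker.
class JTRunner extends Thread {
  private JobTracker jt;

  void setJobTracker(JobTracker jt) {
    this.jt = jt;
  }

  public void run() {
    try {
      jt.offerService();                  // blocks until the tracker is stopped
    } catch (Exception e) {
      // ignored: the test stops the tracker explicitly via stopTracker()
    }
  }
}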


Note: The org.apache.hadoop.hdfs.TestHDFSServerPorts examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Do not republish without permission.