

Java HttpServer.getPort Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.http.HttpServer.getPort. If you are wondering what HttpServer.getPort does, how to call it, or where to find real-world uses of it, the curated code examples below should help. You can also explore other usage examples of org.apache.hadoop.http.HttpServer, the class this method belongs to.


The following 15 code examples of HttpServer.getPort are shown, sorted by popularity by default.
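Before the individual examples, here is a minimal, self-contained sketch of the pattern that recurs throughout them: construct the server with port 0 so the OS assigns a free ephemeral port, start it, then call getPort() to learn which port was actually bound. This is a sketch only; it assumes the four-argument constructor HttpServer(name, bindAddress, port, findPort) that the examples below use, and the class name GetPortSketch is made up for illustration.

import java.io.File;
import java.net.URL;

import org.apache.hadoop.http.HttpServer;

public class GetPortSketch {
  public static void main(String[] args) throws Exception {
    // HttpServer resolves a webapp directory named after the server
    // ("test" here), so create it first, as the test examples below do.
    new File(System.getProperty("build.webapps", "build/webapps") + "/test")
        .mkdirs();

    // Port 0 asks the OS for a free ephemeral port; findPort=true lets
    // the server probe for an alternative if the requested port is busy.
    HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
    server.start();
    try {
      // getPort() reports the port that was actually bound, which is the
      // only way to discover it after requesting port 0.
      int port = server.getPort();
      URL baseUrl = new URL("http://localhost:" + port + "/");
      System.out.println("HttpServer is listening at " + baseUrl);
    } finally {
      server.stop();
    }
  }
}

Requesting port 0 and reading the real port back with getPort() is what lets the test suites below run many servers in parallel without port conflicts.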

Example 1: setUp

import org.apache.hadoop.http.HttpServer; // import the required package/class
public void setUp() throws Exception {
  new File(System.getProperty("build.webapps", "build/webapps") + "/test"
      ).mkdirs();
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("delay", "/delay", DelayServlet.class);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.addServlet("fail", "/fail", FailServlet.class);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");

  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  DelayServlet.calledTimes = 0;
  FailServlet.calledTimes = 0;
}
 
Author: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 17, Source: TestJobEndNotifier.java

Example 2: testReadURL

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Test
public void testReadURL() throws Exception {
  // Start a simple web server which hosts the log data.
  HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
  server.start();
  try {
    server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class);
    URL url = new URL("http://localhost:" + server.getPort() + "/fakeLog");
    EditLogInputStream elis = EditLogFileInputStream.fromUrl(
        url, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
        false);
    // Read the edit log and verify that we got all of the data.
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
        FSImageTestUtil.countEditLogOpTypes(elis);
    assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
    assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
    assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

    // Check that length header was picked up.
    assertEquals(FAKE_LOG_DATA.length, elis.length());
    elis.close();
  } finally {
    server.stop();
  }
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 26, Source: TestEditLogFileInputStream.java

Example 3: setUp

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Before
public void setUp() throws Exception {
  dir = new File(System.getProperty("build.webapps", "build/webapps") + "/test");
  System.out.println("dir="+dir.getAbsolutePath());
  if(!dir.exists()) {
    assertTrue(dir.mkdirs());
  }
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("shuffle", "/mapOutput", TaskTracker.MapOutputServlet.class);
  server.setAttribute(JobTracker.SHUFFLE_SSL_ENABLED_KEY, false);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
}
 
Author: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 15, Source: TestShuffleJobToken.java

Example 4: initHttpServer

import org.apache.hadoop.http.HttpServer; // import the required package/class
protected void initHttpServer(JobConf conf,
    boolean useNettyMapOutputs) throws IOException {

  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "tasktracker.http.bindAddress",
                              "tasktracker.http.port",
                              "mapred.task.tracker.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String httpBindAddress = infoSocAddr.getHostName();
  int httpPort = infoSocAddr.getPort();
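  // a configured port of 0 means "pick any free port", so pass findPort=true below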
  server = new HttpServer("task", httpBindAddress, httpPort,
      httpPort == 0, conf);
  workerThreads = conf.getInt("tasktracker.http.threads", 40);
  server.setThreads(1, workerThreads);
  // let the jsp pages get to the task tracker, config, and other relevant
  // objects
  FileSystem local = FileSystem.getLocal(conf);
  this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
  server.setAttribute("task.tracker", this);
  server.setAttribute("local.file.system", local);
  server.setAttribute("conf", conf);
  server.setAttribute("log", LOG);
  server.setAttribute("localDirAllocator", localDirAllocator);
  server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
  server.setAttribute(ReconfigurationServlet.
                      CONF_SERVLET_RECONFIGURABLE_PREFIX + "/ttconfchange",
                      TaskTracker.this);
  server.setAttribute("nettyMapOutputHttpPort", nettyMapOutputHttpPort);
  server.addInternalServlet("reconfiguration", "/ttconfchange",
                              ReconfigurationServlet.class);
  server.addInternalServlet(
    "mapOutput", "/mapOutput", MapOutputServlet.class);
  server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class);
  server.start();
  this.httpPort = server.getPort();
  checkJettyPort();
}
 
Author: rhli, Project: hadoop-EAR, Lines: 39, Source: TaskTracker.java

Example 5: setupHttpServer

import org.apache.hadoop.http.HttpServer; // import the required package/class
private MiniDFSCluster setupHttpServer(Boolean testPositive) 
        throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean("dfs.enableHftp", testPositive);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  NameNode nameNode = cluster.getNameNode();
  HttpServer httpServer = nameNode.getHttpServer();
  assertNotNull("httpServer should not be null", httpServer);
  
  int port = httpServer.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
  return cluster;
}
 
Author: rhli, Project: hadoop-EAR, Lines: 14, Source: TestNameNodeLoadHttp.java

Example 6: postTrashAndTest

import org.apache.hadoop.http.HttpServer; // import the required package/class
private void postTrashAndTest(String key, int value, NameNode namenode, String desc) {
  HttpServer server = namenode.getHttpServer();
  assertNotNull("Server is null.", server);

  int port = server.getPort();
  String res = execPost("http://127.0.0.1:" + port + "/nnconfchange", key + "=" + value);
  LOG.info("The port number is: " + port);
  LOG.info(res);

  long trash = namenode.getTrashDeletionInterval() / TrashPolicyDefault.MSECS_PER_MINUTE;
  assertEquals(desc + " Trash Interval for " + namenode.getNameserviceID()
      + " not updated successfully.", value, trash);
}
 
Author: rhli, Project: hadoop-EAR, Lines: 14, Source: TestReconfigurationServletServiceKeySuffix.java

Example 7: start

import org.apache.hadoop.http.HttpServer; // import the required package/class
void start() throws IOException {
  final InetSocketAddress bindAddr = getAddress(conf);

  // initialize the webserver for uploading/downloading files.
  LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
      .get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
      bindAddr.getHostName()));

  int tmpInfoPort = bindAddr.getPort();
  httpServer = new HttpServer("journal", bindAddr.getHostName(),
      tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
          .get(DFS_ADMIN, " "))) {
    {
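      // instance initializer of this anonymous HttpServer subclass;
      // it runs during construction to set up SPNEGO when security is on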
      if (UserGroupInformation.isSecurityEnabled()) {
        initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
            DFS_JOURNALNODE_KEYTAB_FILE_KEY);
      }
    }
  };
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = httpServer.getPort();

  LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 31, Source: JournalNodeHttpServer.java

Example 8: startHttpServer

import org.apache.hadoop.http.HttpServer; // import the required package/class
private static HttpServer startHttpServer() throws Exception {
  new File(System.getProperty(
      "build.webapps", "build/webapps") + "/test").mkdirs();
  HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.start();

  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  JobEndServlet.baseUrl = "http://localhost:" + server.getPort() + "/";
  JobEndServlet.foundJobState = null;
  return server;
}
 
Author: chendave, Project: hadoop-TCP, Lines: 14, Source: TestJobEndNotifier.java

Example 9: start

import org.apache.hadoop.http.HttpServer; // import the required package/class
void start() throws IOException {
  final InetSocketAddress bindAddr = getAddress(conf);

  // initialize the webserver for uploading/downloading files.
  LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
      .get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
      bindAddr.getHostName()));

  int tmpInfoPort = bindAddr.getPort();
  httpServer = new HttpServer("journal", bindAddr.getHostName(),
      tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
          .get(DFS_ADMIN, " "))) {
    {
      if (UserGroupInformation.isSecurityEnabled()) {
        initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
            DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY));
      }
    }
  };
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = httpServer.getPort();

  LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
 
Author: chendave, Project: hadoop-TCP, Lines: 31, Source: JournalNodeHttpServer.java

Example 10: setup

import org.apache.hadoop.http.HttpServer; // import the required package/class
@BeforeClass public static void setup() throws Exception {
  new File(System.getProperty("build.webapps", "build/webapps") + "/test"
           ).mkdirs();
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
}
 
Author: Seagate, Project: hadoop-on-lustre, Lines: 9, Source: TestJMXJsonServlet.java

Example 11: setUp

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Before
public void setUp() throws Exception {
  dir = new File(System.getProperty("build.webapps", "build/webapps") + "/test");
  System.out.println("dir="+dir.getAbsolutePath());
  if(!dir.exists()) {
    assertTrue(dir.mkdirs());
  }
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("shuffle", "/mapOutput", TaskTracker.MapOutputServlet.class);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
}
 
Author: Seagate, Project: hadoop-on-lustre, Lines: 14, Source: TestShuffleJobToken.java

Example 12: initSecondary

import org.apache.hadoop.http.HttpServer; // import the required package/class
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {

  nameNodeAddr = avatarNode.getRemoteNamenodeAddress(conf);
  this.primaryNamenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  fsName = avatarNode.getRemoteNamenodeHttpName(conf);

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
 
Author: iVCE, Project: RDFS, Lines: 44, Source: Standby.java

Example 13: setUp

import org.apache.hadoop.http.HttpServer; // import the required package/class
protected void setUp() throws Exception {
  server = new HttpServer("jmx", "0.0.0.0", 0, true);
  server.start();
  baseUrl = new URL("http://localhost:" + server.getPort() + "/");
}
 
Author: rhli, Project: hadoop-EAR, Lines: 6, Source: JMXJsonServletTestCase.java

Example 14: testDynamicLogLevel

import org.apache.hadoop.http.HttpServer; // import the required package/class
public void testDynamicLogLevel() throws Exception {
  String logName = TestLogLevel.class.getName();
  Log testlog = LogFactory.getLog(logName);

  //only test Log4JLogger
  if (testlog instanceof Log4JLogger) {
    Logger log = ((Log4JLogger)testlog).getLogger();
    log.debug("log.debug1");
    log.info("log.info1");
    log.error("log.error1");
    assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));

    HttpServer server = new HttpServer("..", "localhost", 22222, true);
    server.start();
    int port = server.getPort();

    //servlet
    URL url = new URL("http://localhost:" + port
        + "/logLevel?log=" + logName + "&level=" + Level.ERROR);
    out.println("*** Connecting to " + url);
    URLConnection connection = url.openConnection();
    connection.connect();

    BufferedReader in = new BufferedReader(new InputStreamReader(
        connection.getInputStream()));
    for(String line; (line = in.readLine()) != null; out.println(line));
    in.close();

    log.debug("log.debug2");
    log.info("log.info2");
    log.error("log.error2");
    assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));

    //command line
    String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG};
    LogLevel.main(args);
    log.debug("log.debug3");
    log.info("log.info3");
    log.error("log.error3");
    assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
  }
  else {
    out.println(testlog.getClass() + " not tested.");
  }
}
 
Author: rhli, Project: hadoop-EAR, Lines: 46, Source: TestLogLevel.java

Example 15: initSecondary

import org.apache.hadoop.http.HttpServer; // import the required package/class
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {

  fsName = AvatarNode.getRemoteNamenodeHttpName(conf,
      avatarNode.getInstanceId());

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);
  delayedScheduledCheckpointTime = conf.getBoolean("fs.checkpoint.delayed",
      false) ? AvatarNode.now() + checkpointPeriod * 1000 : 0;
  

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();
  avatarNode.httpServer.setAttribute("avatar.node", avatarNode);
  avatarNode.httpServer.addInternalServlet("outstandingnodes",
      "/outstandingnodes", OutStandingDatanodesServlet.class);

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  if (delayedScheduledCheckpointTime > 0) {
    LOG.warn("Standby: Checkpointing will be delayed by: " + checkpointPeriod + " seconds");
  }
  LOG.warn("Log Size Trigger    :" + checkpointTxnCount + " transactions.");
}
 
Author: rhli, Project: hadoop-EAR, Lines: 48, Source: Standby.java


Note: The org.apache.hadoop.http.HttpServer.getPort examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult the corresponding project's License before using or redistributing the code, and do not republish without permission.