

Java HttpServer Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.http.HttpServer. If you are wondering what the HttpServer class is for, how to use it, or what real HttpServer usage looks like, the curated code examples below may help.


The HttpServer class belongs to the org.apache.hadoop.http package. The sections below show 15 code examples of the HttpServer class, sorted by popularity by default.
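Before the individual examples, here is a minimal, self-contained sketch of the typical HttpServer lifecycle, assembled only from calls that appear in the examples below (the constructor, addServlet, start, getPort, and stop). The class name HttpServerSketch, the servlet ExampleServlet, and the server name "example" are hypothetical placeholders, not part of Hadoop.

import java.io.File;
import java.net.URL;
import javax.servlet.http.HttpServlet;
import org.apache.hadoop.http.HttpServer;

public class HttpServerSketch {

  // Hypothetical no-op servlet, used only to illustrate addServlet().
  public static class ExampleServlet extends HttpServlet {
  }

  public static void main(String[] args) throws Exception {
    // As in the test examples below, create a webapp directory matching the
    // server name under "build.webapps" before constructing the server.
    new File(System.getProperty("build.webapps", "build/webapps") + "/example")
        .mkdirs();

    // Arguments: name, bind address, port (0 = pick a free port), findPort flag.
    HttpServer server = new HttpServer("example", "0.0.0.0", 0, true);
    server.addServlet("example", "/example", ExampleServlet.class);
    server.start();
    try {
      // getPort() returns the port actually bound, needed when port 0 was requested.
      URL baseUrl = new URL("http://localhost:" + server.getPort() + "/");
      System.out.println("HttpServer listening at " + baseUrl);
    } finally {
      server.stop();
    }
  }
}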

Example 1: testGetJettyThreads

import org.apache.hadoop.http.HttpServer; // import the required package/class
/**
 * Test that it can detect a running Jetty selector.
 */
@Test(timeout=20000)
public void testGetJettyThreads() throws Exception {
  JettyBugMonitor monitor = new JettyBugMonitor(conf);
  
  new File(System.getProperty("build.webapps", "build/webapps") + "/test"
    ).mkdirs();
  HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
  server.start();
  try {
    List<Long> threads = monitor.waitForJettyThreads();
    assertEquals(1, threads.size());
  } finally {
    server.stop();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: TestJettyBugMonitor.java

Example 2: setUp

import org.apache.hadoop.http.HttpServer; // import the required package/class
public void setUp() throws Exception {
  new File(System.getProperty("build.webapps", "build/webapps") + "/test"
      ).mkdirs();
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("delay", "/delay", DelayServlet.class);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.addServlet("fail", "/fail", FailServlet.class);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");

  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  DelayServlet.calledTimes = 0;
  FailServlet.calledTimes = 0;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 17, Source: TestJobEndNotifier.java

Example 3: initializeServer

import org.apache.hadoop.http.HttpServer; // import the required package/class
public void initializeServer() throws IOException {

    String serverAddr = conf.get(CLUSTER_BALANCER_ADDR, "localhost:9143");
    InetSocketAddress addr = NetUtils.createSocketAddr(serverAddr);
    clusterDaemonServer = RPC.getServer(this, addr.getHostName(),
            addr.getPort(), conf);
    clusterDaemonServer.start();

    // Http server
    String infoServerAddr = conf.get(CLUSTER_HTTP_BALANCER_ADDR,
            "localhost:50143");
    InetSocketAddress infoAddr = NetUtils.createSocketAddr(infoServerAddr);
    infoServer = new HttpServer("cb", infoAddr.getHostName(),
            infoAddr.getPort(), infoAddr.getPort() == 0, conf);
    infoServer.setAttribute("cluster.balancer", this);
    infoServer.start();
  }
 
Developer: rhli, Project: hadoop-EAR, Lines: 18, Source: DynamicCloudsDaemon.java

Example 4: startInfoServer

import org.apache.hadoop.http.HttpServer; // import the required package/class
private void startInfoServer() throws IOException {
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
    java.net.InetAddress.getLocalHost().getCanonicalHostName(),
    0);
  String infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("jt", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("job.tracker", this);
  infoServer.start();
  this.infoPort = this.infoServer.getPort();

  String hostname =
    java.net.InetAddress.getLocalHost().getCanonicalHostName();
  this.conf.set(
    "mapred.job.tracker.http.address", hostname + ":" + this.infoPort);
  this.conf.setInt("mapred.job.tracker.info.port", this.infoPort);
  this.conf.set("mapred.job.tracker.info.bindAddress", hostname);

  LOG.info("JobTracker webserver: " + this.infoPort);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 22, Source: CoronaJobTracker.java

Example 5: generateWarningText

import org.apache.hadoop.http.HttpServer; // import the required package/class
public static String generateWarningText(FSNamesystem fsn) {
  // Ideally this should be displayed in RED
  StringBuilder sb = new StringBuilder();
  sb.append("<b>");
  String raidHttpUrl = fsn.getRaidHttpUrl(); 
  if (raidHttpUrl != null) {
    try {
      String raidUIContent = getRaidUIContentWithTimeout(raidHttpUrl);
      if (raidUIContent != null) {
        sb.append(raidUIContent);
      }
    } catch (SocketTimeoutException ste) {
      HttpServer.LOG.error("Fail to fetch raid ui " + raidHttpUrl, ste);
      sb.append(JspHelper.getHTMLLinksText(raidHttpUrl, "Raidnode didn't response in " +
        (RAID_UI_CONNECT_TIMEOUT + RAID_UI_READ_TIMEOUT) + "ms"));
    } catch (Exception e) {
      HttpServer.LOG.error("Fail to fetch raid ui " + raidHttpUrl, e);
      sb.append(JspHelper.getHTMLLinksText(raidHttpUrl, "Raidnode is unreachable"));
    }
  }
  sb.append("<br></b>\n");
  return sb.toString();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 24, Source: JspHelper.java

Example 6: serviceStart

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Override
protected void serviceStart() throws Exception {
  try {
    proxyServer = new HttpServer("proxy", bindAddress, port,
        port == 0, getConfig(), acl);
    proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME, 
        ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
    proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
    proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
    proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
    proxyServer.start();
  } catch (IOException e) {
    LOG.fatal("Could not start proxy web server",e);
    throw new YarnRuntimeException("Could not start proxy web server",e);
  }
  super.serviceStart();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: WebAppProxy.java

Example 7: setupServlets

import org.apache.hadoop.http.HttpServer; // import the required package/class
private static void setupServlets(HttpServer httpServer, Configuration conf) {
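  // Note: the trailing boolean argument to addInternalServlet is requireAuth -
  // servlets registered with true are restricted to authenticated (e.g. Kerberos)
  // users when security is enabled, while false leaves them open.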
  httpServer.addInternalServlet("startupProgress",
      StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
  httpServer.addInternalServlet("getDelegationToken",
      GetDelegationTokenServlet.PATH_SPEC, 
      GetDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("renewDelegationToken", 
      RenewDelegationTokenServlet.PATH_SPEC, 
      RenewDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("cancelDelegationToken", 
      CancelDelegationTokenServlet.PATH_SPEC, 
      CancelDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
      true);
  httpServer.addInternalServlet("getimage", "/getimage",
      GetImageServlet.class, true);
  httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class, false);
  httpServer.addInternalServlet("data", "/data/*",
      FileDataServlet.class, false);
  httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class, false);
  httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
      ContentSummaryServlet.class, false);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 26, Source: NameNodeHttpServer.java

Example 8: testImageTransferTimeout

import org.apache.hadoop.http.HttpServer; // import the required package/class
/**
 * Test to verify the read timeout
 */
@Test(timeout = 5000)
public void testImageTransferTimeout() throws Exception {
  HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
    testServer.start();
    URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
    TransferFsImage.timeout = 2000;
    try {
      TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
          null, false);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Read should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    if (testServer != null) {
      testServer.stop();
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: TestTransferFsImage.java

Example 9: testReadURL

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Test
public void testReadURL() throws Exception {
  // Start a simple web server which hosts the log data.
  HttpServer server = new HttpServer("test", "0.0.0.0", 0, true);
  server.start();
  try {
    server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class);
    URL url = new URL("http://localhost:" + server.getPort() + "/fakeLog");
    EditLogInputStream elis = EditLogFileInputStream.fromUrl(
        url, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
        false);
    // Read the edit log and verify that we got all of the data.
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
        FSImageTestUtil.countEditLogOpTypes(elis);
    assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
    assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
    assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

    // Check that length header was picked up.
    assertEquals(FAKE_LOG_DATA.length, elis.length());
    elis.close();
  } finally {
    server.stop();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 26, Source: TestEditLogFileInputStream.java

Example 10: setupServlets

import org.apache.hadoop.http.HttpServer; // import the required package/class
private static void setupServlets(HttpServer httpServer, Configuration conf) {
  httpServer.addInternalServlet("getDelegationToken",
      GetDelegationTokenServlet.PATH_SPEC, GetDelegationTokenServlet.class,
      true);
  httpServer.addInternalServlet("renewDelegationToken",
      RenewDelegationTokenServlet.PATH_SPEC,
      RenewDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("cancelDelegationToken",
      CancelDelegationTokenServlet.PATH_SPEC,
      CancelDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, true);
  httpServer
      .addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class,
          false);
  httpServer
      .addInternalServlet("data", "/data/*", FileDataServlet.class, false);
  httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class, false);
  httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
      ContentSummaryServlet.class, false);
}
 
Developer: hopshadoop, Project: hops, Lines: 22, Source: NameNodeHttpServer.java

Example 11: testNotificationOnLastRetryNormalShutdown

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Test
public void testNotificationOnLastRetryNormalShutdown() throws Exception {
  HttpServer server = startHttpServer();
  // Act like it is the second attempt. Default max attempts is 2
  MRApp app = spy(new MRAppWithCustomContainerAllocator(
      2, 2, true, this.getClass().getName(), true, 2, true));
  doNothing().when(app).sysexit();
  Configuration conf = new Configuration();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl)app.submit(conf);
  app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
  // Unregistration succeeds: successfullyUnregistered is set
  app.shutDownJob();
  Assert.assertEquals(true, app.isLastAMRetry());
  Assert.assertEquals(1, JobEndServlet.calledTimes);
  Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
      JobEndServlet.requestUri.getQuery());
  Assert.assertEquals(JobState.SUCCEEDED.toString(),
    JobEndServlet.foundJobState);
  server.stop();
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 23, Source: TestJobEndNotifier.java

Example 12: testAbsentNotificationOnNotLastRetryUnregistrationFailure

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Test
public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer server = startHttpServer();
  MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
      this.getClass().getName(), true, 1, false));
  doNothing().when(app).sysexit();
  Configuration conf = new Configuration();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl)app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  app.getContext().getEventHandler()
    .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(job, JobStateInternal.REBOOT);
  // Now shutdown.
  // Unregistration fails: isLastAMRetry is recalculated, this is not the last retry
  app.shutDownJob();
  // Not the last AM attempt. So user should see that the job is still running.
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals(false, app.isLastAMRetry());
  Assert.assertEquals(0, JobEndServlet.calledTimes);
  Assert.assertEquals(null, JobEndServlet.requestUri);
  Assert.assertEquals(null, JobEndServlet.foundJobState);
  server.stop();
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 27, Source: TestJobEndNotifier.java

Example 13: testNotificationOnLastRetryUnregistrationFailure

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Test
public void testNotificationOnLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer server = startHttpServer();
  MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
      this.getClass().getName(), true, 2, false));
  doNothing().when(app).sysexit();
  Configuration conf = new Configuration();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl)app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  app.getContext().getEventHandler()
    .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(job, JobStateInternal.REBOOT);
  // Now shutdown. User should see FAILED state.
  // Unregistration fails: isLastAMRetry is recalculated, this is the last retry
  app.shutDownJob();
  Assert.assertEquals(true, app.isLastAMRetry());
  Assert.assertEquals(1, JobEndServlet.calledTimes);
  Assert.assertEquals("jobid=" + job.getID() + "&status=FAILED",
      JobEndServlet.requestUri.getQuery());
  Assert.assertEquals(JobState.FAILED.toString(),
    JobEndServlet.foundJobState);
  server.stop();
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 27, Source: TestJobEndNotifier.java

Example 14: doGet

import org.apache.hadoop.http.HttpServer; // import the required package/class
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {

  // Do the authorization
  if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
      response)) {
    return;
  }

  PrintWriter out = new PrintWriter(response.getOutputStream());
  String format = request.getParameter("format");
  Collection<MetricsContext> allContexts = 
    ContextFactory.getFactory().getAllContexts();
  if ("json".equals(format)) {
    // Uses Jetty's built-in JSON support to convert the map into JSON.
    out.print(new JSON().toJSON(makeMap(allContexts)));
  } else {
    printMap(out, makeMap(allContexts));
  }
  out.close();
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 23, Source: MetricsServlet.java

Example 15: initialize

import org.apache.hadoop.http.HttpServer; // import the required package/class
/**
 * Initialize checkpoint.
 */
private void initialize(Configuration conf) throws IOException {
  // Create connection to the namenode.
  shouldRun = true;

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
  checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
                                DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);

  // Pull out exact http address for posting url to avoid ip aliasing issues
  String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
                                 DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
  infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
  
  HttpServer httpServer = backupNode.httpServer;
  httpServer.setAttribute("name.system.image", getFSImage());
  httpServer.setAttribute("name.conf", conf);
  httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);

  LOG.info("Checkpoint Period : " + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.info("Log Size Trigger  : " + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 29, Source: Checkpointer.java


Note: The org.apache.hadoop.http.HttpServer class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please follow the License of the corresponding project; do not reproduce without permission.