

Java HttpServer.setAttribute Method Code Examples

This page collects typical usage examples of the Java method org.apache.hadoop.http.HttpServer.setAttribute. If you are unsure what HttpServer.setAttribute does or how to call it, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.http.HttpServer, the class this method belongs to.


The sections below present 15 code examples of HttpServer.setAttribute, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
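Before the examples, here is a minimal runnable sketch of the pattern they all share, based on the legacy org.apache.hadoop.http.HttpServer constructor (name, bindAddress, port, findPort, conf) used throughout this page. The server name, attribute key, and published object below are hypothetical, chosen purely for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class SetAttributeSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Port 0 plus findPort=true asks the server to pick a free port,
    // the same "port == 0" convention the examples below all follow.
    HttpServer server = new HttpServer("sketch", "0.0.0.0", 0, true, conf);
    // Publish a shared object under a well-known key; servlets and JSPs
    // hosted by this server can later look it up by the same key.
    server.setAttribute("sketch.daemon", new Object());
    server.start();
    System.out.println("Web server bound to port " + server.getPort());
  }
}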

Example 1: initializeServer

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
public void initializeServer() throws IOException {

    String serverAddr = conf.get(CLUSTER_BALANCER_ADDR, "localhost:9143");
    InetSocketAddress addr = NetUtils.createSocketAddr(serverAddr);
    clusterDaemonServer = RPC.getServer(this, addr.getHostName(),
            addr.getPort(), conf);
    clusterDaemonServer.start();

    // Http server
    String infoServerAddr = conf.get(CLUSTER_HTTP_BALANCER_ADDR,
            "localhost:50143");
    InetSocketAddress infoAddr = NetUtils.createSocketAddr(infoServerAddr);
    infoServer = new HttpServer("cb", infoAddr.getHostName(),
            infoAddr.getPort(), infoAddr.getPort() == 0, conf);
    infoServer.setAttribute("cluster.balancer", this);
    infoServer.start();
  }
 
Developer: rhli, Project: hadoop-EAR, Lines: 18, Source: DynamicCloudsDaemon.java

Example 2: startInfoServer

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
private void startInfoServer() throws IOException {
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
    java.net.InetAddress.getLocalHost().getCanonicalHostName(),
    0);
  String infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("jt", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("job.tracker", this);
  infoServer.start();
  this.infoPort = this.infoServer.getPort();

  String hostname =
    java.net.InetAddress.getLocalHost().getCanonicalHostName();
  this.conf.set(
    "mapred.job.tracker.http.address", hostname + ":" + this.infoPort);
  this.conf.setInt("mapred.job.tracker.info.port", this.infoPort);
  this.conf.set("mapred.job.tracker.info.bindAddress", hostname);

  LOG.info("JobTracker webserver: " + this.infoPort);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 22, Source: CoronaJobTracker.java

Example 3: serviceStart

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
@Override
protected void serviceStart() throws Exception {
  try {
    proxyServer = new HttpServer("proxy", bindAddress, port,
        port == 0, getConfig(), acl);
    proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME, 
        ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
    proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
    proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
    proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
    proxyServer.start();
  } catch (IOException e) {
    LOG.fatal("Could not start proxy web server",e);
    throw new YarnRuntimeException("Could not start proxy web server",e);
  }
  super.serviceStart();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: WebAppProxy.java

Example 4: initialize

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
/**
 * Initialize checkpoint.
 */
private void initialize(Configuration conf) throws IOException {
  // Create connection to the namenode.
  shouldRun = true;

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
  checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
                                DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);

  // Pull out exact http address for posting url to avoid ip aliasing issues
  String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
                                 DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
  infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
  
  HttpServer httpServer = backupNode.httpServer;
  httpServer.setAttribute("name.system.image", getFSImage());
  httpServer.setAttribute("name.conf", conf);
  httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);

  LOG.info("Checkpoint Period : " + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.info("Log Size Trigger  : " + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 29, Source: Checkpointer.java
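The two attributes set in Example 4, "name.system.image" and "name.conf", exist so that the servlet registered on the following line can find them. Here is a companion sketch of the consumer side, assuming that attributes set via HttpServer.setAttribute surface as standard ServletContext attributes; the servlet class below is a hypothetical stand-in, not Hadoop's actual GetImageServlet.

import java.io.IOException;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.conf.Configuration;

public class GetImageSketchServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    ServletContext context = getServletContext();
    // Objects published with HttpServer.setAttribute are looked up by key.
    Configuration conf = (Configuration) context.getAttribute("name.conf");
    Object image = context.getAttribute("name.system.image");
    resp.getWriter().println("image attribute present: " + (image != null));
  }
}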

Example 5: setUp

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  dir = new File(System.getProperty("build.webapps", "build/webapps") + "/test");
  System.out.println("dir="+dir.getAbsolutePath());
  if(!dir.exists()) {
    assertTrue(dir.mkdirs());
  }
  server = new HttpServer("test", "0.0.0.0", 0, true);
  server.addServlet("shuffle", "/mapOutput", TaskTracker.MapOutputServlet.class);
  server.setAttribute(JobTracker.SHUFFLE_SSL_ENABLED_KEY, false);
  server.start();
  int port = server.getPort();
  baseUrl = new URL("http://localhost:" + port + "/");
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 15, Source: TestShuffleJobToken.java

Example 6: start

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
@Override
public synchronized void start() throws IOException {
  if (started) return;
  super.start();
  // initialize our queues from the config settings
  if (null == schedConf) {
    schedConf = new CapacitySchedulerConf();
  }

  // Initialize queues
  QueueManager queueManager = taskTrackerManager.getQueueManager();
  Set<String> queueNames = queueManager.getQueues();
  initialize(queueManager, parseQueues(queueNames, schedConf), 
      getConf(), schedConf);
  
  // listen to job changes
  taskTrackerManager.addJobInProgressListener(jobQueuesManager);

  //Start thread for initialization
  if (initializationPoller == null) {
    this.initializationPoller = new JobInitializationPoller(
        jobQueuesManager, schedConf, queueNames, taskTrackerManager);
  }
  initializationPoller.init(queueNames.size(), schedConf);
  initializationPoller.setDaemon(true);
  initializationPoller.start();

  if (taskTrackerManager instanceof JobTracker) {
    JobTracker jobTracker = (JobTracker) taskTrackerManager;
    HttpServer infoServer = jobTracker.infoServer;
    infoServer.setAttribute("scheduler", this);
    infoServer.addServlet("scheduler", "/scheduler",
        CapacitySchedulerServlet.class);
  }

  started = true;
  LOG.info("Capacity scheduler initialized " + queueNames.size() + " queues");  
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 39, Source: CapacityTaskScheduler.java

Example 7: initHttpServer

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
protected void initHttpServer(JobConf conf,
    boolean useNettyMapOutputs) throws IOException {

  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "tasktracker.http.bindAddress",
                              "tasktracker.http.port",
                              "mapred.task.tracker.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String httpBindAddress = infoSocAddr.getHostName();
  int httpPort = infoSocAddr.getPort();
  server = new HttpServer("task", httpBindAddress, httpPort,
      httpPort == 0, conf);
  workerThreads = conf.getInt("tasktracker.http.threads", 40);
  server.setThreads(1, workerThreads);
  // let the jsp pages get to the task tracker, config, and other relevant
  // objects
  FileSystem local = FileSystem.getLocal(conf);
  this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
  server.setAttribute("task.tracker", this);
  server.setAttribute("local.file.system", local);
  server.setAttribute("conf", conf);
  server.setAttribute("log", LOG);
  server.setAttribute("localDirAllocator", localDirAllocator);
  server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
  server.setAttribute(ReconfigurationServlet.
                      CONF_SERVLET_RECONFIGURABLE_PREFIX + "/ttconfchange",
                      TaskTracker.this);
  server.setAttribute("nettyMapOutputHttpPort", nettyMapOutputHttpPort);
  server.addInternalServlet("reconfiguration", "/ttconfchange",
                              ReconfigurationServlet.class);
  server.addInternalServlet(
    "mapOutput", "/mapOutput", MapOutputServlet.class);
  server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class);
  server.start();
  this.httpPort = server.getPort();
  checkJettyPort();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 39, Source: TaskTracker.java

Example 8: start

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
void start() throws IOException {
  final InetSocketAddress bindAddr = getAddress(conf);

  // initialize the webserver for uploading/downloading files.
  LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
      .get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
      bindAddr.getHostName()));

  int tmpInfoPort = bindAddr.getPort();
  httpServer = new HttpServer("journal", bindAddr.getHostName(),
      tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
          .get(DFS_ADMIN, " "))) {
    {
      if (UserGroupInformation.isSecurityEnabled()) {
        initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
            DFS_JOURNALNODE_KEYTAB_FILE_KEY);
      }
    }
  };
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = httpServer.getPort();

  LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 31, Source: JournalNodeHttpServer.java
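One detail Example 8 (and several others on this page) handles explicitly: when the configured port is 0, the server binds an ephemeral port, so the real port must be read back with getPort() after start() and re-published. A minimal sketch of that write-back pattern, using a hypothetical configuration key:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class EphemeralPortSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Requesting port 0 with findPort=true means "bind any free port".
    HttpServer httpServer = new HttpServer("sketch", "0.0.0.0", 0, true, conf);
    httpServer.start();
    // The web-server port can be ephemeral, so ask for the bound port and
    // record it where clients will look for it (the key is hypothetical).
    int infoPort = httpServer.getPort();
    conf.set("sketch.info.http.address", "0.0.0.0:" + infoPort);
    System.out.println("Web server up at port " + infoPort);
  }
}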

Example 9: start

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
void start() throws IOException {
  final InetSocketAddress bindAddr = getAddress(conf);

  // initialize the webserver for uploading/downloading files.
  LOG.info("Starting web server as: "+ SecurityUtil.getServerPrincipal(conf
      .get(DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY),
      bindAddr.getHostName()));

  int tmpInfoPort = bindAddr.getPort();
  httpServer = new HttpServer("journal", bindAddr.getHostName(),
      tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
          .get(DFS_ADMIN, " "))) {
    {
      if (UserGroupInformation.isSecurityEnabled()) {
        initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
            DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY));
      }
    }
  };
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = httpServer.getPort();

  LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 31, Source: JournalNodeHttpServer.java

Example 10: ClusterManager

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
/**
 * Construct ClusterManager given {@link CoronaConf}
 *
 * @param conf the configuration for the ClusterManager
 * @throws IOException
 */
public ClusterManager(CoronaConf conf) throws IOException {
  this.conf = conf;
  initLegalTypes();

  metrics = new ClusterManagerMetrics(getTypes());

  sessionManager = new SessionManager(this);
  sessionManager.setConf(conf);

  sessionHistoryManager = new SessionHistoryManager();
  sessionHistoryManager.setConf(conf);

  HostsFileReader hostsReader =
      new HostsFileReader(conf.getHostsFile(), conf.getExcludesFile());
  nodeManager = new NodeManager(this, hostsReader);
  nodeManager.setConf(conf);

  sessionNotifier = new SessionNotifier(sessionManager, this, metrics);
  sessionNotifier.setConf(conf);

  scheduler = new Scheduler(nodeManager, sessionManager,
      sessionNotifier, getTypes(), metrics, conf);
  scheduler.start();
  metrics.registerUpdater(scheduler, sessionNotifier);

  InetSocketAddress infoSocAddr =
      NetUtils.createSocketAddr(conf.getClusterManagerHttpAddress());
  infoServer =
      new HttpServer("cm", infoSocAddr.getHostName(), infoSocAddr.getPort(),
                     infoSocAddr.getPort() == 0, conf);
  infoServer.setAttribute("cm", this);
  infoServer.start();

  startTime = clock.getTime();
  hostName = infoSocAddr.getHostName();
  safeMode = false;
}
 
Developer: iVCE, Project: RDFS, Lines: 44, Source: ClusterManager.java

Example 11: initSecondary

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {

  nameNodeAddr = avatarNode.getRemoteNamenodeAddress(conf);
  this.primaryNamenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  fsName = avatarNode.getRemoteNamenodeHttpName(conf);

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
 
Developer: iVCE, Project: RDFS, Lines: 44, Source: Standby.java

Example 12: start

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
@Override
public void start() throws IOException {
  Configuration conf = getConf();
  QueueManager queueManager = taskTrackerManager.getQueueManager();
  allocations = new Allocations(conf,queueManager);
  scheduler = ReflectionUtils.newInstance(
      conf.getClass(PrioritySchedulerOptions.DYNAMIC_SCHEDULER_SCHEDULER,
      PriorityScheduler.class, QueueTaskScheduler.class), conf);
  scheduler.setAllocator(allocations);
  scheduler.setConf(conf);
  scheduler.setTaskTrackerManager(taskTrackerManager);
  scheduler.start();
  long interval = conf.getLong(PrioritySchedulerOptions.DYNAMIC_SCHEDULER_ALLOC_INTERVAL,20)*1000;
   
  timer.scheduleAtFixedRate(allocations, interval, interval);   
  for (String queue: queueManager.getLeafQueueNames()) {
    Object info = queueManager.getSchedulerInfo(queue);
    QueueInfo queueInfo = new QueueInfo(queue, info, allocations); 
    queueManager.setSchedulerInfo(queue, queueInfo);
  }
  if (taskTrackerManager instanceof JobTracker) {
    JobTracker jobTracker = (JobTracker) taskTrackerManager;
    HttpServer infoServer = jobTracker.infoServer;
    infoServer.setAttribute("scheduler", this);
    infoServer.addServlet("scheduler", "/scheduler",
        DynamicPriorityServlet.class);
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 29, Source: DynamicPriorityScheduler.java

Example 13: ClusterManager

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
/**
   * Construct ClusterManager given {@link CoronaConf}
   *
   * @param conf the configuration for the ClusterManager
   * @param recoverFromDisk true if we are restarting after going down while
   *                        in Safe Mode
   * @throws IOException
   */
  public ClusterManager(CoronaConf conf, boolean recoverFromDisk)
    throws IOException {
    this.conf = conf;
    HostsFileReader hostsReader =
      new HostsFileReader(conf.getHostsFile(), conf.getExcludesFile());
    initLegalTypes();
    metrics = new ClusterManagerMetrics(getTypes());

    if (recoverFromDisk) {
      recoverClusterManagerFromDisk(hostsReader);
    } else {
      File stateFile = new File(conf.getCMStateFile());
      if (stateFile.exists()) {
        throw new IOException("State file " + stateFile.getAbsolutePath() +
          " exists, but recoverFromDisk is not set, delete the state file first");
      }
      LOG.info("Starting Cluster Manager with clean state");
      startTime = clock.getTime();
      lastRestartTime = startTime;
      nodeManager = new NodeManager(this, hostsReader);
      nodeManager.setConf(conf);
      sessionManager = new SessionManager(this);
      sessionNotifier = new SessionNotifier(sessionManager, this, metrics);
    }
    sessionManager.setConf(conf);
    sessionNotifier.setConf(conf);

    sessionHistoryManager = new SessionHistoryManager();
    sessionHistoryManager.setConf(conf);

    scheduler = new Scheduler(nodeManager, sessionManager,
        sessionNotifier, getTypes(), metrics, conf);
    scheduler.start();
    metrics.registerUpdater(scheduler, sessionNotifier);

    InetSocketAddress infoSocAddr =
        NetUtils.createSocketAddr(conf.getClusterManagerHttpAddress());
    infoServer =
        new HttpServer("cm", infoSocAddr.getHostName(), infoSocAddr.getPort(),
                       infoSocAddr.getPort() == 0, conf);
    infoServer.setAttribute("cm", this);
    infoServer.start();

    hostName = infoSocAddr.getHostName();

    // We have not completely restored the nodeManager, sessionManager and the
    // sessionNotifier
    if (recoverFromDisk) {
      nodeManager.restoreAfterSafeModeRestart();
      sessionManager.restoreAfterSafeModeRestart();
      sessionNotifier.restoreAfterSafeModeRestart();
    }

    nodeRestarter = new CoronaNodeRestarter(conf, nodeManager);
    nodeRestarter.start();

    setSafeMode(false);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 67, Source: ClusterManager.java

Example 14: initSecondary

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {

  fsName = AvatarNode.getRemoteNamenodeHttpName(conf,
      avatarNode.getInstanceId());

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);
  delayedScheduledCheckpointTime = conf.getBoolean("fs.checkpoint.delayed",
      false) ? AvatarNode.now() + checkpointPeriod * 1000 : 0;
  

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();
  avatarNode.httpServer.setAttribute("avatar.node", avatarNode);
  avatarNode.httpServer.addInternalServlet("outstandingnodes",
      "/outstandingnodes", OutStandingDatanodesServlet.class);

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  if (delayedScheduledCheckpointTime > 0) {
    LOG.warn("Standby: Checkpointing will be delayed by: " + checkpointPeriod + " seconds");
  }
  LOG.warn("Log Size Trigger    :" + checkpointTxnCount + " transactions.");
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 48, Source: Standby.java

Example 15: initialize

import org.apache.hadoop.http.HttpServer; // import the package/class this method depends on
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(Configuration conf) throws IOException {
  // initiate Java VM metrics
  JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
  
  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getClientProtocolAddress(conf);

  this.conf = conf;
  this.namenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);
  this.namenode.register();

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = getFileStorageDirs(NNStorageConfiguration
      .getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary"));
  checkpointEditsDirs = getFileStorageDirs(NNStorageConfiguration
      .getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary")); 
  checkpointImage = new CheckpointStorage(conf);
  checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf, 
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", checkpointImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" +infoPort); 
  LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger    :" + checkpointTxnCount  + " transactions ");
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 55, Source: SecondaryNameNode.java


Note: The org.apache.hadoop.http.HttpServer.setAttribute examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code, and do not republish without permission.