

Java ReadaheadPool.getInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.ReadaheadPool.getInstance. If you are wondering what ReadaheadPool.getInstance does, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.ReadaheadPool.


Three code examples of the ReadaheadPool.getInstance method are shown below, sorted by popularity by default.
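Before the full examples, here is a minimal, self-contained sketch (not taken from the projects below) of the typical pattern: obtain the shared pool via ReadaheadPool.getInstance() and then schedule readahead for a stream as data is read. The readaheadStream(...) call and the ReadaheadRequest handle mirror how Hadoop's own DataNode read path uses the pool, but the exact signature can vary between Hadoop releases, and the class, field, and method names here (ReadaheadUsageSketch, lastReadahead, onRead) are illustrative only.

import java.io.FileDescriptor;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;

/** Illustrative sketch of the getInstance() + readaheadStream() pattern. */
class ReadaheadUsageSketch {
  // getInstance() returns a process-wide singleton backed by a small thread pool,
  // so callers keep a reference instead of constructing their own pool.
  private final ReadaheadPool readaheadPool = ReadaheadPool.getInstance();
  private ReadaheadRequest lastReadahead; // handle returned by the previous request

  void onRead(FileDescriptor fd, long curPos, long readaheadLength, long maxOffsetToRead) {
    // Asynchronously hints the OS (posix_fadvise WILLNEED) to prefetch the next
    // readaheadLength bytes; passing the previous request lets the pool skip
    // overlapping work. May return null when no readahead was scheduled.
    lastReadahead = readaheadPool.readaheadStream(
        "usage-sketch", fd, curPos, readaheadLength, maxOffsetToRead, lastReadahead);
  }

  void close() {
    if (lastReadahead != null) {
      lastReadahead.cancel(); // drop any not-yet-executed readahead request
      lastReadahead = null;
    }
  }
}

In the DataNode examples below, startDataNode only acquires the singleton; the readaheadStream(...) calls happen later on the block read path.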

Example 1: startDataNode

import org.apache.hadoop.io.ReadaheadPool; // import the class the method depends on
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   AbstractList<File> dataDirs,
                  // DatanodeProtocol namenode,
                   SecureResources resources
                   ) throws IOException {
  if(UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException("Cannot start secure cluster without "
          + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 49, Source: DataNode.java

Example 2: startDataNode

import org.apache.hadoop.io.ReadaheadPool; // import the class the method depends on
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf
 *     - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 * @param dataDirs
 *     - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs,
    // DatanodeProtocol namenode,
    SecureResources resources) throws IOException {
  if (UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException(
          "Cannot start secure cluster without " + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
 
Developer: hopshadoop, Project: hops, Lines: 48, Source: DataNode.java

Example 3: startDataNode

import org.apache.hadoop.io.ReadaheadPool; // import the class the method depends on
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                  // DatanodeProtocol namenode,
                   SecureResources resources
                   ) throws IOException {
  if(UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException("Cannot start secure cluster without "
          + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
    if (dnConf.maxLockedMemory > ulimit) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 72, Source: DataNode.java


Note: The org.apache.hadoop.io.ReadaheadPool.getInstance examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects, and the source code copyright remains with the original authors; please consult the corresponding project's license before distributing or using it. Do not reproduce this article without permission.