当前位置: 首页>>代码示例>>Java>>正文


Java Daemon.DaemonFactory方法代码示例

本文整理汇总了Java中org.apache.hadoop.util.Daemon.DaemonFactory方法的典型用法代码示例。如果您正苦于以下问题:Java Daemon.DaemonFactory方法的具体用法?Java Daemon.DaemonFactory怎么用?Java Daemon.DaemonFactory使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.util.Daemon的用法示例。


在下文中一共展示了Daemon.DaemonFactory方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: initializeStripedReadThreadPool

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Builds the executor backing striped reads and stores it in
 * {@code STRIPED_READ_THREAD_POOL}.
 *
 * @param num maximum number of daemon worker threads in the pool
 */
private void initializeStripedReadThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped reads; pool threads=" + num);
  }
  // Daemon threads named "stripedRead-<n>" so they are easy to spot in dumps.
  final Daemon.DaemonFactory daemonFactory = new Daemon.DaemonFactory() {
    private final AtomicInteger nextId = new AtomicInteger(0);

    @Override
    public Thread newThread(Runnable task) {
      Thread worker = super.newThread(task);
      worker.setName("stripedRead-" + nextId.getAndIncrement());
      return worker;
    }
  };
  // When the pool is saturated, log and fall back to the submitting thread.
  final ThreadPoolExecutor.CallerRunsPolicy rejectionHandler =
      new ThreadPoolExecutor.CallerRunsPolicy() {
        @Override
        public void rejectedExecution(Runnable task, ThreadPoolExecutor pool) {
          LOG.info("Execution for striped reading rejected, "
              + "Executing in current thread");
          // will run in the current thread
          super.rejectedExecution(task, pool);
        }
      };
  STRIPED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
      daemonFactory, rejectionHandler);
  // Let even the single core thread expire when idle.
  STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:27,代码来源:ErasureCodingWorker.java

示例2: initializeStripedBlkRecoveryThreadPool

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Builds the executor backing striped block recovery and stores it in
 * {@code STRIPED_BLK_RECOVERY_THREAD_POOL}.
 *
 * @param num maximum number of daemon worker threads in the pool
 */
private void initializeStripedBlkRecoveryThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped block recovery; pool threads=" + num);
  }
  // Daemon threads named "stripedBlockRecovery-<n>" for diagnosability.
  final Daemon.DaemonFactory recoveryFactory = new Daemon.DaemonFactory() {
    private final AtomicInteger nextId = new AtomicInteger(0);

    @Override
    public Thread newThread(Runnable task) {
      Thread worker = super.newThread(task);
      worker.setName("stripedBlockRecovery-" + nextId.getAndIncrement());
      return worker;
    }
  };
  STRIPED_BLK_RECOVERY_THREAD_POOL = new ThreadPoolExecutor(2, num, 60,
      TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), recoveryFactory);
  // Allow core threads to time out so the pool shrinks when idle.
  STRIPED_BLK_RECOVERY_THREAD_POOL.allowCoreThreadTimeOut(true);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:ErasureCodingWorker.java

示例3: DirectoryScanner

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Creates a directory scanner for the given datanode and dataset, wiring up
 * the report-compilation pool, the master scheduler thread, and the dataset
 * delta tracker.
 *
 * @param dn the owning datanode
 * @param dataset the dataset to scan
 * @param conf configuration supplying scan interval and thread count
 */
DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
  this.datanode = dn;
  this.dataset = dataset;
  // Interval is configured in seconds; keep it internally in milliseconds.
  int intervalSecs = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = intervalSecs * 1000L; // msec

  int numThreads = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
      DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
  // Worker pool that compiles per-volume reports; all threads are daemons.
  reportCompileThreadPool =
      Executors.newFixedThreadPool(numThreads, new Daemon.DaemonFactory());
  // Single daemon thread that periodically triggers scans.
  threadPoolExecutor =
      new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());

  this.delta = new FSDatasetDelta();
  this.dataset.setDatasetDelta(delta);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:18,代码来源:DirectoryScanner.java

示例4: DirectoryScanner

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Creates a directory scanner bound to the given datanode and dataset.
 *
 * @param datanode the owning datanode
 * @param dataset the dataset to scan
 * @param conf configuration supplying scan interval and thread count
 */
DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
  this.datanode = datanode;
  this.dataset = dataset;
  // Interval is configured in seconds; store it in milliseconds.
  int intervalSecs =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
          DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = intervalSecs * 1000L; //msec

  int poolSize =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
          DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
  // Daemon pool that compiles per-volume reports in parallel.
  reportCompileThreadPool =
      Executors.newFixedThreadPool(poolSize, new Daemon.DaemonFactory());
  // Single daemon scheduler thread that drives periodic scans.
  masterThread =
      new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:16,代码来源:DirectoryScanner.java

示例5: initThreadsNumForHedgedReads

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Lazily creates the hedged-reads thread pool, HEDGED_READ_THREAD_POOL.
 * A no-op when the pool already exists or {@code num} is non-positive.
 *
 * @param num Number of threads for hedged reads thread pool.
 * If zero, skip hedged reads thread pool creation.
 */
private synchronized void initThreadsNumForHedgedReads(int num) {
  if (num <= 0 || HEDGED_READ_THREAD_POOL != null) {
    return;
  }
  // Daemon threads named "hedgedRead-<n>" for diagnosability.
  final Daemon.DaemonFactory daemonFactory = new Daemon.DaemonFactory() {
    private final AtomicInteger nextId = new AtomicInteger(0);

    @Override
    public Thread newThread(Runnable task) {
      Thread worker = super.newThread(task);
      worker.setName("hedgedRead-" + nextId.getAndIncrement());
      return worker;
    }
  };
  // On saturation, count the rejection and run on the submitting thread.
  final ThreadPoolExecutor.CallerRunsPolicy rejectionHandler =
      new ThreadPoolExecutor.CallerRunsPolicy() {
        @Override
        public void rejectedExecution(Runnable task, ThreadPoolExecutor pool) {
          LOG.info("Execution rejected, Executing in current thread");
          HEDGED_READ_METRIC.incHedgedReadOpsInCurThread();
          // will run in the current thread
          super.rejectedExecution(task, pool);
        }
      };
  HEDGED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
      daemonFactory, rejectionHandler);
  // Let the single core thread expire when idle.
  HEDGED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
  LOG.debug("Using hedged reads; pool threads={}", num);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:33,代码来源:DFSClient.java

示例6: initThreadsNumForHedgedReads

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Lazily creates the hedged-reads thread pool, HEDGED_READ_THREAD_POOL.
 * A no-op when the pool already exists or {@code num} is non-positive.
 *
 * @param num Number of threads for hedged reads thread pool.
 * If zero, skip hedged reads thread pool creation.
 */
private synchronized void initThreadsNumForHedgedReads(int num) {
  if (num <= 0 || HEDGED_READ_THREAD_POOL != null) {
    return;
  }
  // Daemon threads named "hedgedRead-<n>" for diagnosability.
  final Daemon.DaemonFactory daemonFactory = new Daemon.DaemonFactory() {
    private final AtomicInteger nextId = new AtomicInteger(0);

    @Override
    public Thread newThread(Runnable task) {
      Thread worker = super.newThread(task);
      worker.setName("hedgedRead-" + nextId.getAndIncrement());
      return worker;
    }
  };
  // On saturation, count the rejection and run on the submitting thread.
  final ThreadPoolExecutor.CallerRunsPolicy rejectionHandler =
      new ThreadPoolExecutor.CallerRunsPolicy() {
        @Override
        public void rejectedExecution(Runnable task, ThreadPoolExecutor pool) {
          LOG.info("Execution rejected, Executing in current thread");
          HEDGED_READ_METRIC.incHedgedReadOpsInCurThread();
          // will run in the current thread
          super.rejectedExecution(task, pool);
        }
      };
  HEDGED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
      daemonFactory, rejectionHandler);
  // Let the single core thread expire when idle.
  HEDGED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using hedged reads; pool threads=" + num);
  }
}
 
开发者ID:yncxcw,项目名称:big-c,代码行数:38,代码来源:DFSClient.java

示例7: DirectoryScanner

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Creates a directory scanner for the given dataset.
 *
 * @param dataset the dataset to scan
 * @param conf configuration supplying scan interval and thread count
 */
DirectoryScanner(FsDatasetSpi<?> dataset, Configuration conf) {
  this.dataset = dataset;
  // Interval is configured in seconds; store it in milliseconds.
  int intervalSecs =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
          DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = intervalSecs * 1000L; //msec

  int poolSize =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
          DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
  // Daemon pool that compiles per-volume reports in parallel.
  reportCompileThreadPool =
      Executors.newFixedThreadPool(poolSize, new Daemon.DaemonFactory());
  // Single daemon scheduler thread that drives periodic scans.
  masterThread =
      new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:15,代码来源:DirectoryScanner.java

示例8: DirectoryScanner

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Creates a directory scanner for the given dataset.
 *
 * @param dataset the dataset to scan
 * @param conf configuration supplying scan interval and thread count
 */
DirectoryScanner(FsDatasetSpi<?> dataset, Configuration conf) {
  this.dataset = dataset;
  // Interval is configured in seconds; store it in milliseconds.
  int intervalSecs = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = intervalSecs * 1000L; //msec

  int poolSize = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
  // Daemon pool that compiles per-volume reports in parallel.
  reportCompileThreadPool =
      Executors.newFixedThreadPool(poolSize, new Daemon.DaemonFactory());
  // Single daemon scheduler thread that drives periodic scans.
  masterThread =
      new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:16,代码来源:DirectoryScanner.java

示例9: DirectoryScanner

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Create a new directory scanner, but don't cycle it running yet.
 *
 * @param datanode the parent datanode
 * @param dataset the dataset to scan
 * @param conf the Configuration object
 */
DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
  this.datanode = datanode;
  this.dataset = dataset;
  // Interval is configured in seconds; store it in milliseconds.
  int intervalSecs =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
          DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = intervalSecs * MILLIS_PER_SECOND; //msec

  // Throttle must lie in (0, 1000] ms/sec; anything else reverts to default.
  int throttle = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY,
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_DEFAULT);
  if (throttle > MILLIS_PER_SECOND) {
    LOG.error(
        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY
        + " set to value above 1000 ms/sec. Assuming default value of " +
        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_DEFAULT);
    throttleLimitMsPerSec =
        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_DEFAULT;
  } else if (throttle <= 0) {
    LOG.error(
        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY
        + " set to value below 1 ms/sec. Assuming default value of " +
        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_DEFAULT);
    throttleLimitMsPerSec =
        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_DEFAULT;
  } else {
    throttleLimitMsPerSec = throttle;
  }

  int poolSize =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
          DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
  // Daemon pool that compiles per-volume reports in parallel.
  reportCompileThreadPool =
      Executors.newFixedThreadPool(poolSize, new Daemon.DaemonFactory());
  // Single daemon scheduler thread that drives periodic scans.
  masterThread =
      new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:48,代码来源:DirectoryScanner.java


注:本文中的org.apache.hadoop.util.Daemon.DaemonFactory方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。