

Java Daemon Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.util.Daemon. If you are wondering what the Daemon class is for, how to use it, or where to find examples of it in real code, the curated class examples below may help.


The Daemon class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
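
Before the individual examples, here is what the class does in isolation: Daemon extends java.lang.Thread and marks itself as a daemon thread in an instance initializer, so the JVM will not wait for it at shutdown, and its constructor names the thread after the wrapped runnable's toString(). A minimal sketch (the class name is illustrative, and hadoop-common is assumed to be on the classpath):

import org.apache.hadoop.util.Daemon;

public class DaemonSketch {
  public static void main(String[] args) throws InterruptedException {
    // Daemon wraps a Runnable in a thread that is already setDaemon(true).
    Daemon d = new Daemon(new Runnable() {
      @Override
      public void run() {
        System.out.println("running on " + Thread.currentThread().getName()
            + ", daemon=" + Thread.currentThread().isDaemon());
      }
    });
    d.start();
    d.join(); // join only so the demo prints before the JVM exits
  }
}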

Example 1: interruptAndJoin

import org.apache.hadoop.util.Daemon; // import the required package/class
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }
 
  if (daemonCopy != null) {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: LeaseRenewer.java
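
Note the locking discipline here: the daemon reference is copied while holding the monitor, but join() is called outside the synchronized block. Joining while still holding the lock could deadlock if the interrupted daemon thread needs that same lock in order to exit.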

Example 2: startExpiryDaemon

import org.apache.hadoop.util.Daemon; // import the required package/class
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch(InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: PeerCache.java
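
The toString() override on the Runnable is not cosmetic: Daemon's constructor names its thread after the runnable's toString(), so the override gives the cache-expiry thread a recognizable name in logs and thread dumps.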

Example 3: recoverBlocks

import org.apache.hadoop.util.Daemon; // import the required package/class
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for(RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: DataNode.java
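
Because the method hands the started Daemon back to the caller, a caller that needs to block until recovery completes can simply join() it; the test in Example 4 does exactly that.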

Example 4: testZeroLenReplicas

import org.apache.hadoop.util.Daemon; // import the required package/class
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestBlockRecovery.java
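
The test stubs the spied DataNode so that initReplicaRecovery() reports a finalized replica of length zero, joins the recovery daemon returned by recoverBlocks, and then verifies that commitBlockSynchronization was sent to the NameNode with a new block length of 0.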

Example 5: startExpiryDaemon

import org.apache.hadoop.util.Daemon; // import the required package/class
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }

  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch(InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: PeerCache.java

Example 6: recoverBlocks

import org.apache.hadoop.util.Daemon; // import the required package/class
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
    @Override
    public void run() {
      for(RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          if (b.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
          } else {
            new RecoveryTaskContiguous(b).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: BlockRecoveryWorker.java

Example 7: initializeStripedReadThreadPool

import org.apache.hadoop.util.Daemon; // import the required package/class
private void initializeStripedReadThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped reads; pool threads=" + num);
  }
  STRIPED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
      new Daemon.DaemonFactory() {
    private final AtomicInteger threadIndex = new AtomicInteger(0);

    @Override
    public Thread newThread(Runnable r) {
      Thread t = super.newThread(r);
      t.setName("stripedRead-" + threadIndex.getAndIncrement());
      return t;
    }
  }, new ThreadPoolExecutor.CallerRunsPolicy() {
    @Override
    public void rejectedExecution(Runnable runnable, ThreadPoolExecutor e) {
      LOG.info("Execution for striped reading rejected, "
          + "Executing in current thread");
      // will run in the current thread
      super.rejectedExecution(runnable, e);
    }
  });
  STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: ErasureCodingWorker.java
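
Two details here are worth isolating. Daemon.DaemonFactory is a ThreadFactory whose threads are always daemon threads, so a pool built with it never keeps the JVM alive on its own; and the overridden CallerRunsPolicy means a rejected striped read executes in the submitting thread, throttling submission rather than dropping work. A minimal, self-contained sketch of the factory (class name, pool size, and task are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.Daemon;

public class DaemonFactorySketch {
  public static void main(String[] args) throws InterruptedException {
    // Every thread created by Daemon.DaemonFactory is a daemon thread.
    ExecutorService pool =
        Executors.newFixedThreadPool(2, new Daemon.DaemonFactory());
    pool.submit(new Runnable() {
      @Override
      public void run() {
        System.out.println("daemon=" + Thread.currentThread().isDaemon());
      }
    });
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}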

Example 8: initializeStripedBlkRecoveryThreadPool

import org.apache.hadoop.util.Daemon; // import the required package/class
private void initializeStripedBlkRecoveryThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped block recovery; pool threads=" + num);
  }
  STRIPED_BLK_RECOVERY_THREAD_POOL = new ThreadPoolExecutor(2, num, 60,
      TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
      new Daemon.DaemonFactory() {
        private final AtomicInteger threadIdx = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
          Thread t = super.newThread(r);
          t.setName("stripedBlockRecovery-" + threadIdx.getAndIncrement());
          return t;
        }
      });
  STRIPED_BLK_RECOVERY_THREAD_POOL.allowCoreThreadTimeOut(true);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: ErasureCodingWorker.java

Example 9: main

import org.apache.hadoop.util.Daemon; // import the required package/class
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  org.apache.hadoop.hdfs.DnsMonitorSecurityManager.setTheManager();
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, tconf);
  } catch (IllegalArgumentException e) {
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }

  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
  checkpointThread.start();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: SecondaryNameNode.java
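
When no arguments are given, the SecondaryNameNode instance (which implements Runnable) is wrapped in a Daemon whose run() loops on the checkpoint schedule indefinitely; with arguments, the process instead runs the requested command via processArgs and exits.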

Example 10: DirectoryScanner

import org.apache.hadoop.util.Daemon; // import the required package/class
DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
  this.datanode = dn;
  this.dataset = dataset;
  int interval = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = interval * 1000L; // msec
  int numThreads = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
      DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);

  reportCompileThreadPool = Executors.newFixedThreadPool(numThreads,
      new Daemon.DaemonFactory());
  threadPoolExecutor = new ScheduledThreadPoolExecutor(1,
      new Daemon.DaemonFactory());
  this.delta = new FSDatasetDelta();

  this.dataset.setDatasetDelta(delta);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 18, Source: DirectoryScanner.java

Example 11: initDataXceiver

import org.apache.hadoop.util.Daemon; // import the required package/class
private void initDataXceiver(Configuration conf) throws IOException {
  String address = 
    NetUtils.getServerAddress(conf,
                      "dfs.datanode.bindAddress",
                      "dfs.datanode.port",
                      FSConstants.DFS_DATANODE_ADDRESS_KEY);
  InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
  // find free port
  ServerSocket ss = (socketWriteTimeout > 0) ? 
        ServerSocketChannel.open().socket() : new ServerSocket();
  Server.bind(ss, socAddr, 
      conf.getInt("dfs.datanode.xceiver.listen.queue.size", 128));
  ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE); 
  // adjust machine name with the actual port
  int tmpPort = ss.getLocalPort();
  selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
                                   tmpPort);
  LOG.info("Opened info server at " + tmpPort);
    
  this.threadGroup = new ThreadGroup("dataXceiverServer");
  this.dataXceiverServer = new Daemon(threadGroup, 
      new DataXceiverServer(ss, conf, this));
  this.threadGroup.setDaemon(true); // auto destroy when empty
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 25, Source: DataNode.java
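
Passing the ThreadGroup to the Daemon constructor puts the DataXceiverServer thread into that group, and setDaemon(true) on the group means Java destroys the group automatically once its last member thread exits, which is what the trailing comment refers to.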

Example 12: recoverBlocks

import org.apache.hadoop.util.Daemon; // import the required package/class
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks,
    final DatanodeInfo[][] targets, long processStartTime) {
  final long deadline = processStartTime + blkRecoveryTimeout;
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true, deadline);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 20, Source: DataNode.java

Example 13: interruptAndJoin

import org.apache.hadoop.util.Daemon; // import the required package/class
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }

  if (daemonCopy != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: LeaseRenewer.java

Example 14: startExpiryDaemon

import org.apache.hadoop.util.Daemon; // import the required package/class
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        SocketCache.this.run();
      } catch(InterruptedException e) {
        //noop
      } finally {
        SocketCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(SocketCache.this);
    }
  });
  daemon.start();
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: SocketCache.java

Example 15: startActiveServices

import org.apache.hadoop.util.Daemon; // import the required package/class
/**
 * Start services required in active state
 *
 * @throws IOException
 */
void startActiveServices() throws IOException {
  LOG.info("Starting services required for active state");
  LOG.info("Catching up to latest edits from old active before " +
      "taking over writer role in edits logs");
  blockManager.getDatanodeManager().markAllDatanodesStale();

  if (isClusterInSafeMode()) {
    if (!isInSafeMode() ||
        (isInSafeMode() && safeMode.isPopulatingReplicationQueues())) {
      LOG.info("Reprocessing replication and invalidation queues");
      blockManager.processMisReplicatedBlocks();
    }
  }

  leaseManager.startMonitor();
  startSecretManagerIfNecessary();

  //ResourceMonitor required only at ActiveNN. See HDFS-2914
  this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
  nnrmthread.start();

  if (erasureCodingEnabled) {
    erasureCodingManager.activate();
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 31, Source: FSNamesystem.java


Note: The org.apache.hadoop.util.Daemon class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow the license of the corresponding project. Do not reproduce without permission.