This article collects typical usage examples of the Java class org.apache.hadoop.util.Daemon. If you are wondering what the Daemon class does, how to use it, or what real-world usage looks like, the curated examples below should help.
The Daemon class belongs to the org.apache.hadoop.util package. Fifteen code examples are shown below, sorted by popularity by default.
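Before the examples, a quick orientation: Daemon is a thin Thread subclass that marks itself as a daemon thread in its constructor and, when wrapping a Runnable, names the thread after that Runnable's toString(). The following minimal sketch (the class name DaemonBasics and the demo Runnable are illustrative, not taken from the examples below) shows both behaviors:

import org.apache.hadoop.util.Daemon;

public class DaemonBasics {
  public static void main(String[] args) throws InterruptedException {
    Daemon d = new Daemon(new Runnable() {
      @Override
      public void run() {
        System.out.println(Thread.currentThread().getName()
            + " daemon=" + Thread.currentThread().isDaemon());
      }

      @Override
      public String toString() {
        // Daemon uses this as the thread name, which is why several
        // examples below override toString() on their Runnables.
        return "demo-daemon";
      }
    });
    d.start();  // prints "demo-daemon daemon=true"
    d.join();   // Daemon is a Thread, so the usual Thread API applies
  }
}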
Example 1: interruptAndJoin
import org.apache.hadoop.util.Daemon; // import the required package/class
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }
  // Join outside the synchronized block, so the caller does not hold
  // the monitor while waiting for the daemon to exit.
  if (daemonCopy != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
Example 2: startExpiryDaemon
import org.apache.hadoop.util.Daemon; // import the required package/class
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch (InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      // Daemon names its thread after this value
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
Example 3: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the required package/class
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
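Because recoverBlocks() returns the started Daemon rather than joining it internally, callers decide how long to wait. A hedged usage sketch (the helper awaitRecovery is hypothetical, not part of the example's API):

import org.apache.hadoop.util.Daemon;

// Hypothetical helper: wait for a recovery daemon with a timeout.
static void awaitRecovery(Daemon recoveryDaemon, long timeoutMs)
    throws InterruptedException {
  recoveryDaemon.join(timeoutMs);   // plain Thread.join(millis)
  if (recoveryDaemon.isAlive()) {
    recoveryDaemon.interrupt();     // took too long; abandon the attempt
  }
}

Example 4 below shows the other option: an unbounded d.join() when the caller must see the final result.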
Example 4: testZeroLenReplicas
import org.apache.hadoop.util.Daemon; // import the required package/class
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED))
      .when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join(); // wait for the recovery daemon to finish before verifying
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Example 5: startExpiryDaemon
import org.apache.hadoop.util.Daemon; // import the required package/class
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch (InterruptedException e) {
        //noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
Example 6: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the required package/class
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          if (b.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
          } else {
            new RecoveryTaskContiguous(b).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 7: initializeStripedReadThreadPool
import org.apache.hadoop.util.Daemon; // import the required package/class
private void initializeStripedReadThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped reads; pool threads=" + num);
  }
  STRIPED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
      new Daemon.DaemonFactory() {
        private final AtomicInteger threadIndex = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
          Thread t = super.newThread(r);
          t.setName("stripedRead-" + threadIndex.getAndIncrement());
          return t;
        }
      }, new ThreadPoolExecutor.CallerRunsPolicy() {
        @Override
        public void rejectedExecution(Runnable runnable, ThreadPoolExecutor e) {
          LOG.info("Execution for striped reading rejected, "
              + "Executing in current thread");
          // will run in the current thread
          super.rejectedExecution(runnable, e);
        }
      });
  STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
}
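A design note on Example 7: a SynchronousQueue has no capacity, so each submission must be handed directly to an idle worker; once all num threads are busy, the handoff is rejected and the CallerRunsPolicy subclass runs the task in the submitting thread, which throttles readers naturally. A minimal, self-contained sketch of the same combination (pool bounds and the printed task are illustrative, not from the example):

import java.util.concurrent.*;
import org.apache.hadoop.util.Daemon;

public class DaemonPoolSketch {
  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 2, 60,
        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
        new Daemon.DaemonFactory(),               // every worker is a Daemon
        new ThreadPoolExecutor.CallerRunsPolicy()); // overflow runs in caller
    pool.submit(() -> System.out.println(Thread.currentThread().getName()
        + " daemon=" + Thread.currentThread().isDaemon())); // daemon=true
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}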
Example 8: initializeStripedBlkRecoveryThreadPool
import org.apache.hadoop.util.Daemon; // import the required package/class
private void initializeStripedBlkRecoveryThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped block recovery; pool threads=" + num);
  }
  // Unlike Example 7, the unbounded LinkedBlockingQueue means submissions
  // queue up rather than being rejected, so no rejection handler is needed.
  STRIPED_BLK_RECOVERY_THREAD_POOL = new ThreadPoolExecutor(2, num, 60,
      TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
      new Daemon.DaemonFactory() {
        private final AtomicInteger threadIdx = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
          Thread t = super.newThread(r);
          t.setName("stripedBlockRecovery-" + threadIdx.getAndIncrement());
          return t;
        }
      });
  STRIPED_BLK_RECOVERY_THREAD_POOL.allowCoreThreadTimeOut(true);
}
Example 9: main
import org.apache.hadoop.util.Daemon; // import the required package/class
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  org.apache.hadoop.hdfs.DnsMonitorSecurityManager.setTheManager();
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, tconf);
  } catch (IllegalArgumentException e) {
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }
  // Create a never-ending daemon; SecondaryNameNode implements Runnable.
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf));
  checkpointThread.start();
}
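One general JVM caveat worth flagging here (a property of daemon threads, not of this class): daemon threads alone do not keep a process alive, so a main() that only starts a Daemon and returns relies on some non-daemon thread, presumably the server threads started inside SecondaryNameNode, to keep the JVM running. A minimal illustration of the rule (class name and loop body are hypothetical):

import org.apache.hadoop.util.Daemon;

public class DaemonExitDemo {
  public static void main(String[] args) {
    new Daemon(new Runnable() {
      @Override
      public void run() {
        while (true) { /* periodic background work */ }
      }
    }).start();
    // main() returns immediately; if no non-daemon threads remain,
    // the JVM exits and the loop above is killed mid-iteration.
  }
}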
Example 10: DirectoryScanner
import org.apache.hadoop.util.Daemon; // import the required package/class
DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
  this.datanode = dn;
  this.dataset = dataset;
  int interval = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = interval * 1000L; // msec
  int numThreads = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
      DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
  // Daemon.DaemonFactory is a ThreadFactory, so it also plugs into the
  // Executors convenience methods and ScheduledThreadPoolExecutor.
  reportCompileThreadPool = Executors.newFixedThreadPool(numThreads,
      new Daemon.DaemonFactory());
  threadPoolExecutor = new ScheduledThreadPoolExecutor(1,
      new Daemon.DaemonFactory());
  this.delta = new FSDatasetDelta();
  this.dataset.setDatasetDelta(delta);
}
Example 11: initDataXceiver
import org.apache.hadoop.util.Daemon; // import the required package/class
private void initDataXceiver(Configuration conf) throws IOException {
  String address =
      NetUtils.getServerAddress(conf,
          "dfs.datanode.bindAddress",
          "dfs.datanode.port",
          FSConstants.DFS_DATANODE_ADDRESS_KEY);
  InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
  // find free port
  ServerSocket ss = (socketWriteTimeout > 0) ?
      ServerSocketChannel.open().socket() : new ServerSocket();
  Server.bind(ss, socAddr,
      conf.getInt("dfs.datanode.xceiver.listen.queue.size", 128));
  ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
  // adjust machine name with the actual port
  int tmpPort = ss.getLocalPort();
  selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
      tmpPort);
  LOG.info("Opened info server at " + tmpPort);
  this.threadGroup = new ThreadGroup("dataXceiverServer");
  this.dataXceiverServer = new Daemon(threadGroup,
      new DataXceiverServer(ss, conf, this));
  this.threadGroup.setDaemon(true); // auto destroy when empty
}
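Example 11 combines the Daemon(ThreadGroup, Runnable) constructor with ThreadGroup.setDaemon(true), so the group destroys itself once its last thread exits. A stripped-down sketch of the same pattern (class name and worker body are placeholders):

import org.apache.hadoop.util.Daemon;

public class GroupedDaemonSketch {
  public static void main(String[] args) throws InterruptedException {
    ThreadGroup group = new ThreadGroup("xceiver-sketch");
    group.setDaemon(true); // java.lang.ThreadGroup: auto-destroy when empty
    Daemon worker = new Daemon(group, new Runnable() {
      @Override
      public void run() {
        System.out.println("serving in group "
            + Thread.currentThread().getThreadGroup().getName());
      }
    });
    worker.start();
    worker.join(); // after this, the group's last thread is gone
  }
}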
Example 12: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the required package/class
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks,
    final DatanodeInfo[][] targets, long processStartTime) {
  final long deadline = processStartTime + blkRecoveryTimeout;
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for (int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true,
              deadline);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 13: interruptAndJoin
import org.apache.hadoop.util.Daemon; // import the required package/class
void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }
  if (daemonCopy != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
Example 14: startExpiryDaemon
import org.apache.hadoop.util.Daemon; // import the required package/class
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        SocketCache.this.run();
      } catch (InterruptedException e) {
        //noop
      } finally {
        SocketCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(SocketCache.this);
    }
  });
  daemon.start();
}
Example 15: startActiveServices
import org.apache.hadoop.util.Daemon; // import the required package/class
/**
 * Start services required in active state
 *
 * @throws IOException
 */
void startActiveServices() throws IOException {
  LOG.info("Starting services required for active state");
  LOG.info("Catching up to latest edits from old active before " +
      "taking over writer role in edits logs");
  blockManager.getDatanodeManager().markAllDatanodesStale();
  if (isClusterInSafeMode()) {
    if (!isInSafeMode() ||
        (isInSafeMode() && safeMode.isPopulatingReplicationQueues())) {
      LOG.info("Reprocessing replication and invalidation queues");
      blockManager.processMisReplicatedBlocks();
    }
  }
  leaseManager.startMonitor();
  startSecretManagerIfNecessary();
  // ResourceMonitor required only at ActiveNN. See HDFS-2914
  this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
  nnrmthread.start();
  if (erasureCodingEnabled) {
    erasureCodingManager.activate();
  }
}