This article collects typical code examples of the Java method org.apache.hadoop.util.Daemon.start. If you have been wondering what exactly Daemon.start does, how to use it, or what real usages look like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.Daemon.
Below, 15 code examples of the Daemon.start method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
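Before the examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a Runnable in a Daemon (Hadoop's thin subclass of java.lang.Thread that marks itself as a daemon thread) and call start(). The class name and the Runnable body below are hypothetical placeholders for illustration, not taken from any example that follows.

import org.apache.hadoop.util.Daemon;

public class DaemonStartSketch {
  public static void main(String[] args) throws InterruptedException {
    // The Daemon constructor calls setDaemon(true), so the JVM will not
    // wait for this thread when the last non-daemon thread exits.
    Daemon d = new Daemon(new Runnable() {
      @Override
      public void run() {
        System.out.println("background task running"); // hypothetical work
      }
    });
    d.start(); // start the underlying daemon thread
    d.join();  // optional: block until it finishes, as some examples do
  }
}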
Example 1: startExpiryDaemon
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch (InterruptedException e) {
        // noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
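A detail worth noticing in this example: the anonymous Runnable also overrides toString(). The Daemon constructor names its thread after the Runnable's toString(), so this gives the expiry thread a recognizable name in thread dumps.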
Example 2: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 3: startExpiryDaemon
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }
  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch (InterruptedException e) {
        // noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
Example 4: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          if (b.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
          } else {
            new RecoveryTaskContiguous(b).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 5: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 6: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks,
    final DatanodeInfo[][] targets, long processStartTime) {
  final long deadline = processStartTime + blkRecoveryTimeout;
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for (int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true, deadline);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 7: triggerFailover
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
/**
 * Triggers failover processing for safe mode and blocks until we have left
 * safe mode.
 *
 * @throws IOException
 */
protected void triggerFailover() throws IOException {
  clearDataStructures();
  for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
    liveDatanodes.add(node);
    outStandingHeartbeats.add(node);
  }
  safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
  safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
  safeModeMonitor.start();
  try {
    safeModeMonitor.join();
  } catch (InterruptedException ie) {
    throw new IOException("triggerFailover() interrupted");
  }
  if (safeModeState != SafeModeState.AFTER_FAILOVER) {
    throw new RuntimeException("safeModeState is : " + safeModeState +
        " which does not indicate a successful exit of safemode");
  }
}
Example 8: checkMode
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
/**
 * Check and trigger safe mode if needed.
 */
private void checkMode() {
  if (needEnter()) {
    enter();
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // the threshold is reached
  if (!isOn() ||                          // safe mode is off
      extension <= 0 || threshold <= 0) { // don't need to wait
    this.leave(true); // leave safe mode
    return;
  }
  if (reached > 0) { // threshold has already been reached before
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // start monitor
  reached = now();
  smmthread = new Daemon(new SafeModeMonitor());
  smmthread.start();
  reportStatus("STATE* Safe mode extension entered.", true);
}
Example 9: recoverBlocks
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for (int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", blocks[i], targets[i]);
          recoverBlock(blocks[i], false, targets[i], true);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example 10: main
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, tconf);
  } catch (IllegalArgumentException e) {
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }
  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf));
  checkpointThread.start();
}
Example 11: startThreads
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
/** should be called before this object is used */
public void startThreads() throws IOException {
  Preconditions.checkState(!running);
  updateCurrentKey();
  synchronized (this) {
    running = true;
    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
    tokenRemoverThread.start();
  }
}
Example 12: put
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
public synchronized void put(final long inodeId, final DFSOutputStream out,
    final DFSClient dfsc) {
  if (dfsc.isClientRunning()) {
    if (!isRunning() || isRenewerExpired()) {
      // start a new daemon with a new id.
      final int id = ++currentId;
      daemon = new Daemon(new Runnable() {
        @Override
        public void run() {
          try {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " started");
            }
            LeaseRenewer.this.run(id);
          } catch (InterruptedException e) {
            LOG.debug("LeaseRenewer is interrupted.", e);
          } finally {
            synchronized (LeaseRenewer.this) {
              Factory.INSTANCE.remove(LeaseRenewer.this);
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " exited");
            }
          }
        }

        @Override
        public String toString() {
          return String.valueOf(LeaseRenewer.this);
        }
      });
      daemon.start();
    }
    dfsc.putFileBeingWritten(inodeId, out);
    emptyTime = Long.MAX_VALUE;
  }
}
Example 13: tryStart
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
void tryStart() {
  final Thread current = Thread.currentThread();
  if (running.compareAndSet(null, current)) {
    final Daemon daemon = new Daemon() {
      @Override
      public void run() {
        for (; isRunning(this);) {
          final long waitTime = checkCalls();
          tryStop(this);
          try {
            synchronized (AsyncCallHandler.this) {
              AsyncCallHandler.this.wait(waitTime);
            }
          } catch (InterruptedException e) {
            kill(this);
          }
        }
      }
    };
    final boolean set = running.compareAndSet(current, daemon);
    Preconditions.checkState(set);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Starting AsyncCallQueue.Processor " + daemon);
    }
    daemon.start();
  }
}
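The two compareAndSet calls here (on what is presumably an AtomicReference<Thread> named running) form a small handshake that guarantees at most one processor daemon: the calling thread first reserves the slot with itself as a placeholder, then atomically swaps in the newly built Daemon before starting it. A concurrent tryStart() fails the first compareAndSet and returns without spawning a second thread.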
Example 14: triggerFailover
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
/**
 * Triggers failover processing for safe mode and blocks until we have left
 * safe mode.
 *
 * @throws IOException
 */
protected void triggerFailover() throws IOException {
  clearDataStructures();
  // stop sending PREPARE_FAILOVER command
  // we are performing failover now
  prepareFailover = false;
  for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
    liveDatanodes.add(node);
    outStandingHeartbeats.add(node);
  }
  InjectionHandler.processEvent(InjectionEvent.STANDBY_ENTER_SAFE_MODE);
  safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
  InjectionHandler.processEvent(InjectionEvent.STANDBY_FAILOVER_INPROGRESS);
  safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
  safeModeMonitor.start();
  try {
    safeModeMonitor.join();
  } catch (InterruptedException ie) {
    throw new IOException("triggerFailover() interrupted");
  }
  if (safeModeState != SafeModeState.AFTER_FAILOVER) {
    throw new IOException("safeModeState is : " + safeModeState +
        " which does not indicate a successful exit of safemode");
  }
}
Example 15: initialize
import org.apache.hadoop.util.Daemon; // import the package/class the method depends on
protected void initialize() {
  // How often do we mirror the information from the Collector
  mirrorPeriod = conf.getLong("mapred.resourceutilization.mirrorperiod",
      DEFAULT_MIRROR_PERIOD);
  // Make connection to the Collector
  connect();
  mirrorDaemon = new Daemon(new MirrorRun());
  mirrorDaemon.start();
}