This page collects typical usage examples of the Java class org.apache.hadoop.util.ShutdownHookManager. If you are wondering what ShutdownHookManager is for, how to use it, or where to find examples of it, the curated class code examples below may help.
The ShutdownHookManager class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
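Before diving into the examples, here is a minimal sketch of the core ShutdownHookManager API that recurs throughout them: a singleton obtained via get(), hooks registered as Runnables with an integer priority (hooks with higher priority run earlier during shutdown), and an isShutdownInProgress() flag for guarding work while the JVM is going down. The priority value and the hook body here are illustrative assumptions, not taken from any of the examples below.

import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookSketch {
  // Illustrative priority; real services define their own SHUTDOWN_HOOK_PRIORITY constant.
  private static final int SHUTDOWN_HOOK_PRIORITY = 30;

  public static void main(String[] args) {
    Runnable cleanup = new Runnable() {
      @Override
      public void run() {
        // Hooks with higher priority run earlier during JVM shutdown.
        System.out.println("cleaning up temporary state");
      }
    };
    // Register the hook with the singleton manager.
    ShutdownHookManager.get().addShutdownHook(cleanup, SHUTDOWN_HOOK_PRIORITY);

    // A hook can also be deregistered while the JVM is still running normally.
    if (!ShutdownHookManager.get().isShutdownInProgress()) {
      ShutdownHookManager.get().removeShutdownHook(cleanup);
    }
  }
}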
Example 1: deleteOnExit
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
/**
* Mark a path to be deleted on JVM shutdown.
*
* @param f the existing path to delete.
*
* @return true if deleteOnExit is successful, otherwise false.
*
* @throws AccessControlException If access is denied
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public boolean deleteOnExit(Path f) throws AccessControlException,
IOException {
if (!this.util().exists(f)) {
return false;
}
synchronized (DELETE_ON_EXIT) {
if (DELETE_ON_EXIT.isEmpty()) {
ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
}
Set<Path> set = DELETE_ON_EXIT.get(this);
if (set == null) {
set = new TreeSet<Path>();
DELETE_ON_EXIT.put(this, set);
}
set.add(f);
}
return true;
}
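For context, the method above is defined on org.apache.hadoop.fs.FileContext in Hadoop; a minimal usage sketch (the scratch path is a hypothetical example) could look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path scratch = new Path("/tmp/scratch-dir"); // hypothetical scratch path
    // If registration succeeds, the FINALIZER shutdown hook removes the path at JVM exit.
    if (fc.deleteOnExit(scratch)) {
      System.out.println("path will be deleted on shutdown");
    }
  }
}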
Example 2: main
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
public static void main(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(SharedCacheManager.class, args, LOG);
try {
Configuration conf = new YarnConfiguration();
SharedCacheManager sharedCacheManager = new SharedCacheManager();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(sharedCacheManager),
SHUTDOWN_HOOK_PRIORITY);
sharedCacheManager.init(conf);
sharedCacheManager.start();
} catch (Throwable t) {
LOG.fatal("Error starting SharedCacheManager", t);
System.exit(-1);
}
}
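CompositeServiceShutdownHook, used here and in most of the examples that follow, is conceptually just a Runnable that stops the wrapped service when the JVM shuts down. Below is a simplified sketch of that pattern; the real class is a static inner class of org.apache.hadoop.service.CompositeService and stops the service quietly, logging rather than propagating exceptions.

import org.apache.hadoop.service.CompositeService;

// Simplified sketch, not the actual Hadoop implementation.
class CompositeServiceShutdownHookSketch implements Runnable {
  private final CompositeService service;

  CompositeServiceShutdownHookSketch(CompositeService service) {
    this.service = service;
  }

  @Override
  public void run() {
    // Stops the composite service (and thereby all of its child services).
    service.stop();
  }
}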
Example 3: shutDown
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
protected void shutDown() {
new Thread() {
@Override
public void run() {
try {
NodeManager.this.stop();
} catch (Throwable t) {
LOG.error("Error while shutting down NodeManager", t);
} finally {
if (shouldExitOnShutdownEvent
&& !ShutdownHookManager.get().isShutdownInProgress()) {
ExitUtil.terminate(-1);
}
}
}
}.start();
}
Example 4: initAndStartNodeManager
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
try {
// Remove the old hook if we are rebooting.
if (hasToReboot && null != nodeManagerShutdownHook) {
ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
}
nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
SHUTDOWN_HOOK_PRIORITY);
// System exit should be called only when NodeManager is instantiated from
// main() function
this.shouldExitOnShutdownEvent = true;
this.init(conf);
this.start();
} catch (Throwable t) {
LOG.fatal("Error starting NodeManager", t);
System.exit(-1);
}
}
Example 5: main
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
public static void main(String argv[]) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
try {
Configuration conf = new YarnConfiguration();
GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
argv = hParser.getRemainingArgs();
// If -format-state-store is passed, delete the RMStateStore; otherwise start up normally
if (argv.length == 1 && argv[0].equals("-format-state-store")) {
deleteRMStateStore(conf);
} else {
ResourceManager resourceManager = new ResourceManager();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(resourceManager),
SHUTDOWN_HOOK_PRIORITY);
resourceManager.init(conf);
resourceManager.start();
}
} catch (Throwable t) {
LOG.fatal("Error starting ResourceManager", t);
System.exit(-1);
}
}
Example 6: launchAppHistoryServer
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
LOG);
ApplicationHistoryServer appHistoryServer = null;
try {
appHistoryServer = new ApplicationHistoryServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(appHistoryServer),
SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration();
new GenericOptionsParser(conf, args);
appHistoryServer.init(conf);
appHistoryServer.start();
} catch (Throwable t) {
LOG.fatal("Error starting ApplicationHistoryServer", t);
ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
}
return appHistoryServer;
}
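In the Hadoop source, this helper is invoked from the server's main method, so the launcher is a one-liner (a sketch consistent with the example above):

public static void main(String[] args) {
  launchAppHistoryServer(args);
}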
Example 7: reportFatalError
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
/**
* Report a fatal error to the parent (task) tracker.
*/
protected void reportFatalError(TaskAttemptID id, Throwable throwable,
String logMsg) {
LOG.fatal(logMsg);
if (ShutdownHookManager.get().isShutdownInProgress()) {
return;
}
Throwable tCause = throwable.getCause();
String cause = tCause == null
? StringUtils.stringifyException(throwable)
: StringUtils.stringifyException(tCause);
try {
umbilical.fatalError(id, cause);
} catch (IOException ioe) {
LOG.fatal("Failed to contact the tasktracker", ioe);
System.exit(-1);
}
}
Example 8: launchJobHistoryServer
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
static JobHistoryServer launchJobHistoryServer(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
JobHistoryServer jobHistoryServer = null;
try {
jobHistoryServer = new JobHistoryServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(jobHistoryServer),
SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration(new JobConf());
new GenericOptionsParser(conf, args);
jobHistoryServer.init(conf);
jobHistoryServer.start();
} catch (Throwable t) {
LOG.fatal("Error starting JobHistoryServer", t);
ExitUtil.terminate(-1, "Error starting JobHistoryServer");
}
return jobHistoryServer;
}
Example 9: DFSClientCache
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
DFSClientCache(NfsConfiguration config, int clientCache) {
this.config = config;
this.clientCache = CacheBuilder.newBuilder()
.maximumSize(clientCache)
.removalListener(clientRemovalListener())
.build(clientLoader());
this.inputstreamCache = CacheBuilder.newBuilder()
.maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE)
.expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
.removalListener(inputStreamRemovalListener())
.build(inputStreamLoader());
ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
SHUTDOWN_HOOK_PRIORITY);
}
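CacheFinalizer itself is not shown in the snippet above; a plausible sketch of such a hook (an assumption, not the actual Hadoop class) is a Runnable that invalidates both Guava caches so their removal listeners run and the underlying clients and streams are closed:

import com.google.common.cache.Cache;

// Hypothetical sketch of a CacheFinalizer-style shutdown hook.
class CacheFinalizerSketch implements Runnable {
  private final Cache<?, ?> clientCache;
  private final Cache<?, ?> inputstreamCache;

  CacheFinalizerSketch(Cache<?, ?> clientCache, Cache<?, ?> inputstreamCache) {
    this.clientCache = clientCache;
    this.inputstreamCache = inputstreamCache;
  }

  @Override
  public void run() {
    // invalidateAll() triggers the removal listeners registered in the constructor.
    clientCache.invalidateAll();
    inputstreamCache.invalidateAll();
  }
}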
Example 10: main
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
/**
* Main function of the DistCp program. Parses the input arguments (via OptionsParser)
* and invokes the DistCp::run() method via the ToolRunner.
* @param argv Command-line arguments sent to DistCp.
*/
public static void main(String argv[]) {
int exitCode;
try {
DistCp distCp = new DistCp();
Cleanup CLEANUP = new Cleanup(distCp);
ShutdownHookManager.get().addShutdownHook(CLEANUP,
SHUTDOWN_HOOK_PRIORITY);
exitCode = ToolRunner.run(getDefaultConf(), distCp, argv);
}
catch (Exception e) {
LOG.error("Couldn't complete DistCp operation: ", e);
exitCode = DistCpConstants.UNKNOWN_ERROR;
}
System.exit(exitCode);
}
Example 11: get
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
public static SpanReceiverHost get(Configuration conf, String confPrefix) {
synchronized (SpanReceiverHost.class) {
SpanReceiverHost host = hosts.get(confPrefix);
if (host != null) {
return host;
}
final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
newHost.loadSpanReceivers(conf);
ShutdownHookManager.get().addShutdownHook(new Runnable() {
public void run() {
newHost.closeReceivers();
}
}, 0);
hosts.put(confPrefix, newHost);
return newHost;
}
}
Example 12: main
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
public static void main(String[] args) {
try {
Configuration conf = new YarnConfiguration();
String appId = args[0];
String appUser = args[1];
HPCLogAggregateHandler aggregateHandler = new HPCLogAggregateHandler(
appId, appUser);
initAndStartAggregation(conf, appUser, aggregateHandler);
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(aggregateHandler),
SHUTDOWN_HOOK_PRIORITY);
} catch (Throwable t) {
LOG.fatal("Error starting Log Aggregation", t);
ExitUtil.terminate(1, t);
}
}
Example 13: main
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
public static void main(String argv[]) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
try {
Configuration conf = new YarnConfiguration();
GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
argv = hParser.getRemainingArgs();
// If -format-state-store is passed, delete the RMStateStore; otherwise start up normally
if (argv.length == 1 && argv[0].equals("-format-state-store")) {
deleteRMStateStore(conf);
} else {
ResourceManager resourceManager = new ResourceManager();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(resourceManager),
SHUTDOWN_HOOK_PRIORITY);
resourceManager.init(conf);
resourceManager.start();
}
} catch (Throwable t) {
LOG.fatal("Error starting ResourceManager", t);
System.exit(-1);
}
}
Example 14: stop
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
@Override
public void stop(BundleContext context) throws Exception {
// There's a problem inside OSGi when the framework is being shut down:
// hadoop.fs code registers JVM shutdown hooks throughout the code, and this ordered
// list of hooks is run in the shutdown thread.
// At that time the bundle class loader / bundle wiring is no longer valid (the bundle is
// stopped), so ShutdownHookManager can't load additional classes. But some inner classes
// are loaded while iterating over the registered hadoop shutdown hooks.
// Let's explicitly load these inner classes while the bundle is being stopped, as this is
// the last chance to use a valid bundle class loader.
// This is based on knowledge of what's contained in the SMX bundle
// org.apache.servicemix.bundles.hadoop-client-*.jar;
// in short, hadoop may have some quirks when running inside OSGi.
ClassLoader hadoopCl = ShutdownHookManager.class.getClassLoader();
if (hadoopCl != null) {
String shm = ShutdownHookManager.class.getName();
hadoopCl.loadClass(shm + "$1");
hadoopCl.loadClass(shm + "$2");
hadoopCl.loadClass(shm + "$HookEntry");
}
}
Example 15: initAndStartNodeManager
import org.apache.hadoop.util.ShutdownHookManager; // import the required package/class
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
try {
// Remove the old hook if we are rebooting.
if (hasToReboot && null != nodeManagerShutdownHook) {
ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
}
nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
SHUTDOWN_HOOK_PRIORITY);
this.init(conf);
this.start();
} catch (Throwable t) {
LOG.fatal("Error starting NodeManager", t);
System.exit(-1);
}
}