This article collects typical usage examples of the Java method org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.shutdown. If you are unsure what DefaultMetricsSystem.shutdown does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.
The following presents 15 code examples of the DefaultMetricsSystem.shutdown method, sorted by popularity by default.
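Before the examples, a minimal sketch of the typical lifecycle around this method may help: initialize the process-wide default metrics system, register a source, and later call DefaultMetricsSystem.shutdown() so that registered source names and MBeans do not collide with a later initialize() (this is also why many of the test setUp/tearDown methods below call it). The MyDaemonMetrics class, the "ExampleDaemon" prefix, and the metric names here are illustrative assumptions, not taken from the examples.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical annotated source, used only to show register()/shutdown().
@Metrics(about = "Example daemon metrics", context = "example")
class MyDaemonMetrics {
  @Metric("Number of requests handled") MutableCounterLong requests;
}

public class MetricsLifecycleSketch {
  public static void main(String[] args) {
    // Initialize the process-wide default metrics system under a prefix.
    MetricsSystem ms = DefaultMetricsSystem.initialize("ExampleDaemon");
    // Register the annotated source; the system snapshots it periodically.
    MyDaemonMetrics metrics =
        ms.register("MyDaemonMetrics", "Metrics for the example daemon",
            new MyDaemonMetrics());
    metrics.requests.incr();
    // On daemon shutdown (or between tests), tear down the singleton so that
    // source names and MBeans do not clash on the next initialize().
    DefaultMetricsSystem.shutdown();
  }
}

The examples below rely on exactly this reset behavior, either to stop a daemon's metrics cleanly or to clear pre-existing source names between tests.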
Example 1: testStartStopStart
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Test public void testStartStopStart() {
  DefaultMetricsSystem.shutdown(); // Clear pre-existing source names.
  MetricsSystemImpl ms = new MetricsSystemImpl("test");
  TestSource ts = new TestSource("ts");
  ms.start();
  ms.register("ts", "", ts);
  MetricsSourceAdapter sa = ms.getSourceAdapter("ts");
  assertNotNull(sa);
  assertNotNull(sa.getMBeanName());
  ms.stop();
  ms.shutdown();
  ms.start();
  sa = ms.getSourceAdapter("ts");
  assertNotNull(sa);
  assertNotNull(sa.getMBeanName());
  ms.stop();
  ms.shutdown();
}
Example 2: serviceStop
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Override
protected void serviceStop() throws Exception {
  DefaultMetricsSystem.shutdown();
  if (rmContext != null) {
    RMStateStore store = rmContext.getStateStore();
    try {
      store.close();
    } catch (Exception e) {
      LOG.error("Error closing store.", e);
    }
  }
  super.serviceStop();
}
Example 3: setUp
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  configuration = new Configuration();
  UserGroupInformation.setConfiguration(configuration);
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
      + RM2_NODE_ID);
  for (String confKey : YarnConfiguration
      .getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
  }
  // Enable webapp to test web-services also
  configuration.setBoolean(MockRM.ENABLE_WEBAPP, true);
  configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
Example 4: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@After
public void tearDown() {
  if (hostFile != null && hostFile.exists()) {
    hostFile.delete();
  }
  ClusterMetrics.destroy();
  if (rm != null) {
    rm.stop();
  }
  MetricsSystem ms = DefaultMetricsSystem.instance();
  if (ms.getSource("ClusterMetrics") != null) {
    DefaultMetricsSystem.shutdown();
  }
}
Example 5: testInitFirstVerifyCallBacks
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Test public void testInitFirstVerifyCallBacks() throws Exception {
  DefaultMetricsSystem.shutdown();
  new ConfigBuilder().add("*.period", 8)
      //.add("test.sink.plugin.urls", getPluginUrlsAsString())
      .add("test.sink.test.class", TestSink.class.getName())
      .add("test.*.source.filter.exclude", "s0")
      .add("test.source.s1.metric.filter.exclude", "X*")
      .add("test.sink.sink1.metric.filter.exclude", "Y*")
      .add("test.sink.sink2.metric.filter.exclude", "Y*")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  ms.register("s0", "s0 desc", new TestSource("s0rec"));
  TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
  s1.c1.incr();
  s1.xxx.incr();
  s1.g1.set(2);
  s1.yyy.incr(2);
  s1.s1.add(0);
  MetricsSink sink1 = mock(MetricsSink.class);
  MetricsSink sink2 = mock(MetricsSink.class);
  ms.registerSink("sink1", "sink1 desc", sink1);
  ms.registerSink("sink2", "sink2 desc", sink2);
  ms.publishMetricsNow(); // publish the metrics
  try {
    verify(sink1, timeout(200).times(2)).putMetrics(r1.capture());
    verify(sink2, timeout(200).times(2)).putMetrics(r2.capture());
  } finally {
    ms.stop();
    ms.shutdown();
  }
  // When we call stop, at most two sources will be consumed by each sink thread.
  List<MetricsRecord> mr1 = r1.getAllValues();
  List<MetricsRecord> mr2 = r2.getAllValues();
  checkMetricsRecords(mr1);
  assertEquals("output", mr1, mr2);
}
Example 6: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@After
public void tearDown() {
  this.registeredNodes.clear();
  heartBeatID = 0;
  ServiceOperations.stop(nm);
  assertionFailedInThread.set(false);
  DefaultMetricsSystem.shutdown();
}
Example 7: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@After
public void tearDown() {
  if (scheduler != null) {
    scheduler.stop();
    scheduler = null;
  }
  if (resourceManager != null) {
    resourceManager.stop();
    resourceManager = null;
  }
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
Example 8: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@After
public void tearDown() {
  ClusterMetrics.destroy();
  MetricsSystem ms = DefaultMetricsSystem.instance();
  if (ms.getSource("ClusterMetrics") != null) {
    DefaultMetricsSystem.shutdown();
  }
}
Example 9: serviceStop
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Override
protected void serviceStop() throws Exception {
  if (webApp != null) {
    webApp.stop();
  }
  DefaultMetricsSystem.shutdown();
  super.serviceStop();
}
Example 10: submit
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
public Job submit(Configuration conf, boolean mapSpeculative,
    boolean reduceSpeculative) throws Exception {
  String user = conf.get(MRJobConfig.USER_NAME, UserGroupInformation
      .getCurrentUser().getShortUserName());
  conf.set(MRJobConfig.USER_NAME, user);
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, testAbsPath.toString());
  conf.setBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
  // TODO: fix the bug where the speculator gets events with
  // not-fully-constructed objects. For now, disable speculative exec
  conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, mapSpeculative);
  conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, reduceSpeculative);
  init(conf);
  start();
  DefaultMetricsSystem.shutdown();
  Job job = getContext().getAllJobs().values().iterator().next();
  if (assignedQueue != null) {
    job.setQueueName(assignedQueue);
  }
  // Write job.xml
  String jobFile = MRApps.getJobFile(conf, user,
      TypeConverter.fromYarn(job.getID()));
  LOG.info("Writing job conf to " + jobFile);
  new File(jobFile).getParentFile().mkdirs();
  conf.writeXml(new FileOutputStream(jobFile));
  return job;
}
Example 11: serviceStop
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Override
protected void serviceStop() throws Exception {
  if (isStopping.getAndSet(true)) {
    return;
  }
  try {
    super.serviceStop();
    DefaultMetricsSystem.shutdown();
  } finally {
    // YARN-3641: a failure while stopping the NM's services shouldn't
    // block the release of NMLevelDBStore.
    stopRecoveryStore();
  }
}
Example 12: stop
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
/**
 * Stop the daemon with the given status code
 * @param rc the status code with which to exit (non-zero
 * should indicate an error)
 */
public void stop(int rc) {
  this.resultCode = rc;
  if (rpcServer != null) {
    rpcServer.stop();
  }
  if (httpServer != null) {
    try {
      httpServer.stop();
    } catch (IOException ioe) {
      LOG.warn("Unable to stop HTTP server for " + this, ioe);
    }
  }
  for (Journal j : journalsById.values()) {
    IOUtils.cleanup(LOG, j);
  }
  DefaultMetricsSystem.shutdown();
  if (journalNodeInfoBeanName != null) {
    MBeans.unregister(journalNodeInfoBeanName);
    journalNodeInfoBeanName = null;
  }
  if (tracer != null) {
    tracer.close();
    tracer = null;
  }
}
Example 13: testInitFirstVerifyStopInvokedImmediately
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Test public void testInitFirstVerifyStopInvokedImmediately() throws Exception {
  DefaultMetricsSystem.shutdown();
  new ConfigBuilder().add("*.period", 8)
      //.add("test.sink.plugin.urls", getPluginUrlsAsString())
      .add("test.sink.test.class", TestSink.class.getName())
      .add("test.*.source.filter.exclude", "s0")
      .add("test.source.s1.metric.filter.exclude", "X*")
      .add("test.sink.sink1.metric.filter.exclude", "Y*")
      .add("test.sink.sink2.metric.filter.exclude", "Y*")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("Test");
  ms.start();
  ms.register("s0", "s0 desc", new TestSource("s0rec"));
  TestSource s1 = ms.register("s1", "s1 desc", new TestSource("s1rec"));
  s1.c1.incr();
  s1.xxx.incr();
  s1.g1.set(2);
  s1.yyy.incr(2);
  s1.s1.add(0);
  MetricsSink sink1 = mock(MetricsSink.class);
  MetricsSink sink2 = mock(MetricsSink.class);
  ms.registerSink("sink1", "sink1 desc", sink1);
  ms.registerSink("sink2", "sink2 desc", sink2);
  ms.publishMetricsNow(); // publish the metrics
  ms.stop();
  ms.shutdown();
  // When we call stop, at most two sources will be consumed by each sink thread.
  verify(sink1, atMost(2)).putMetrics(r1.capture());
  List<MetricsRecord> mr1 = r1.getAllValues();
  verify(sink2, atMost(2)).putMetrics(r2.capture());
  List<MetricsRecord> mr2 = r2.getAllValues();
  if (mr1.size() != 0 && mr2.size() != 0) {
    checkMetricsRecords(mr1);
    assertEquals("output", mr1, mr2);
  } else if (mr1.size() != 0) {
    checkMetricsRecords(mr1);
  } else if (mr2.size() != 0) {
    checkMetricsRecords(mr2);
  }
}
Example 14: serviceStop
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@Override
protected void serviceStop() throws Exception {
  DefaultMetricsSystem.shutdown();
  super.serviceStop();
}
Example 15: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; // import the package/class this method depends on
@After
public void tearDown() {
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}