本文整理汇总了Java中org.apache.hadoop.metrics2.MetricsSystem类的典型用法代码示例。如果您正苦于以下问题:Java MetricsSystem类的具体用法?Java MetricsSystem怎么用?Java MetricsSystem使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
MetricsSystem类属于org.apache.hadoop.metrics2包,在下文中一共展示了MetricsSystem类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testFailedWrite
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Verifies that publishing metrics reports an error once the target
 * directory has been made read-only.
 */
@Test
public void testFailedWrite() {
  String targetPath = methodDir.getAbsolutePath();
  MetricsSystem metricsSystem = initMetricsSystem(targetPath, false, false);

  new MyMetrics1().registerWith(metricsSystem);

  // Revoke write access so the sink is forced to fail.
  methodDir.setWritable(false);
  MockSink.errored = false;

  try {
    metricsSystem.publishMetricsNow();

    assertTrue("No exception was generated while writing metrics "
        + "even though the target directory was not writable",
        MockSink.errored);

    metricsSystem.stop();
    metricsSystem.shutdown();
  } finally {
    // Restore write access so the directory can be deleted during cleanup.
    methodDir.setWritable(true);
  }
}
示例2: doWriteTest
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Writes metrics through the given system, reads the produced log files
 * back from {@code path}, and returns their combined contents as a single
 * string. Asserts that exactly {@code count} files were produced.
 *
 * @param ms an initialized MetricsSystem to use
 * @param path the target path from which to read the logs
 * @param count the number of log files to expect
 * @return the contents of the log files
 * @throws IOException when the log file can't be read
 * @throws URISyntaxException when the target path is an invalid URL
 */
protected String doWriteTest(MetricsSystem ms, String path, int count)
    throws IOException, URISyntaxException {
  // Capture the timestamp before publishing so it matches the file names.
  final String timestamp = DATE_FORMAT.format(new Date());

  MyMetrics1 firstMetrics = new MyMetrics1().registerWith(ms);
  new MyMetrics2().registerWith(ms);

  firstMetrics.testMetric1.incr();
  firstMetrics.testMetric2.incr(2);

  // Flush everything to the sink, then tear the metrics system down.
  ms.publishMetricsNow();
  ms.stop();
  ms.shutdown();

  return readLogFile(path, timestamp, count);
}
示例3: testUnregisterSource
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Unregistering a source must work even before the metrics system has
 * been started, and must not disturb other registered sources.
 */
@Test public void testUnregisterSource() {
  MetricsSystem metricsSystem = new MetricsSystemImpl();
  TestSource firstSource = new TestSource("ts1");
  TestSource secondSource = new TestSource("ts2");

  metricsSystem.register("ts1", "", firstSource);
  metricsSystem.register("ts2", "", secondSource);
  assertNotNull(metricsSystem.getSource("ts1"));

  // Removal should succeed while the system is not started.
  metricsSystem.unregisterSource("ts1");
  assertNull(metricsSystem.getSource("ts1"));

  // The sibling source must remain registered.
  assertNotNull(metricsSystem.getSource("ts2"));

  metricsSystem.shutdown();
}
示例4: testRegisterSourceWithoutName
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * A source registered without an explicit name is looked up by its
 * simple class name.
 */
@Test public void testRegisterSourceWithoutName() {
  MetricsSystem metricsSystem = new MetricsSystemImpl();
  TestSource unnamedSource = new TestSource("ts");
  TestSource2 unnamedSource2 = new TestSource2("ts2");

  metricsSystem.register(unnamedSource);
  metricsSystem.register(unnamedSource2);
  metricsSystem.init("TestMetricsSystem");

  // The class name doubles as the source name when none was given.
  MetricsSystemImpl impl = (MetricsSystemImpl) metricsSystem;
  assertNotNull(impl.getSourceAdapter("TestSource"));
  assertNotNull(impl.getSourceAdapter("TestSource2"));

  metricsSystem.shutdown();
}
示例5: forContainer
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Returns the ContainerMetrics for the given container, creating and
 * caching one on first use. When a metrics system is supplied, the new
 * instance is registered with it and the registered instance is cached.
 */
synchronized static ContainerMetrics forContainer(
    MetricsSystem ms, ContainerId containerId, long flushPeriodMs) {
  ContainerMetrics cached = usageMetrics.get(containerId);
  if (cached != null) {
    return cached;
  }

  ContainerMetrics created = new ContainerMetrics(
      ms, containerId, flushPeriodMs).tag(RECORD_INFO, containerId);

  // Keep whatever instance the metrics system hands back on registration.
  if (ms != null) {
    created = ms.register(sourceName(containerId),
        "Metrics for container: " + containerId, created);
  }

  usageMetrics.put(containerId, created);
  return created;
}
示例6: forQueue
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Returns the QueueMetrics for the named queue, creating and caching it
 * on first use. When a metrics system is supplied, the new instance is
 * registered with it and the registered instance is cached.
 */
public synchronized
static QueueMetrics forQueue(MetricsSystem ms, String queueName,
                             Queue parent, boolean enableUserMetrics,
                             Configuration conf) {
  QueueMetrics cached = queueMetrics.get(queueName);
  if (cached != null) {
    return cached;
  }

  QueueMetrics created =
      new QueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
          .tag(QUEUE_INFO, queueName);

  // Keep whatever instance the metrics system hands back on registration.
  if (ms != null) {
    created = ms.register(
        sourceName(queueName).toString(),
        "Metrics for queue: " + queueName, created);
  }

  queueMetrics.put(queueName, created);
  return created;
}
示例7: forQueue
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Returns the FSQueueMetrics for the named queue, creating and caching
 * it on first use. New instances are registered with the default
 * metrics system when one is available.
 */
public synchronized
static FSQueueMetrics forQueue(String queueName, Queue parent,
    boolean enableUserMetrics, Configuration conf) {
  MetricsSystem ms = DefaultMetricsSystem.instance();

  QueueMetrics cached = queueMetrics.get(queueName);
  if (cached != null) {
    return (FSQueueMetrics) cached;
  }

  QueueMetrics created =
      new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
          .tag(QUEUE_INFO, queueName);

  // Keep whatever instance the metrics system hands back on registration.
  if (ms != null) {
    created = ms.register(
        sourceName(queueName).toString(),
        "Metrics for queue: " + queueName, created);
  }

  queueMetrics.put(queueName, created);
  return (FSQueueMetrics) created;
}
示例8: tearDown
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Per-test cleanup: removes the temporary host file, resets cluster
 * metrics, stops the resource manager, and shuts down the default
 * metrics system when this test registered the ClusterMetrics source.
 */
@After
public void tearDown() {
  // Delete the host file written by the test, if one exists.
  if (hostFile != null && hostFile.exists()) {
    hostFile.delete();
  }

  ClusterMetrics.destroy();

  if (rm != null) {
    rm.stop();
  }

  // Only shut the default system down if ClusterMetrics is registered,
  // to avoid tearing down a system another test still relies on.
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  if (metricsSystem.getSource("ClusterMetrics") != null) {
    DefaultMetricsSystem.shutdown();
  }
}
示例9: testShuffleMetrics
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Validate shuffle connection and input/output metrics.
 *
 * @throws Exception exception
 */
@Test (timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem metricsSystem = new MetricsSystemImpl();
  ShuffleHandler handler = new ShuffleHandler(metricsSystem);

  // Stubbed future: isSuccess() yields true on the first call, then false.
  ChannelFuture future = make(stub(ChannelFuture.class).
      returning(true, false).from.isSuccess());

  // Two open connections transferring 1 MiB and 2 MiB respectively.
  handler.metrics.shuffleConnections.incr();
  handler.metrics.shuffleOutputBytes.incr(1 * MiB);
  handler.metrics.shuffleConnections.incr();
  handler.metrics.shuffleOutputBytes.incr(2 * MiB);
  checkShuffleMetrics(metricsSystem, 3 * MiB, 0, 0, 2);

  // Completing both transfers records one success and one failure.
  handler.metrics.operationComplete(future);
  handler.metrics.operationComplete(future);
  checkShuffleMetrics(metricsSystem, 3 * MiB, 1, 1, 0);
}
示例10: forContainer
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Returns the ContainerMetrics for the given container, creating and
 * caching one (with the given flush period and unregistration delay) on
 * first use. When a metrics system is supplied, the new instance is
 * registered with it and the registered instance is cached.
 */
synchronized static ContainerMetrics forContainer(
    MetricsSystem ms, ContainerId containerId, long flushPeriodMs,
    long delayMs) {
  ContainerMetrics cached = usageMetrics.get(containerId);
  if (cached != null) {
    return cached;
  }

  ContainerMetrics created = new ContainerMetrics(ms, containerId,
      flushPeriodMs, delayMs).tag(RECORD_INFO, containerId);

  // Keep whatever instance the metrics system hands back on registration.
  if (ms != null) {
    created = ms.register(sourceName(containerId),
        "Metrics for container: " + containerId, created);
  }

  usageMetrics.put(containerId, created);
  return created;
}
示例11: forQueue
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Returns the CSQueueMetrics for the named queue, creating and caching
 * it on first use. New instances are registered with the default
 * metrics system when one is available.
 */
public synchronized static CSQueueMetrics forQueue(String queueName,
    Queue parent, boolean enableUserMetrics, Configuration conf) {
  MetricsSystem ms = DefaultMetricsSystem.instance();

  QueueMetrics cached = queueMetrics.get(queueName);
  if (cached != null) {
    return (CSQueueMetrics) cached;
  }

  QueueMetrics created =
      new CSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
          .tag(QUEUE_INFO, queueName);

  // Keep whatever instance the metrics system hands back on registration.
  if (ms != null) {
    created = ms.register(sourceName(queueName).toString(), "Metrics for queue: "
        + queueName, created);
  }

  queueMetrics.put(queueName, created);
  return (CSQueueMetrics) created;
}
示例12: ContainerMetrics
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
// Builds a per-container metrics source: stat entries for physical memory,
// CPU core usage, and milli-vcores, plus gauges for the container's memory
// and vcore limits, all registered under a container-specific record name.
ContainerMetrics(
MetricsSystem ms, ContainerId containerId, long flushPeriodMs) {
// Record info is keyed by the container's source name.
this.recordInfo =
info(sourceName(containerId), RECORD_INFO.description());
this.registry = new MetricsRegistry(recordInfo);
this.metricsSystem = ms;
this.containerId = containerId;
this.flushPeriodMs = flushPeriodMs;
// NOTE(review): called before the stat/gauge metrics below are created —
// presumably schedules the periodic flush timer; confirm the ordering is
// intentional.
scheduleTimerTaskIfRequired();
this.pMemMBsStat = registry.newStat(
PMEM_USAGE_METRIC_NAME, "Physical memory stats", "Usage", "MBs", true);
this.cpuCoreUsagePercent = registry.newStat(
PHY_CPU_USAGE_METRIC_NAME, "Physical Cpu core percent usage stats",
"Usage", "Percents", true);
this.milliVcoresUsed = registry.newStat(
VCORE_USAGE_METRIC_NAME, "1000 times Vcore usage", "Usage",
"MilliVcores", true);
// Limit gauges start at 0 until a limit is recorded.
this.pMemLimitMbs = registry.newGauge(
PMEM_LIMIT_METRIC_NAME, "Physical memory limit in MBs", 0);
this.vMemLimitMbs = registry.newGauge(
VMEM_LIMIT_METRIC_NAME, "Virtual memory limit in MBs", 0);
this.cpuVcoreLimit = registry.newGauge(
VCORE_LIMIT_METRIC_NAME, "CPU limit in number of vcores", 0);
}
示例13: HadoopMetrics2Reporter
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
// Private constructor: bridges a Dropwizard MetricRegistry into the Hadoop
// metrics2 system. Instances are presumably created via a builder/factory
// not visible in this chunk.
private HadoopMetrics2Reporter(MetricRegistry registry, TimeUnit rateUnit, TimeUnit durationUnit,
MetricFilter filter, MetricsSystem metrics2System, String jmxContext, String description,
String recordName, String context) {
super(registry, "hadoop-metrics2-reporter", filter, rateUnit, durationUnit);
this.metrics2Registry = new MetricsRegistry(Interns.info(jmxContext, description));
this.metrics2System = metrics2System;
this.recordName = recordName;
this.context = context;
// These could really be Collection.emptyMap(), but this makes testing a bit easier.
this.dropwizardGauges = EMPTY_GAUGE_MAP;
this.dropwizardCounters = EMPTY_COUNTER_MAP;
this.dropwizardHistograms = EMPTY_HISTOGRAM_MAP;
this.dropwizardMeters = EMPTY_METER_MAP;
this.dropwizardTimers = EMPTY_TIMER_MAP;
// Register this source with the Metrics2 system.
// Make sure this is the last thing done as getMetrics() can be called at any time after.
this.metrics2System.register(Objects.requireNonNull(jmxContext),
Objects.requireNonNull(description), this);
}
示例14: testShuffleMetrics
import org.apache.hadoop.metrics2.MetricsSystem; //导入依赖的package包/类
/**
 * Checks connection and output-byte accounting in the shuffle handler,
 * before and after completing two transfers.
 */
@Test (timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem system = new MetricsSystemImpl();
  ShuffleHandler shuffleHandler = new ShuffleHandler(system);

  // Stubbed future reporting success once, then failure.
  ChannelFuture completion = make(stub(ChannelFuture.class).
      returning(true, false).from.isSuccess());

  // Simulate two connections: 1 MiB and 2 MiB of shuffle output.
  shuffleHandler.metrics.shuffleConnections.incr();
  shuffleHandler.metrics.shuffleOutputBytes.incr(1 * MiB);
  shuffleHandler.metrics.shuffleConnections.incr();
  shuffleHandler.metrics.shuffleOutputBytes.incr(2 * MiB);
  checkShuffleMetrics(system, 3 * MiB, 0, 0, 2);

  // Both completions drain the open connections: one success, one failure.
  shuffleHandler.metrics.operationComplete(completion);
  shuffleHandler.metrics.operationComplete(completion);
  checkShuffleMetrics(system, 3 * MiB, 1, 1, 0);
}