本文整理汇总了Java中org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.instance方法的典型用法代码示例。如果您正苦于以下问题:Java DefaultMetricsSystem.instance方法的具体用法?Java DefaultMetricsSystem.instance怎么用?Java DefaultMetricsSystem.instance使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.metrics2.lib.DefaultMetricsSystem
的用法示例。
在下文中一共展示了DefaultMetricsSystem.instance方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: forQueue
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Returns the FSQueueMetrics for the named queue, creating and registering
 * it with the default metrics system on first use.
 */
public synchronized
static FSQueueMetrics forQueue(String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
  // Fast path: reuse the cached metrics object for this queue.
  QueueMetrics cached = queueMetrics.get(queueName);
  if (cached != null) {
    return (FSQueueMetrics) cached;
  }
  MetricsSystem system = DefaultMetricsSystem.instance();
  QueueMetrics created =
      new FSQueueMetrics(system, queueName, parent, enableUserMetrics, conf)
          .tag(QUEUE_INFO, queueName);
  // Register with the metrics system when one is available; keep whatever
  // instance register() hands back, since it may differ from the input.
  if (system != null) {
    created = system.register(
        sourceName(queueName).toString(),
        "Metrics for queue: " + queueName, created);
  }
  queueMetrics.put(queueName, created);
  return (FSQueueMetrics) created;
}
示例2: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
@After
public void tearDown() {
  // Remove the temporary hosts file created by the test, if any.
  if (hostFile != null && hostFile.exists()) {
    hostFile.delete();
  }
  ClusterMetrics.destroy();
  if (rm != null) {
    rm.stop();
  }
  // Shut the default metrics system down only while the ClusterMetrics
  // source is still registered.
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  boolean clusterMetricsRegistered =
      metricsSystem.getSource("ClusterMetrics") != null;
  if (clusterMetricsRegistered) {
    DefaultMetricsSystem.shutdown();
  }
}
示例3: run
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Clears cached JMX mbeans by bouncing the default metrics system
 * (stop, short pause, start). Any failure is logged at debug level and
 * otherwise ignored, since the metrics system may not have been started.
 */
@Override
public void run() {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Clearing JMX mbean cache.");
  }
  // This is pretty extreme but it's the best way that
  // I could find to get metrics to be removed.
  try {
    if (DefaultMetricsSystem.instance() != null) {
      DefaultMetricsSystem.instance().stop();
      // Sleep some time so that the rest of the hadoop metrics
      // system knows that things are done
      Thread.sleep(500);
      DefaultMetricsSystem.instance().start();
    }
  } catch (InterruptedException interrupted) {
    // Restore the interrupt status so callers can observe the interruption;
    // the original broad catch silently swallowed it.
    Thread.currentThread().interrupt();
    LOG.debug("interrupted while restarting the metrics system", interrupted);
  } catch (Exception exception) {
    LOG.debug("error clearing the jmx it appears the metrics system hasn't been started",
        exception);
  }
}
示例4: forQueue
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Returns the CSQueueMetrics for the named queue, creating and registering
 * it with the default metrics system on first use.
 */
public synchronized static CSQueueMetrics forQueue(String queueName,
    Queue parent, boolean enableUserMetrics, Configuration conf) {
  // Fast path: reuse the cached metrics object for this queue.
  QueueMetrics cached = queueMetrics.get(queueName);
  if (cached != null) {
    return (CSQueueMetrics) cached;
  }
  MetricsSystem system = DefaultMetricsSystem.instance();
  QueueMetrics created =
      new CSQueueMetrics(system, queueName, parent, enableUserMetrics, conf)
          .tag(QUEUE_INFO, queueName);
  // Register with the metrics system when one is available; keep whatever
  // instance register() hands back, since it may differ from the input.
  if (system != null) {
    created = system.register(sourceName(queueName).toString(),
        "Metrics for queue: " + queueName, created);
  }
  queueMetrics.put(queueName, created);
  return (CSQueueMetrics) created;
}
示例5: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Builds an annotation-driven metrics source from a fresh CleanerMetrics
 * instance, registers it with the default metrics system, and returns it.
 */
static CleanerMetrics create() {
  CleanerMetrics cleanerMetrics = new CleanerMetrics();
  MetricsSourceBuilder builder =
      MetricsAnnotations.newSourceBuilder(cleanerMetrics);
  final MetricsSource source = builder.build();
  DefaultMetricsSystem.instance().register(
      "cleaner", "The cleaner service of truly shared cache", source);
  // Remember the built source so it can be referenced later.
  cleanerMetrics.metricSource = source;
  return cleanerMetrics;
}
示例6: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Creates a SharedCacheUploaderMetrics instance and registers it with the
 * default metrics system under the uploader-requests source name.
 */
static SharedCacheUploaderMetrics create() {
  SharedCacheUploaderMetrics uploaderMetrics = new SharedCacheUploaderMetrics();
  DefaultMetricsSystem.instance()
      .register("SharedCacheUploaderRequests", null, uploaderMetrics);
  return uploaderMetrics;
}
示例7: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Creates a ClientSCMMetrics instance and registers it with the default
 * metrics system under the client-requests source name.
 */
static ClientSCMMetrics create() {
  ClientSCMMetrics clientMetrics = new ClientSCMMetrics();
  DefaultMetricsSystem.instance().register("clientRequests", null, clientMetrics);
  return clientMetrics;
}
示例8: registerMetrics
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Initializes the shared metrics registry, tags it as belonging to the
 * ResourceManager, and registers the singleton with the metrics system.
 */
private static void registerMetrics() {
  registry = new MetricsRegistry(RECORD_INFO);
  registry.tag(RECORD_INFO, "ResourceManager");
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  // The metrics system may not be initialized; skip registration then.
  if (metricsSystem == null) {
    return;
  }
  metricsSystem.register("ClusterMetrics", "Metrics for the Yarn Cluster", INSTANCE);
}
示例9: FSOpDurations
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Sets up the metrics registry for FS operation durations and registers
 * this object as a source with the default metrics system, if one exists.
 */
private FSOpDurations() {
  registry = new MetricsRegistry(RECORD_INFO);
  registry.tag(RECORD_INFO, "FSOpDurations");
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  // The metrics system may not be initialized; skip registration then.
  if (metricsSystem == null) {
    return;
  }
  metricsSystem.register(RECORD_INFO.name(), RECORD_INFO.description(), this);
}
示例10: tearDown
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
@After
public void tearDown() {
  ClusterMetrics.destroy();
  // Shut the default metrics system down only while the ClusterMetrics
  // source is still registered.
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  boolean clusterMetricsRegistered =
      metricsSystem.getSource("ClusterMetrics") != null;
  if (clusterMetricsRegistered) {
    DefaultMetricsSystem.shutdown();
  }
}
示例11: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Creates the NFS3 gateway metrics, wiring in JVM metrics and the
 * percentile intervals read from configuration, and registers the result
 * with the default metrics system.
 */
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  JvmMetrics jvmMetrics = JvmMetrics.create(gatewayName, sessionId, metricsSystem);
  // Percentile measurement is [50th,75th,90th,95th,99th] currently
  int[] percentileIntervals =
      conf.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  Nfs3Metrics metrics =
      new Nfs3Metrics(gatewayName, sessionId, percentileIntervals, jvmMetrics);
  return metricsSystem.register(metrics);
}
示例12: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Creates the NameNode metrics for the given role, wiring in JVM metrics
 * and the percentile intervals read from configuration, and registers the
 * result with the default metrics system.
 */
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = r.toString();
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  JvmMetrics jvmMetrics = JvmMetrics.create(processName, sessionId, metricsSystem);
  // Percentile measurement is off by default, by watching no intervals
  int[] percentileIntervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  NameNodeMetrics metrics =
      new NameNodeMetrics(processName, sessionId, percentileIntervals, jvmMetrics);
  return metricsSystem.register(metrics);
}
示例13: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Creates the DataNode metrics, deriving a unique source name from the
 * datanode name (or a randomized placeholder when it is empty), and
 * registers the result with the default metrics system.
 */
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  JvmMetrics jvmMetrics = JvmMetrics.create("DataNode", sessionId, metricsSystem);
  // Make the name usable as a metrics source id: ':' is replaced with '-',
  // and an empty name falls back to a randomized placeholder.
  String suffix;
  if (dnName.isEmpty()) {
    suffix = "UndefinedDataNodeName" + DFSUtil.getRandom().nextInt();
  } else {
    suffix = dnName.replace(':', '-');
  }
  String sourceName = "DataNodeActivity-" + suffix;
  // Percentile measurement is off by default, by watching no intervals
  int[] percentileIntervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return metricsSystem.register(sourceName, null,
      new DataNodeMetrics(sourceName, sessionId, percentileIntervals, jvmMetrics));
}
示例14: create
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/**
 * Creates the DataNode metrics, deriving a unique source name from the
 * datanode name (or a randomized placeholder when it is empty), and
 * registers the result with the default metrics system.
 */
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
  JvmMetrics jvmMetrics = JvmMetrics.create("DataNode", sessionId, metricsSystem);
  // Make the name usable as a metrics source id: ':' is replaced with '-',
  // and an empty name falls back to a randomized placeholder.
  String suffix;
  if (dnName.isEmpty()) {
    suffix = "UndefinedDataNodeName" + ThreadLocalRandom.current().nextInt();
  } else {
    suffix = dnName.replace(':', '-');
  }
  String sourceName = "DataNodeActivity-" + suffix;
  // Percentile measurement is off by default, by watching no intervals
  int[] percentileIntervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return metricsSystem.register(sourceName, null,
      new DataNodeMetrics(sourceName, sessionId, percentileIntervals, jvmMetrics));
}
示例15: ShuffleHandler
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; //导入方法依赖的package包/类
/** Creates a handler backed by the process-wide default metrics system. */
public ShuffleHandler() {
  this(DefaultMetricsSystem.instance());
}