This article collects typical usage examples of the Java class org.apache.hadoop.metrics.MetricsUtil. If you are wondering what MetricsUtil is for and how to use it, the curated examples below may help.
MetricsUtil belongs to the org.apache.hadoop.metrics package. 15 code examples of the class are shown below, sorted by popularity by default.
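Most of the examples share one lifecycle: look up a MetricsContext, create a MetricsRecord, tag it, and register an Updater that the metrics system calls once per period. The following is a minimal sketch of that pattern using the old metrics v1 API; the class name MyServiceMetrics and the "requests" counter are invented for illustration.

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;

// Hedged sketch of the lifecycle shared by the examples below.
public class MyServiceMetrics implements Updater {
  private final MetricsRecord metricsRecord;
  private long requestCount; // invented example counter

  public MyServiceMetrics(String serviceName) {
    // 1. Look up the named context from the metrics configuration.
    MetricsContext context = MetricsUtil.getContext("myservice");
    // 2. Create a record inside it and tag it so sinks can tell instances apart.
    metricsRecord = MetricsUtil.createRecord(context, "myservice");
    metricsRecord.setTag("serviceName", serviceName);
    // 3. Register for the periodic doUpdates() callback.
    context.registerUpdater(this);
  }

  public synchronized void incrementRequests() {
    requestCount++;
  }

  // Called by the metrics system once per period.
  @Override
  public void doUpdates(MetricsContext context) {
    synchronized (this) {
      metricsRecord.setMetric("requests", requestCount);
    }
    metricsRecord.update(); // publish the staged values
  }
}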
Example 1: init
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
@Override
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
        ": could not init arity", e);
    return;
  }
  // Look up each sub-context by the reference name built from SUB_FMT.
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
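This init() belongs to a composite context that fans metrics out to several sub-contexts, each configured by attributes on the factory. A hedged sketch of the programmatic setup follows; it assumes ARITY_LABEL = "arity" and SUB_FMT = "%s.sub%d" (check the CompositeContext version at hand for the exact attribute names and numbering), and the same configuration is more commonly placed in hadoop-metrics.properties.

import java.io.IOException;

import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsUtil;

// Hypothetical setup: fan the "composite" context out to two sub-contexts.
static MetricsContext initCompositeContext() throws IOException {
  ContextFactory factory = ContextFactory.getFactory();
  factory.setAttribute("composite.class",
      "org.apache.hadoop.metrics.spi.CompositeContext");
  factory.setAttribute("composite.arity", "2");
  factory.setAttribute("composite.sub0.class",
      "org.apache.hadoop.metrics.file.FileContext");
  factory.setAttribute("composite.sub1.class",
      "org.apache.hadoop.metrics.ganglia.GangliaContext");
  return MetricsUtil.getContext("composite");
}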
Example 2: init
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
        ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
Example 3: MasterMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
public MasterMetrics(final String name) {
  MetricsContext context = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(context, "master");
  metricsRecord.setTag("Master", name);
  context.registerUpdater(this);
  JvmMetrics.init("Master", name);
  HBaseInfo.init();

  // expose the MBean for metrics
  masterStatistics = new MasterStatistics(this.registry);

  // get custom attributes
  try {
    Object m =
        ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
    if (m instanceof String) {
      this.extendedPeriod = Long.parseLong((String) m) * 1000;
    }
  } catch (IOException ioe) {
    LOG.info("Couldn't load ContextFactory for Metrics config info");
  }
  LOG.info("Initialized");
}
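Because the constructor calls context.registerUpdater(this), the metrics system invokes the class's doUpdates() once per period. The HBase implementation is not reproduced in this article; a minimal sketch of what such a method typically does, assuming a MetricsRegistry field named registry (as referenced above) and MetricsBase from org.apache.hadoop.metrics.util:

// Hedged sketch, not the actual HBase MasterMetrics.doUpdates():
// push every metric registered in this.registry into the shared record,
// then publish the record.
public void doUpdates(MetricsContext context) {
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();
}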
Example 4: RegionServerDynamicMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
private RegionServerDynamicMetrics(HRegionServer regionServer) {
  this.context = MetricsUtil.getContext("hbase");
  this.metricsRecord = MetricsUtil.createRecord(
      this.context,
      "RegionServerDynamicStatistics");
  context.registerUpdater(this);
  this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry);
  this.regionServer = regionServer;
  try {
    // Reflectively reach a package-private method on the MBean superclass.
    updateMbeanInfoIfMetricsListChanged =
        this.rsDynamicStatistics.getClass().getSuperclass()
            .getDeclaredMethod("updateMbeanInfoIfMetricsListChanged",
                new Class[]{});
    updateMbeanInfoIfMetricsListChanged.setAccessible(true);
  } catch (Exception e) {
    LOG.error(e);
  }
}
Example 5: RegionServerMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
public RegionServerMetrics() {
  MetricsContext context = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(context, "regionserver");
  String name = Thread.currentThread().getName();
  metricsRecord.setTag("RegionServer", name);
  context.registerUpdater(this);

  // Add jvmmetrics.
  JvmMetrics.init("RegionServer", name);

  // Add Hbase Info metrics
  HBaseInfo.init();

  // export for JMX
  statistics = new RegionServerStatistics(this.registry, name);

  // get custom attributes
  try {
    Object m = ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
    if (m instanceof String) {
      this.extendedPeriod = Long.parseLong((String) m) * 1000;
    }
  } catch (IOException ioe) {
    LOG.info("Couldn't load ContextFactory for Metrics config info");
  }
  LOG.info("Initialized");
}
Example 6: HBaseRpcMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
public HBaseRpcMetrics(String hostName, String port) {
  MetricsContext context = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(context, "metrics");
  metricsRecord.setTag("port", port);

  LOG.info("Initializing RPC Metrics with hostName="
      + hostName + ", port=" + port);

  context.registerUpdater(this);

  initMethods(HMasterInterface.class);
  initMethods(HMasterRegionInterface.class);
  initMethods(HRegionInterface.class);
  rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port);
}
Example 7: logMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
private static void logMetrics(ThriftMetrics metrics) throws Exception {
  if (!LOG.isDebugEnabled()) {
    // Nothing to do: everything below logs at debug level.
    return;
  }
  MetricsContext context = MetricsUtil.getContext(
      ThriftMetrics.CONTEXT_NAME);
  metrics.doUpdates(context);
  for (String key : context.getAllRecords().keySet()) {
    for (OutputRecord record : context.getAllRecords().get(key)) {
      for (String name : record.getMetricNames()) {
        LOG.debug("metrics:" + name + " value:" +
            record.getMetric(name).intValue());
      }
    }
  }
}
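getAllRecords() returns the buffered output keyed by record name. Besides metric names, each OutputRecord also carries the tags that were set on the record; a hedged extension of the loop above that logs tags as well (getTagNames() and getTag() are on org.apache.hadoop.metrics.spi.OutputRecord):

for (String key : context.getAllRecords().keySet()) {
  for (OutputRecord record : context.getAllRecords().get(key)) {
    for (String tagName : record.getTagNames()) {
      LOG.debug("tag:" + tagName + " value:" + record.getTag(tagName));
    }
  }
}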
Example 8: init
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.valueOf(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
        ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
Example 9: terminate
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
@Override
public void terminate() throws IOException {
  if (eventLog != null)
    eventLog.log("SHUTDOWN");
  running = false;
  if (jobListener != null)
    taskTrackerManager.removeJobInProgressListener(jobListener);
  if (eagerInitListener != null) {
    taskTrackerManager.removeJobInProgressListener(eagerInitListener);
    eagerInitListener.terminate();
  }
  if (eventLog != null)
    eventLog.shutdown();
  if (metricsUpdater != null) {
    MetricsContext context = MetricsUtil.getContext("fairscheduler");
    context.unregisterUpdater(metricsUpdater);
    metricsUpdater = null;
  }
}
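The metricsUpdater unregistered here must have been registered during scheduler startup; that side is not part of this article, but a minimal sketch of what the registration could look like (updateMetrics() is an assumed helper, not FairScheduler's actual code):

// Hypothetical start()-side counterpart of the terminate() above.
MetricsContext context = MetricsUtil.getContext("fairscheduler");
metricsUpdater = new Updater() {
  @Override
  public void doUpdates(MetricsContext unused) {
    updateMetrics(); // assumed helper that refreshes the scheduler's records
  }
};
context.registerUpdater(metricsUpdater);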
Example 10: NamespaceNotifierMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
public NamespaceNotifierMetrics(Configuration conf, String serverId) {
  String sessionId = conf.get("session.id");
  JvmMetrics.init("NamespaceNotifier", sessionId);

  notifierActivityMBean = new NamespaceNotifierActivityMBean(registry,
      "" + serverId);

  MetricsContext context = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(context, "namespacenotifier");
  metricsRecord.setTag("sessionId", sessionId);
  context.registerUpdater(this);

  LOG.info("Initializing NamespaceNotifierMetrics using context object:" +
      context.getClass().getName() + " and record: " +
      metricsRecord.getClass().getCanonicalName());
}
Example 11: ClusterManagerMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
/**
 * Constructor.
 * @param types The available resource types.
 */
public ClusterManagerMetrics(Collection<ResourceType> types) {
  context = MetricsUtil.getContext(CONTEXT_NAME);
  metricsRecord = MetricsUtil.createRecord(context, CONTEXT_NAME);
  typeToResourceRequested = createTypeToResourceCountMap(types, "requested");
  typeToResourceGranted = createTypeToResourceCountMap(types, "granted");
  typeToResourceRevoked = createTypeToResourceCountMap(types, "revoked");
  typeToResourceReleased = createTypeToResourceCountMap(types, "released");
  typeToPendingCount = createTypeToCountMap(types, "pending");
  typeToRunningCount = createTypeToCountMap(types, "running");
  typeToTotalSlots = createTypeToCountMap(types, "total");
  typeToFreeSlots = createTypeToCountMap(types, "free");
  typeToSchedulerRunTime = createTypeToCountMap(types, "scheduler_runtime");
  typeToSchedulerCurrentCycleStart =
      new ConcurrentHashMap<ResourceType, Long>();
  sessionStatusToMetrics = createSessionStatusToMetricsMap();
  aliveNodes = new MetricsIntValue("alive_nodes", registry);
  deadNodes = new MetricsIntValue("dead_nodes", registry);
  blacklistedNodes = new MetricsIntValue("blacklisted_nodes", registry);
  numRunningSessions = new MetricsIntValue("num_running_sessions", registry);
  totalSessionCount = new MetricsTimeVaryingInt("total_sessions", registry);
  pendingCallsCount = new MetricsIntValue("num_pending_calls", registry);
  numCJTFailures = new MetricsTimeVaryingInt("num_cjt_failures", registry);
  numTaskTrackerRestarted =
      new MetricsIntValue("num_task_tracker_restarted", registry);
  numRemoteJTTimedout = new MetricsIntValue("num_remotejt_timedout", registry);
}
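The fields created above are typed helpers from org.apache.hadoop.metrics.util: MetricsIntValue is a gauge whose set() overwrites the last value, while MetricsTimeVaryingInt accumulates inc() calls and emits the delta at the next push. A hedged sketch of hypothetical update helpers inside this class (neither method is in the original):

// Hypothetical helpers, not part of the real ClusterManagerMetrics:
void recordNodeCounts(int alive, int dead) {
  aliveNodes.set(alive);   // gauge: the latest value wins
  deadNodes.set(dead);
}

void recordSessionStart() {
  totalSessionCount.inc(); // counter: delta emitted at the next push
}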
Example 12: submitPoolMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
private void submitPoolMetrics(PoolInfo info) {
  MetricsRecord record = poolToMetricsRecord.get(info.poolName);
  if (record == null) {
    // Lazily create one record per pool and cache it.
    record = MetricsUtil.createRecord(context, "pool-" + info.poolName);
    FairScheduler.LOG.info("Create metrics record for pool:" + info.poolName);
    poolToMetricsRecord.put(info.poolName, record);
  }
  record.setMetric("min_map", info.minMaps);
  record.setMetric("min_reduce", info.minReduces);
  record.setMetric("max_map", info.maxMaps);
  record.setMetric("max_reduce", info.maxReduces);
  record.setMetric("running_map", info.runningMaps);
  record.setMetric("running_reduce", info.runningReduces);
  record.setMetric("runnable_map", info.runnableMaps);
  record.setMetric("runnable_reduce", info.runnableReduces);
  record.setMetric("inited_tasks", info.initedTasks);
  record.setMetric("max_inited_tasks", info.maxInitedTasks);
  int runningJobs = info.runningJobs;
  record.setMetric("avg_first_map_wait_ms",
      (runningJobs == 0) ? 0 : info.totalFirstMapWaitTime / runningJobs);
  record.setMetric("avg_first_reduce_wait_ms",
      (runningJobs == 0) ? 0 : info.totalFirstReduceWaitTime / runningJobs);
}
Example 13: init
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.valueOf(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
        ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
Example 14: HighTideNodeMetrics
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
public HighTideNodeMetrics(Configuration conf, HighTideNode hightideNode) {
  String sessionId = conf.get("session.id");
  // Initiate Java VM metrics
  JvmMetrics.init("HighTideNode", sessionId);
  // Now the Mbean for the name node - this also registers the MBean
  hightidenodeActivityMBean = new HighTideNodeActivityMBean(registry);
  // Create a record for HighTideNode metrics
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "hightidenode");
  metricsRecord.setTag("sessionId", sessionId);
  metricsContext.registerUpdater(this);
  LOG.info("Initializing HighTideNodeMetrics using context object:" +
      metricsContext.getClass().getName());
}
Example 15: create
import org.apache.hadoop.metrics.MetricsUtil; // import the required class
static IPCLoggerChannelMetrics create(IPCLoggerChannel ch) {
  String name = getName(ch);
  synchronized (REGISTRY) {
    IPCLoggerChannelMetrics m = REGISTRY.get(name);
    if (m != null) {
      // Reuse the existing metrics object, re-pointing it at the new channel.
      m.setChannel(ch);
    } else {
      MetricsContext metricsContext = MetricsUtil.getContext("dfs");
      MetricsRecord metricsRecord = MetricsUtil.createRecord(metricsContext,
          "loggerchannel");
      metricsRecord.setTag("loggerchannel", name);
      m = new IPCLoggerChannelMetrics(ch, metricsRecord, name);
      metricsContext.registerUpdater(m);
      REGISTRY.put(name, m);
    }
    return m;
  }
}
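Because create() keeps one instance per channel name in REGISTRY, a reconnect to the same JournalNode reuses the existing record instead of registering a second updater. A hedged usage sketch (channel and newChannel are assumed to point at the same address):

IPCLoggerChannelMetrics metrics = IPCLoggerChannelMetrics.create(channel);
// After a reconnect, a new channel object to the same address maps to the
// same name, so the cached instance is returned and re-pointed at it:
IPCLoggerChannelMetrics same = IPCLoggerChannelMetrics.create(newChannel);
assert metrics == same;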