This page collects typical usage examples of the Java class org.apache.hadoop.metrics.MetricsContext. If you have been wondering what exactly MetricsContext does and how to use it, the curated class examples below may help.
MetricsContext belongs to the org.apache.hadoop.metrics package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: init
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
@Override
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    // Read the number of child contexts ("arity") from the configuration.
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
        ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    // Resolve each child context by its generated sub-context name.
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
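A note on the example above: ARITY_LABEL and SUB_FMT are private constants of the composite context, and the children are found purely by naming convention. The following is a minimal, hypothetical sketch of that resolution, assuming the conventional attribute name "arity" and a sub-context pattern of "%s.sub%d" (verify both against your Hadoop version):

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsUtil;

// Hypothetical demo of how init() above resolves its child contexts.
public class CompositeChildDemo {
  // Assumed value of the private SUB_FMT constant; illustrative only.
  private static final String SUB_FMT = "%s.sub%d";

  public static void main(String[] args) {
    String contextName = "mapred"; // hypothetical parent context name
    int arity = 2;                 // would come from getAttribute("arity")
    for (int i = 0; i < arity; i++) {
      // Looks up the child contexts "mapred.sub0" and "mapred.sub1".
      MetricsContext ctxt = MetricsUtil.getContext(
          String.format(SUB_FMT, contextName, i), contextName);
      System.out.println("child context: " + ctxt.getContextName());
    }
  }
}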
Example 2: emitRecord
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
@InterfaceAudience.Private
@Override
protected void emitRecord(String contextName, String recordName,
    OutputRecord outRec) throws IOException {
  // Fan the record out to every child context. A failure in one child is
  // logged and does not prevent delivery to the others.
  for (MetricsContext ctxt : subctxt) {
    try {
      // Validate the arguments before emitting; nulls are reported via the
      // warning below rather than propagated to the caller.
      if (contextName == null || recordName == null || outRec == null) {
        throw new IOException(contextName + ":" + recordName + ":" + outRec);
      }
      ((AbstractMetricsContext) ctxt).emitRecord(
          contextName, recordName, outRec);
    } catch (IOException e) {
      LOG.warn("emitRecord failed: " + ctxt.getContextName(), e);
    }
  }
}
Example 3: doUpdates
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 */
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    metricsRecord.incrMetric("maps_launched", numMapTasksLaunched);
    metricsRecord.incrMetric("maps_completed", numMapTasksCompleted);
    metricsRecord.incrMetric("reduces_launched", numReduceTasksLaunched);
    metricsRecord.incrMetric("reduces_completed", numReduceTasksCompleted);
    metricsRecord.incrMetric("waiting_maps", numWaitingMaps);
    metricsRecord.incrMetric("waiting_reduces", numWaitingReduces);
    // Reset the per-period counters now that their deltas have been pushed.
    numMapTasksLaunched = 0;
    numMapTasksCompleted = 0;
    numReduceTasksLaunched = 0;
    numReduceTasksCompleted = 0;
    numWaitingMaps = 0;
    numWaitingReduces = 0;
  }
  metricsRecord.update();
}
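The accumulate-then-reset pattern above is the standard way to report per-period counters with this API: callers bump plain fields, and doUpdates() pushes the deltas and zeroes them. Here is a minimal, self-contained sketch of the same pattern; the context and record names are hypothetical:

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;

public class RequestMetrics implements Updater {
  private final MetricsRecord record;
  private int requests; // accumulated between doUpdates() calls

  public RequestMetrics() {
    MetricsContext context = MetricsUtil.getContext("demo"); // hypothetical context name
    record = MetricsUtil.createRecord(context, "requests");
    context.registerUpdater(this); // doUpdates() is now invoked once per period
  }

  public synchronized void incrRequests() {
    requests++;
  }

  @Override
  public void doUpdates(MetricsContext unused) {
    synchronized (this) {
      record.incrMetric("requests", requests); // push this period's delta
      requests = 0;                            // reset for the next period
    }
    record.update(); // mark the record as ready to emit
  }
}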
Example 4: doUpdates
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes);
    shuffleMetrics.incrMetric("shuffle_failed_fetches", numFailedFetches);
    shuffleMetrics.incrMetric("shuffle_success_fetches", numSuccessFetches);
    // Guard against division by zero when no copier threads are configured.
    if (numCopiers != 0) {
      shuffleMetrics.setMetric("shuffle_fetchers_busy_percent",
          100 * ((float) numThreadsBusy / numCopiers));
    } else {
      shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0);
    }
    numBytes = 0;
    numSuccessFetches = 0;
    numFailedFetches = 0;
  }
  shuffleMetrics.update();
}
Example 5: init
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
        ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
Example 6: MasterMetrics
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
public MasterMetrics(final String name) {
  MetricsContext context = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(context, "master");
  metricsRecord.setTag("Master", name);
  context.registerUpdater(this);
  JvmMetrics.init("Master", name);
  HBaseInfo.init();
  // expose the MBean for metrics
  masterStatistics = new MasterStatistics(this.registry);
  // get custom attributes
  try {
    Object m =
        ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
    if (m instanceof String) {
      this.extendedPeriod = Long.parseLong((String) m) * 1000;
    }
  } catch (IOException ioe) {
    LOG.info("Couldn't load ContextFactory for Metrics config info");
  }
  LOG.info("Initialized");
}
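The "hbase.extendedperiod" attribute read above comes from the metrics configuration file (typically hadoop-metrics.properties) via the ContextFactory. A hedged sketch of reading it in isolation, assuming a properties entry such as hbase.extendedperiod=3600:

import org.apache.hadoop.metrics.ContextFactory;

// Hypothetical demo: reading a custom attribute from hadoop-metrics.properties.
// Assumes the file contains a line like: hbase.extendedperiod=3600
public class ExtendedPeriodDemo {
  public static void main(String[] args) throws Exception {
    ContextFactory factory = ContextFactory.getFactory(); // loads hadoop-metrics.properties
    Object m = factory.getAttribute("hbase.extendedperiod");
    long extendedPeriodMs = 0;
    if (m instanceof String) {
      extendedPeriodMs = Long.parseLong((String) m) * 1000; // seconds to milliseconds
    }
    System.out.println("extendedPeriod(ms)=" + extendedPeriodMs);
  }
}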
Example 7: doUpdates
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 * @param unused
 */
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    this.lastUpdate = System.currentTimeMillis();
    // has the extended period for long-living stats elapsed?
    if (this.extendedPeriod > 0 &&
        this.lastUpdate - this.lastExtUpdate >= this.extendedPeriod) {
      this.lastExtUpdate = this.lastUpdate;
      this.splitTime.resetMinMaxAvg();
      this.splitSize.resetMinMaxAvg();
      this.resetAllMinMax();
    }
    this.cluster_requests.pushMetric(metricsRecord);
    this.splitTime.pushMetric(metricsRecord);
    this.splitSize.pushMetric(metricsRecord);
  }
  this.metricsRecord.update();
}
Example 8: HBaseRpcMetrics
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
public HBaseRpcMetrics(String hostName, String port) {
  MetricsContext context = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(context, "metrics");
  metricsRecord.setTag("port", port);
  LOG.info("Initializing RPC Metrics with hostName="
      + hostName + ", port=" + port);
  context.registerUpdater(this);
  // Register the RPC methods of each interface so per-method metrics exist.
  initMethods(HMasterInterface.class);
  initMethods(HMasterRegionInterface.class);
  initMethods(HRegionInterface.class);
  rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port);
}
Example 9: logMetrics
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
private static void logMetrics(ThriftMetrics metrics) throws Exception {
  // Nothing to log unless debug logging is enabled.
  if (!LOG.isDebugEnabled()) {
    return;
  }
  MetricsContext context = MetricsUtil.getContext(
      ThriftMetrics.CONTEXT_NAME);
  metrics.doUpdates(context);
  for (String key : context.getAllRecords().keySet()) {
    for (OutputRecord record : context.getAllRecords().get(key)) {
      for (String name : record.getMetricNames()) {
        LOG.debug("metrics:" + name + " value:" +
            record.getMetric(name).intValue());
      }
    }
  }
}
Example 10: doUpdates
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 */
@Override
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    // Gauges: absolute values sampled each period.
    metricsRecord.setMetric("maps_running", tt.mapTotal);
    metricsRecord.setMetric("reduces_running", tt.reduceTotal);
    metricsRecord.setMetric("mapTaskSlots", (short) tt.getMaxCurrentMapTasks());
    metricsRecord.setMetric("reduceTaskSlots",
        (short) tt.getMaxCurrentReduceTasks());
    metricsRecord.setMetric("failedDirs", tt.getNumDirFailures());
    // Counters: per-period deltas, reset after being pushed.
    metricsRecord.incrMetric("tasks_completed", numCompletedTasks);
    metricsRecord.incrMetric("tasks_failed_timeout", timedoutTasks);
    metricsRecord.incrMetric("tasks_failed_ping", tasksFailedPing);
    numCompletedTasks = 0;
    timedoutTasks = 0;
    tasksFailedPing = 0;
  }
  metricsRecord.update();
}
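Note the mix of setMetric and incrMetric above: setMetric publishes a gauge (an absolute value re-sampled each period, e.g. maps_running), while incrMetric adds a per-period delta that the caller then resets (e.g. tasks_completed). A small hypothetical sketch of the distinction; all names here are illustrative:

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;

public class GaugeVsCounterDemo implements Updater {
  private final MetricsRecord record;
  private volatile int runningTasks; // gauge: current state, never reset
  private int completedDelta;        // counter: events since last period

  public GaugeVsCounterDemo() {
    MetricsContext context = MetricsUtil.getContext("demo"); // hypothetical context name
    record = MetricsUtil.createRecord(context, "tasks");
    context.registerUpdater(this);
  }

  @Override
  public void doUpdates(MetricsContext unused) {
    synchronized (this) {
      record.setMetric("tasks_running", runningTasks);      // absolute value each period
      record.incrMetric("tasks_completed", completedDelta); // delta since last period
      completedDelta = 0; // counters are reset; gauges are not
    }
    record.update();
  }
}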
Example 11: doUpdates
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    // Guard against division by zero when no worker threads are configured.
    if (workerThreads != 0) {
      shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent",
          100 * ((float) serverHandlerBusy / workerThreads));
    } else {
      shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0);
    }
    shuffleMetricsRecord.incrMetric("shuffle_output_bytes", outputBytes);
    shuffleMetricsRecord.incrMetric("shuffle_failed_outputs", failedOutputs);
    shuffleMetricsRecord.incrMetric("shuffle_success_outputs", successOutputs);
    shuffleMetricsRecord.incrMetric("shuffle_exceptions_caught",
        exceptionsCaught);
    outputBytes = 0;
    failedOutputs = 0;
    successOutputs = 0;
    exceptionsCaught = 0;
  }
  shuffleMetricsRecord.update();
}
Example 12: terminate
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
@Override
public void terminate() throws IOException {
  if (eventLog != null) {
    eventLog.log("SHUTDOWN");
  }
  running = false;
  if (jobListener != null) {
    taskTrackerManager.removeJobInProgressListener(jobListener);
  }
  if (eagerInitListener != null) {
    taskTrackerManager.removeJobInProgressListener(eagerInitListener);
    eagerInitListener.terminate();
  }
  if (eventLog != null) {
    eventLog.shutdown();
  }
  if (metricsUpdater != null) {
    // Stop the periodic doUpdates() callbacks from the metrics context.
    MetricsContext context = MetricsUtil.getContext("fairscheduler");
    context.unregisterUpdater(metricsUpdater);
    metricsUpdater = null;
  }
}
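terminate() above shows the shutdown half of the updater lifecycle: an Updater registered with registerUpdater must eventually be removed with unregisterUpdater, or the context will keep invoking it after the component has stopped. A minimal hypothetical pairing of the two calls:

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;

public class SchedulerMetricsUpdater implements Updater {
  private MetricsContext context;

  public void start() {
    context = MetricsUtil.getContext("fairscheduler");
    context.registerUpdater(this); // begin periodic doUpdates() callbacks
  }

  public void stop() {
    if (context != null) {
      context.unregisterUpdater(this); // stop callbacks before shutdown completes
      context = null;
    }
  }

  @Override
  public void doUpdates(MetricsContext unused) {
    // push metrics to a MetricsRecord here
  }
}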
Example 13: NamespaceNotifierMetrics
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
public NamespaceNotifierMetrics(Configuration conf, String serverId) {
  String sessionId = conf.get("session.id");
  JvmMetrics.init("NamespaceNotifier", sessionId);
  notifierActivityMBean = new NamespaceNotifierActivityMBean(registry,
      "" + serverId);
  MetricsContext context = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(context, "namespacenotifier");
  metricsRecord.setTag("sessionId", sessionId);
  context.registerUpdater(this);
  LOG.info("Initializing NamespaceNotifierMetrics using context object:" +
      context.getClass().getName() + " and record: " +
      metricsRecord.getClass().getCanonicalName());
}
Example 14: doUpdates
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
@Override
public void doUpdates(MetricsContext unused) {
  synchronized (aggregateJobStats) {
    // Update metrics with aggregate job stats and reset the aggregate.
    aggregateJobStats.incrementMetricsAndReset(metricsRecord);
    incrementMetricsAndReset(metricsRecord, aggregateCounters);
    incrementMetricsAndReset(metricsRecord, aggregateErrors);
    // Do the same for each pool's dedicated record.
    for (Map.Entry<String, MetricsRecord> entry :
        poolToMetricsRecord.entrySet()) {
      String pool = entry.getKey();
      JobStats poolJobStats = poolToJobStats.get(pool);
      poolJobStats.incrementMetricsAndReset(entry.getValue());
      Counters poolCounters = poolToJobCounters.get(pool);
      incrementMetricsAndReset(entry.getValue(), poolCounters);
    }
  }
  metricsRecord.update();
}
Example 15: HighTideNodeMetrics
import org.apache.hadoop.metrics.MetricsContext; // import the required package/class
public HighTideNodeMetrics(Configuration conf, HighTideNode hightideNode) {
  String sessionId = conf.get("session.id");
  // Initiate Java VM metrics
  JvmMetrics.init("HighTideNode", sessionId);
  // Now the MBean for the HighTideNode - this also registers the MBean
  hightidenodeActivityMBean = new HighTideNodeActivityMBean(registry);
  // Create a record for HighTideNode metrics
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "hightidenode");
  metricsRecord.setTag("sessionId", sessionId);
  metricsContext.registerUpdater(this);
  LOG.info("Initializing HighTideNodeMetrics using context object:" +
      metricsContext.getClass().getName());
}