本文整理汇总了Java中org.apache.hadoop.metrics.util.MetricsTimeVaryingRate类的典型用法代码示例。如果您正苦于以下问题:Java MetricsTimeVaryingRate类的具体用法?Java MetricsTimeVaryingRate怎么用?Java MetricsTimeVaryingRate使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
MetricsTimeVaryingRate类属于org.apache.hadoop.metrics.util包,在下文中一共展示了MetricsTimeVaryingRate类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: incMethodTime
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Records the elapsed time of one named Thrift method invocation.
 * The sample is added to the method-specific rate metric (if one was
 * registered), to the aggregate {@code thriftCall} rate, and — when the
 * call exceeded {@code slowResponseTime} — to {@code slowThriftCall}.
 *
 * @param name method name used to look up the per-method metric
 * @param time elapsed time of the call, in the unit used by the metrics
 */
public void incMethodTime(String name, int time) {
  MetricsTimeVaryingRate perMethodRate = getMethodTimeMetrics(name);
  if (perMethodRate == null) {
    // Unknown method: warn once per call and drop the sample.
    LOG.warn(
        "Got incMethodTime() request for method that doesnt exist: " + name);
    return;
  }
  // Method-specific processing time first, then the overall rate.
  perMethodRate.inc(time);
  thriftCall.inc(time);
  // Slow calls are additionally tracked in their own metric.
  if (time > slowResponseTime) {
    slowThriftCall.inc(time);
  }
}
示例2: IPCLoggerChannelMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Creates the metrics set for one IPC logger channel. Every metric name
 * is suffixed with {@code name} so that several channels can register
 * into the same metrics registry without colliding.
 *
 * @param ch the channel these metrics describe
 * @param metricRecords record the metrics are pushed into
 * @param name per-channel suffix appended to each metric name
 */
private IPCLoggerChannelMetrics(IPCLoggerChannel ch,
    MetricsRecord metricRecords, String name) {
  this.ch = ch;
  this.metricsRecord = metricRecords;
  // Write-path latency rates.
  writeEndToEndLatency =
      new MetricsTimeVaryingRate("writeEndToEndLatency_" + name, registry);
  writeRpcLatency =
      new MetricsTimeVaryingRate("writeRpcLatency_" + name, registry);
  // Point-in-time gauges describing queue depth and lag.
  currentQueuedEditsSizeBytes =
      new MetricsLongValue("currentQueuedEditsSizeBytes_" + name, registry);
  currentLagTransactions =
      new MetricsLongValue("currentLagTransactions_" + name, registry);
  currentLagTimeMicros =
      new MetricsLongValue("currentLagTimeMicros_" + name, registry);
  // Flag gauge: nonzero while this channel is out of sync.
  isOutOfSync = new MetricsIntValue("isOutOfSync_" + name, registry);
}
示例3: addHLogMetric
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Folds an {@code HLog.Metric} interval summary (count/min/max/total) into
 * a Hadoop {@code MetricsTimeVaryingRate} so that the rate metric's own
 * min/max/average bookkeeping reflects the summarized samples.
 *
 * @param logMetric source summary; only read, never modified
 * @param hadoopMetric destination rate metric receiving the samples
 */
private void addHLogMetric(HLog.Metric logMetric,
    MetricsTimeVaryingRate hadoopMetric) {
  // Replay min and max as individual samples so the destination metric's
  // min/max trackers observe the true extremes of the interval.
  if (logMetric.count > 0) {
    hadoopMetric.inc(logMetric.min);
  }
  if (logMetric.count > 1) {
    hadoopMetric.inc(logMetric.max);
  }
  if (logMetric.count > 2) {
    // The remaining samples are folded in as one bulk increment whose
    // total excludes the min and max already recorded above, preserving
    // both the overall count and the overall total time.
    int ops = logMetric.count - 2;
    hadoopMetric.inc(ops, logMetric.total - logMetric.max - logMetric.min);
  }
}
示例4: setUp
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Test fixture setup: builds a fresh registry with one metric of each kind
 * under test, plus a metrics record in the "hbase" context to push into.
 * All metrics share the description string "test".
 */
public void setUp() {
  this.registry = new MetricsRegistry();
  this.metricsRate = new MetricsRate("metricsRate", registry, "test");
  this.intValue = new MetricsIntValue("intValue", registry, "test");
  this.varyRate = new MetricsTimeVaryingRate("varyRate", registry, "test");
  this.stats = new TestStatistics(registry);
  // Create the record the metrics will be pushed into; the tests drive
  // updates manually, so no periodic updater is registered here.
  MetricsContext context = MetricsUtil.getContext("hbase");
  this.metricsRecord = MetricsUtil.createRecord(context, "test");
  this.metricsRecord.setTag("TestStatistics", "test");
}
示例5: testBasic
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Sanity check: after a single (1 op, 10 time-units) increment and a push,
 * the rate metric reports 10 as its maximum observed time.
 */
@Test
public void testBasic() throws Exception {
  MetricsRegistry registry = new MetricsRegistry();
  MetricsTimeVaryingRate rate = new MetricsTimeVaryingRate("test", registry);
  rate.inc(1, 10);
  // Push into a record of the "test" context to roll the interval over.
  MetricsContext ctx = MetricsUtil.getContext("test");
  MetricsRecord record = MetricsUtil.createRecord(ctx, "test");
  rate.pushMetric(record);
  assertEquals(10, rate.getMaxTime());
}
示例6: getFromRegion
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Executes a Get against the region under an already-held row lock and
 * records the call latency into {@code latency}, tagged with the row key.
 * The latency is recorded whether the read returns or throws.
 *
 * @param region region to read from
 * @param get the read request
 * @param lockId id of the row lock already obtained by the caller
 * @param latency metric receiving the elapsed time of this read
 * @return the read result
 * @throws IOException propagated from the underlying region read
 */
protected Result getFromRegion(HRegion region, Get get, Integer lockId,
    MetricsTimeVaryingRate latency) throws IOException {
  long startNs = System.nanoTime();
  try {
    return region.get(get, lockId);
  } finally {
    ThemisCpStatistics.updateLatency(latency, startNs,
        "row=" + Bytes.toStringBinary(get.getRow()));
  }
}
示例7: mutateToRegion
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Applies a batch of mutations to one row of the region and records the
 * call latency, tagged with the row key and mutation count. The latency
 * is recorded whether the write returns or throws.
 *
 * @param region region to mutate
 * @param row row key the mutations belong to (used for latency tagging)
 * @param mutations mutations to apply atomically
 * @param latency metric receiving the elapsed time of this write
 * @throws IOException propagated from the underlying region write
 */
protected void mutateToRegion(HRegion region, byte[] row, List<Mutation> mutations,
    MetricsTimeVaryingRate latency) throws IOException {
  long startNs = System.nanoTime();
  try {
    // The row lock is already held by the caller, so no extra rows need
    // locking inside mutateRowsWithLocks — pass an empty lock set.
    region.mutateRowsWithLocks(mutations, Collections.<byte[]>emptySet());
  } finally {
    ThemisCpStatistics.updateLatency(latency, startNs, "row=" + Bytes.toStringBinary(row)
        + ", mutationCount=" + mutations.size());
  }
}
示例8: readLockBytes
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Reads the lock cell for the given data column at exactly
 * {@code prewriteTs}, recording the read latency.
 *
 * @return the lock cell's value, or {@code null} when no lock cell exists
 * @throws IOException propagated from the underlying region read
 */
protected byte[] readLockBytes(HRegion region, byte[] row, Integer lid, Column column,
    long prewriteTs, MetricsTimeVaryingRate latency) throws IOException {
  // Translate the data column into its corresponding lock column.
  Column lockColumn = ColumnUtil.getLockColumn(column);
  Get lockGet = new Get(row)
      .addColumn(lockColumn.getFamily(), lockColumn.getQualifier());
  lockGet.setTimeStamp(prewriteTs);
  Result result = getFromRegion(region, lockGet, lid, latency);
  if (result.isEmpty()) {
    return null;
  }
  return result.list().get(0).getValue();
}
示例9: updateLatency
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Records the time elapsed since {@code beginTs} (a {@code System.nanoTime()}
 * reading) into the metric, in microseconds, optionally logging slow
 * operations with the supplied message.
 *
 * @param metric latency metric to increment
 * @param beginTs start timestamp from {@code System.nanoTime()}
 * @param logSlowOp whether to run the slow-operation logging path
 * @param message context appended to the slow-operation log line
 */
public static void updateLatency(MetricsTimeVaryingRate metric, long beginTs, boolean logSlowOp,
    String message) {
  // nanoTime delta converted to microseconds.
  long elapsedUs = (System.nanoTime() - beginTs) / 1000;
  metric.inc(elapsedUs);
  if (logSlowOp) {
    logSlowOperationInternal(metric.getName(), elapsedUs, message);
  }
}
示例10: SepMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Builds the SEP metrics set under the given record name, registers this
 * instance as a periodic updater of the "repository" metrics context, and
 * exposes the registry via an MXBean.
 *
 * @param recordName name of the metrics record to create and push into
 */
public SepMetrics(String recordName) {
  this.recordName = recordName;
  metricsRegistry = new MetricsRegistry();
  // Rate of processed SEP events plus the timestamp of the last one seen.
  sepProcessingRate = new MetricsTimeVaryingRate("sepProcessed", metricsRegistry);
  lastTimestampInputProcessed = new MetricsLongValue("lastSepTimestamp", metricsRegistry);
  // Hook into the "repository" context so doUpdates() is invoked periodically.
  context = MetricsUtil.getContext("repository");
  metricsRecord = MetricsUtil.createRecord(context, recordName);
  context.registerUpdater(this);
  mbean = new SepMetricsMXBean(this.metricsRegistry);
}
示例11: getMethodTimeMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Looks up the per-method timing metric registered under {@code key}.
 *
 * @param key metric name (the Thrift method name)
 * @return the registered rate metric, or {@code null} if none exists
 *     for this key; callers are expected to handle the null case
 */
private MetricsTimeVaryingRate getMethodTimeMetrics(String key) {
return (MetricsTimeVaryingRate) registry.get(key);
}
示例12: createMethodTimeMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Creates and registers a new per-method timing metric under {@code key}.
 *
 * @param key metric name (the Thrift method name)
 * @return the newly created rate metric, already added to the registry
 */
private MetricsTimeVaryingRate createMethodTimeMetrics(String key) {
  return new MetricsTimeVaryingRate(key, registry);
}
示例13: getTaskLaunchMsecs
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Returns the task-launch latency metric tracked by this instrumentation.
 * May be read by aggregators that push and combine it across trackers.
 */
@Override
public MetricsTimeVaryingRate getTaskLaunchMsecs() {
return taskLaunchMsecs;
}
示例14: doUpdates
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Periodic metrics-context callback. Aggregates per-tracker statistics
 * (slot refill times, running task counts, slot capacities, task-launch
 * latency) across all registered trackers, writes the aggregates into
 * {@code metricsRecord}, pushes every registered metric, and updates the
 * record.
 *
 * @param context the metrics context driving this update (not used
 *     directly; data is written to the pre-created {@code metricsRecord})
 */
@Override
public void doUpdates(MetricsContext context) {
  LOG.info("Updating metrics");
  int numTrackers = trackerList.size();
  long totalMapRefill = 0;
  long totalReduceRefill = 0;
  int totalRunningMaps = 0;
  int totalRunningReduces = 0;
  int totalMapSlots = 0;
  int totalReduceSlots = 0;
  for (TaskTracker tracker : trackerList) {
    totalMapRefill += tracker.getAveMapSlotRefillMsecs();
    totalReduceRefill += tracker.getAveReduceSlotRefillMsecs();
    totalRunningMaps += tracker.getRunningMaps();
    totalRunningReduces += tracker.getRunningReduces();
    totalMapSlots += tracker.getMaxActualMapTasks();
    totalReduceSlots += tracker.getMaxActualReduceTasks();
    // If the instrumentation and its metric exist, fold each tracker's
    // previous-interval average launch time into the aggregate metric.
    TaskTrackerInstrumentation instrumentation =
        tracker.getTaskTrackerInstrumentation();
    if (instrumentation != null) {
      MetricsTimeVaryingRate taskLaunchMsecs =
          instrumentation.getTaskLaunchMsecs();
      if (taskLaunchMsecs != null) {
        // Push with a null record to roll the interval over so the
        // previous-interval average below reflects the latest data.
        taskLaunchMsecs.pushMetric(null);
        aggTaskLaunchMsecs.inc(
            taskLaunchMsecs.getPreviousIntervalAverageTime());
      }
    }
  }
  // Guard against division by zero when no trackers are registered;
  // report 0 refill time in that case instead of throwing.
  long avgMapRefill = numTrackers == 0 ? 0 : totalMapRefill / numTrackers;
  long avgReduceRefill = numTrackers == 0 ? 0 : totalReduceRefill / numTrackers;
  metricsRecord.setMetric("aveMapSlotRefillMsecs", avgMapRefill);
  metricsRecord.setMetric("aveReduceSlotRefillMsecs", avgReduceRefill);
  metricsRecord.setMetric("maps_running", totalRunningMaps);
  metricsRecord.setMetric("reduces_running", totalRunningReduces);
  metricsRecord.setMetric("mapTaskSlots", totalMapSlots);
  metricsRecord.setMetric("reduceTaskSlots", totalReduceSlots);
  for (MetricsBase metricsBase : registry.getMetricsList()) {
    metricsBase.pushMetric(metricsRecord);
  }
  metricsRecord.update();
}
示例15: JournalMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; //导入依赖的package包/类
/**
 * Builds the metrics set for one journal, suffixing every metric name with
 * the journal id so that multiple journals can share a registry, and
 * registers this instance as a periodic updater of the "dfs" context.
 *
 * @param journal the journal these metrics describe
 */
JournalMetrics(Journal journal) {
  this.journal = journal;
  // Create a record for NameNode metrics.
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "journalnode");
  String journalId = journal.getJournalId();
  metricsRecord.setTag("journalid", journalId);
  metricsContext.registerUpdater(this);
  // Write-path counters.
  batchesWrittenWhileLagging = new MetricsTimeVaryingLong(
      "batchesWrittenWhileLagging_" + journalId, registry,
      "batchesWrittenWhileLagging");
  batchesWritten = new MetricsTimeVaryingLong("batchesWritten_" + journalId,
      registry, "batchesWritten");
  bytesWritten = new MetricsTimeVaryingLong("bytesWritten_" + journalId,
      registry, "bytesWritten");
  txnsWritten = new MetricsTimeVaryingLong("txnsWritten_" + journalId,
      registry, "txnsWritten");
  syncTime = new MetricsTimeVaryingRate("syncTimes_" + journalId, registry);
  // Point-in-time gauges.
  lastWriterEpoch = new MetricsLongValue("lastWriterEpoch_" + journalId,
      registry);
  lastPromisedEpoch = new MetricsLongValue("lastPromisedEpoch_" + journalId,
      registry);
  lastWrittenTxId = new MetricsLongValue("lastWrittenTxId_" + journalId,
      registry);
  currentTxnsLag = new MetricsLongValue("currentTxnsLag_" + journalId,
      registry);
  // HTTP-related metrics.
  numGetJournalDoGet = new MetricsTimeVaryingLong("numGetEditsServletDoGet_"
      + journalId, registry);
  numGetImageDoGet = new MetricsTimeVaryingLong("numGetImageServletDoGet_"
      + journalId, registry);
  // NOTE(review): the registered name "numListPathsServletDoGet_" does not
  // match the field name sizeGetJournalDoGet — looks like a copy-paste
  // slip, but the name is an external contract (dashboards/monitoring may
  // key on it), so it is left unchanged. Confirm before renaming.
  sizeGetJournalDoGet = new MetricsTimeVaryingLong(
      "numListPathsServletDoGet_" + journalId, registry);
  // Fixed typo in the log message: "Meterics" -> "Metrics".
  LOG.info("Initializing JournalNodeMetrics using context object:"
      + metricsContext.getClass().getName());
}