This article collects typical usage examples of the Java class org.apache.hadoop.metrics.util.MetricsTimeVaryingLong. If you are wondering what MetricsTimeVaryingLong does, how to use it, or where to find working examples, the curated snippets below may help.
The MetricsTimeVaryingLong class belongs to the org.apache.hadoop.metrics.util package. 13 code examples of the class are shown below, ordered by popularity.
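As a quick orientation before the examples, here is a minimal sketch of the pattern the snippets below share: create a MetricsRegistry, register a MetricsTimeVaryingLong under it, increment it, and read the current interval value. Every call used here appears in the examples that follow; the metric name and description are made up for illustration.
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;

public class MetricsTimeVaryingLongSketch {
  public static void main(String[] args) {
    // A registry holds all metrics of a component (see Example 11).
    MetricsRegistry registry = new MetricsRegistry();
    // Register a time-varying counter; name and description are illustrative.
    MetricsTimeVaryingLong bytesWritten = new MetricsTimeVaryingLong(
        "bytesWritten", registry, "bytes written during the current interval");
    // Increment as events occur (see Examples 1 and 2).
    bytesWritten.inc(1024L);
    // Read what has accumulated in the current interval (see Example 5).
    System.out.println(bytesWritten.getCurrentIntervalValue());
  }
}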
Example 1: readFields
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
public void readFields(DataInput in) throws IOException {
int version = in.readByte();
if (version > (int)SCANMETRICS_VERSION) {
throw new IOException("version " + version + " not supported");
}
int metricsCount = in.readInt();
for (int i=0; i<metricsCount; i++) {
String metricsName = in.readUTF();
long v = in.readLong();
MetricsBase mb = registry.get(metricsName);
if ( mb instanceof MetricsTimeVaryingLong) {
((MetricsTimeVaryingLong) mb).inc(v);
} else {
LOG.warn("unsupported metrics type. metrics name: "
+ mb.getName() + ", metrics description: " + mb.getDescription());
}
}
}
Example 2: incrLogMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
/**
* Increase logMetrics in the Raidnode metrics
*/
public static void incrLogMetrics(Map<String, Long> incrMetrics) {
if (incrMetrics == null || incrMetrics.size() == 0) {
return;
}
MetricsRegistry registry = RaidNodeMetrics.getInstance(
RaidNodeMetrics.DEFAULT_NAMESPACE_ID).getMetricsRegistry();
Map<String, MetricsTimeVaryingLong> logMetrics = RaidNodeMetrics.getInstance(
RaidNodeMetrics.DEFAULT_NAMESPACE_ID).logMetrics;
synchronized(logMetrics) {
for (String key : incrMetrics.keySet()) {
if (!logMetrics.containsKey(key)) {
logMetrics.put(key, new MetricsTimeVaryingLong(key, registry));
}
((MetricsTimeVaryingLong)logMetrics.get(key)).inc(incrMetrics.get(key));
}
}
}
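For context, here is a hypothetical caller of the method above: it batches counter deltas into a map and pushes them in a single call. The metric names are invented, and the sketch assumes the method lives in the LogUtils helper class referenced in Example 8.
import java.util.HashMap;
import java.util.Map;

public class IncrLogMetricsCaller {
  public static void reportCorruptionFixed() {
    // Batch the deltas locally, then update the RaidNode metrics in one pass.
    Map<String, Long> deltas = new HashMap<String, Long>();
    deltas.put("corruption_detected", 1L);   // illustrative metric name
    deltas.put("corruption_fixed", 1L);      // illustrative metric name
    LogUtils.incrLogMetrics(deltas);         // assumes Example 2 is LogUtils.incrLogMetrics
  }
}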
Example 3: AvatarNodeMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
public AvatarNodeMetrics(NameNodeMetrics metrics) {
this.metrics = metrics;
ignoreDataNodes = new MetricsIntValue(namePref + "IgnoreDatanodes",
metrics.registry, "Ignoring datanodes");
numIgnoredDatanodes = new MetricsTimeVaryingLong(namePref
+ "NumIgnoredDatanodes", metrics.registry,
"Number of ignored datanodes");
numReportedBlocks = new MetricsTimeVaryingLong(namePref
+ "NumReportedBlocks", metrics.registry,
"Blocks reported through incremental block reports");
numRetryBlocks = new MetricsTimeVaryingLong(namePref + "NumRetryBlocks",
metrics.registry, "Blocks retried for incremental block reports");
numCleanerThreadExceptions = new MetricsTimeVaryingLong(namePref
+ "NumCleanerThreadExceptions", metrics.registry,
"Exceptions when clearing deletion queues");
numIngestFailures = new MetricsIntValue(namePref + "NumIngestFailures",
metrics.registry, "Number of ingest failures");
numCheckpointFailures = new MetricsIntValue(namePref
+ "NumCheckpointFailures", metrics.registry,
"Number of checkpoint failures");
}
Example 4: getMetricsTimeVaryingLongArray
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
public MetricsTimeVaryingLong[] getMetricsTimeVaryingLongArray() {
Collection<MetricsBase> mbs = registry.getMetricsList();
ArrayList<MetricsTimeVaryingLong> mlv =
new ArrayList<MetricsTimeVaryingLong>();
for (MetricsBase mb : mbs) {
if ( mb instanceof MetricsTimeVaryingLong) {
mlv.add((MetricsTimeVaryingLong) mb);
}
}
return mlv.toArray(new MetricsTimeVaryingLong[mlv.size()]);
}
Example 5: updateCounters
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
/**
* If hbase runs on new version of mapreduce, RecordReader has access to
* counters thus can update counters based on scanMetrics.
* If hbase runs on old version of mapreduce, it won't be able to get
* access to counters and TableRecorderReader can't update counter values.
* @throws IOException
*/
private void updateCounters() throws IOException {
// we can get access to counters only if hbase uses new mapreduce APIs
if (this.getCounter == null) {
return;
}
byte[] serializedMetrics = currentScan.getAttribute(
Scan.SCAN_ATTRIBUTES_METRICS_DATA);
if (serializedMetrics == null || serializedMetrics.length == 0 ) {
return;
}
DataInputBuffer in = new DataInputBuffer();
in.reset(serializedMetrics, 0, serializedMetrics.length);
ScanMetrics scanMetrics = new ScanMetrics();
scanMetrics.readFields(in);
MetricsTimeVaryingLong[] mlvs =
scanMetrics.getMetricsTimeVaryingLongArray();
try {
for (MetricsTimeVaryingLong mlv : mlvs) {
Counter ct = (Counter)this.getCounter.invoke(context,
HBASE_COUNTER_GROUP_NAME, mlv.getName());
ct.increment(mlv.getCurrentIntervalValue());
}
((Counter) this.getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
"NUM_SCANNER_RESTARTS")).increment(numRestarts);
} catch (Exception e) {
LOG.debug("can't update counter." + StringUtils.stringifyException(e));
}
}
Example 6: createMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
private void createMetrics() {
for (TaskError error : knownErrors.values()) {
LOG.info("metricsKey:" + error.metricsKey);
errorCountsMetrics.put(error, new MetricsTimeVaryingLong(
error.metricsKey, registry, error.description));
}
errorCountsMetrics.put(UNKNOWN_ERROR, new MetricsTimeVaryingLong(
UNKNOWN_ERROR.metricsKey, registry, UNKNOWN_ERROR.description));
}
Example 7: testStuckDataNode
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
/** This creates a slow writer and checks to see
* if pipeline heartbeats work fine
*/
public void testStuckDataNode() throws Exception {
final int DATANODE_NUM = 3;
Configuration conf = new Configuration();
final int timeout = 8000;
conf.setInt("dfs.socket.timeout",timeout);
final Path p = new Path("/pipelineHeartbeat/foo");
System.out.println("p=" + p);
MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();
DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
DelayAnswer delayAnswer = new DelayAnswer();
doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
metrics.bytesWritten = spyBytesWritten;
try {
// create a new file.
FSDataOutputStream stm = fs.create(p);
stm.write(1);
stm.sync();
stm.write(2);
stm.close();
// verify that entire file is good
FSDataInputStream in = fs.open(p);
assertEquals(1, in.read());
assertEquals(2, in.read());
in.close();
} finally {
fs.close();
cluster.shutdown();
}
}
Example 8: verifyMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
public static void verifyMetrics(FileSystem fileSys,
RaidNode cnode, LOGTYPES type, LOGRESULTS result, String tag, long expected,
boolean greater) {
String counterName = LogUtils.getCounterName(fileSys, type, result, tag);
Map<String, MetricsTimeVaryingLong> logMetrics = RaidNodeMetrics.getInstance(
RaidNodeMetrics.DEFAULT_NAMESPACE_ID).logMetrics;
String message = "expect " + expected + (greater? " >= ": " = ") + counterName;
long actual = 0L;
synchronized(logMetrics) {
if (expected == 0L) {
if (greater == false) {
assertTrue(message, !logMetrics.containsKey(counterName));
} else {
actual = logMetrics.containsKey(counterName) ?
    logMetrics.get(counterName).getCurrentIntervalValue() : 0;
assertTrue(message + " but " + actual, actual >= 0L);
}
} else {
actual = logMetrics.get(counterName).getCurrentIntervalValue();
if (greater == false) {
assertEquals(message + " but " + actual, new Long(expected),
new Long(actual));
} else {
assertTrue(message + " but " + actual, actual >= expected);
}
}
}
}
Example 9: createTypeToResourceCountMap
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
/**
* Create a map of resource type -> cumulative counts.
* @param resourceTypes The resource types.
* @param actionType A string indicating granted, revoked, etc.
* @return The map.
*/
private Map<ResourceType, MetricsTimeVaryingLong>
createTypeToResourceCountMap(
Collection<ResourceType> resourceTypes, String actionType) {
Map<ResourceType, MetricsTimeVaryingLong> m =
new HashMap<ResourceType, MetricsTimeVaryingLong>();
for (ResourceType t : resourceTypes) {
String name = (actionType + "_" + t).toLowerCase();
MetricsTimeVaryingLong value = new MetricsTimeVaryingLong(name, registry);
m.put(t, value);
}
return m;
}
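The returned map would typically be consulted on each event. Below is a hypothetical fragment of the same class showing that use, assuming ResourceType is an enum and that the map was built for the "granted" action; the handler method name is invented.
// Fragment of the same class: build the map once, then bump the per-type
// counter whenever the corresponding action occurs.
private final Map<ResourceType, MetricsTimeVaryingLong> grantedCounts =
    createTypeToResourceCountMap(
        java.util.EnumSet.allOf(ResourceType.class), "granted"); // assumes ResourceType is an enum

private void onResourceGranted(ResourceType type) { // hypothetical handler
  MetricsTimeVaryingLong counter = grantedCounts.get(type);
  if (counter != null) {
    counter.inc(1L);
  }
}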
Example 10: updateCounters
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
private void updateCounters() throws IOException {
// we can get access to counters only if hbase uses new mapreduce APIs
if (this.getCounter == null) {
return;
}
byte[] serializedMetrics = currentScan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
if (serializedMetrics == null || serializedMetrics.length == 0) {
return;
}
DataInputBuffer in = new DataInputBuffer();
in.reset(serializedMetrics, 0, serializedMetrics.length);
ScanMetrics scanMetrics = new ScanMetrics();
scanMetrics.readFields(in);
MetricsTimeVaryingLong[] mlvs = scanMetrics.getMetricsTimeVaryingLongArray();
try {
for (MetricsTimeVaryingLong mlv : mlvs) {
Counter ct = (Counter) this.getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
mlv.getName());
ct.increment(mlv.getCurrentIntervalValue());
}
((Counter) this.getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"))
.increment(numRestarts);
} catch (Exception e) {
LOG.debug("can't update counter." + StringUtils.stringifyException(e));
}
}
Example 11: TaskErrorCollector
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
public TaskErrorCollector(Configuration conf) {
errorCountsQueue = new LinkedList<Map<TaskError, Integer>>();
startTimeQueue = new LinkedList<Long>();
errorCountsMetrics = new HashMap<TaskError, MetricsTimeVaryingLong>();
MetricsContext context = MetricsUtil.getContext("mapred");
metricsRecord = MetricsUtil.createRecord(context, "taskerror");
registry = new MetricsRegistry();
windowLength = conf.getInt(WINDOW_LENGTH_KEY, WINDOW_LENGTH);
numWindows = conf.getInt(NUM_WINDOWS_KEY, NUM_WINDOWS);
context.registerUpdater(this);
String configFilePath = conf.get(CONFIG_FILE_KEY);
if (configFilePath == null) {
// Search the class path if it is not configured
URL u = TaskErrorCollector.class.getClassLoader().getResource(ERROR_XML);
if (u != null) {
configFilePath = u.getPath();
}
}
if (configFilePath == null) {
LOG.warn("No " + CONFIG_FILE_KEY + " given in conf. " +
TaskErrorCollector.class.getSimpleName() +
" will see every error as UNKNOWN_ERROR.");
knownErrors = Collections.emptyMap();
} else {
knownErrors = parseConfigFile(configFilePath);
}
createMetrics();
sinceStartErrorCounts = createErrorCountsMap();
}
Example 12: createMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
private void createMetrics() {
for (TaskError error : knownErrors.values()) {
System.out.println("metricsKey:" + error.metricsKey);
errorCountsMetrics.put(error, new MetricsTimeVaryingLong(
error.metricsKey, registry, error.description));
}
errorCountsMetrics.put(UNKNOWN_ERROR, new MetricsTimeVaryingLong(
UNKNOWN_ERROR.metricsKey, registry, UNKNOWN_ERROR.description));
}
Example 13: JournalMetrics
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; // import the required package/class
JournalMetrics(Journal journal) {
this.journal = journal;
// Create a record for NameNode metrics
MetricsContext metricsContext = MetricsUtil.getContext("dfs");
metricsRecord = MetricsUtil.createRecord(metricsContext, "journalnode");
String journalId = journal.getJournalId();
metricsRecord.setTag("journalid", journalId);
metricsContext.registerUpdater(this);
batchesWrittenWhileLagging = new MetricsTimeVaryingLong(
"batchesWrittenWhileLagging_" + journalId, registry,
"batchesWrittenWhileLagging");
batchesWritten = new MetricsTimeVaryingLong("batchesWritten_" + journalId,
registry, "batchesWritten");
bytesWritten = new MetricsTimeVaryingLong("bytesWritten_" + journalId,
registry, "bytesWritten");
txnsWritten = new MetricsTimeVaryingLong("txnsWritten_" + journalId,
registry, "txnsWritten");
syncTime = new MetricsTimeVaryingRate("syncTimes_" + journalId, registry);
lastWriterEpoch = new MetricsLongValue("lastWriterEpoch_" + journalId,
registry);
lastPromisedEpoch = new MetricsLongValue("lastPromisedEpoch_" + journalId,
registry);
lastWrittenTxId = new MetricsLongValue("lastWrittenTxId_" + journalId,
registry);
currentTxnsLag = new MetricsLongValue("currentTxnsLag_" + journalId,
registry);
// http related metrics
numGetJournalDoGet = new MetricsTimeVaryingLong("numGetEditsServletDoGet_"
+ journalId, registry);
numGetImageDoGet = new MetricsTimeVaryingLong("numGetImageServletDoGet_"
+ journalId, registry);
sizeGetJournalDoGet = new MetricsTimeVaryingLong(
"numListPathsServletDoGet_" + journalId, registry);
LOG.info("Initializing JournalNodeMeterics using context object:"
+ metricsContext.getClass().getName());
}
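Since the constructor above registers the object as a metrics updater (metricsContext.registerUpdater(this)), the class is expected to implement the Updater callback. The following is a sketch of the usual doUpdates body in the old metrics framework, not necessarily the exact upstream implementation: push every registered metric into the record, then flush the record.
// Sketch of the Updater callback implied by registerUpdater(this) above.
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    // Push each registered metric (including all the MetricsTimeVaryingLong
    // counters created in the constructor) into the metrics record.
    for (MetricsBase m : registry.getMetricsList()) {
      m.pushMetric(metricsRecord);
    }
  }
  // Flush the record to the configured metrics context.
  metricsRecord.update();
}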