本文整理汇总了Java中org.apache.hadoop.metrics.util.MetricsBase.pushMetric方法的典型用法代码示例。如果您正苦于以下问题:Java MetricsBase.pushMetric方法的具体用法?Java MetricsBase.pushMetric怎么用?Java MetricsBase.pushMetric使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.metrics.util.MetricsBase
的用法示例。
在下文中一共展示了MetricsBase.pushMetric方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
@Override
public void doUpdates(MetricsContext context) {
  // Let the fair scheduler contribute its own metrics first, if one exists.
  if (scheduler != null) {
    scheduler.submitMetrics(metricsRecord);
  }
  // Refresh the pending-call gauge from the session notifier.
  setNumPendingCalls(sessionNotifier.getNumPendingCalls());
  // No lock on the ClusterManagerMetrics object is taken here: the registry's
  // metric list is only mutated in the constructor, and pushMetric() itself
  // is thread-safe.
  for (MetricsBase metric : registry.getMetricsList()) {
    metric.pushMetric(metricsRecord);
  }
  // Flush the accumulated values to the metrics subsystem.
  metricsRecord.update();
}
示例2: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Pushes every registered metric into the metrics record and then flushes
 * the record to the monitoring subsystem. Called by the metrics framework
 * on each update cycle.
 */
public void doUpdates(final MetricsContext context) {
  // Both getMetricsList() and pushMetric() are safe to call without locking.
  for (MetricsBase metric : registry.getMetricsList()) {
    metric.pushMetric(metricsRecord);
  }
  metricsRecord.update();
}
示例3: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
@Override
public void doUpdates(MetricsContext context) {
  // Hold the monitor while walking the registry so concurrent updates to the
  // individual metrics are not observed mid-push.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  // The flush itself happens outside the lock.
  metricsRecord.update();
}
示例4: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/** Publishes all registered metrics and flushes the record. */
public void doUpdates(MetricsContext unused) {
  // Serialize against other updates on this object while pushing values.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();  // flush outside the lock
}
示例5: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
@Override
public void doUpdates(MetricsContext context) {
  // Let the fair scheduler publish its own metrics first, if it exists.
  if (scheduler != null) {
    scheduler.submitMetrics(metricsRecord);
  }
  // For each resource type with a scheduling cycle currently in flight
  // (start timestamp > 0), record how long that cycle has been running.
  for (Map.Entry<ResourceType, Long> entry :
      typeToSchedulerCurrentCycleStart.entrySet()) {
    long cycleStart = entry.getValue();
    if (cycleStart > 0) {
      int elapsedMsecs = (int) (System.currentTimeMillis() - cycleStart);
      typeToSchedulerRunTime.get(entry.getKey()).set(elapsedMsecs);
    }
  }
  // Refresh the pending-call gauge from the session notifier.
  setNumPendingCalls(sessionNotifier.getNumPendingCalls());
  // No lock on this object is taken here: the registry's metric list is only
  // mutated in the constructor, and pushMetric() itself is thread-safe.
  for (MetricsBase metric : registry.getMetricsList()) {
    metric.pushMetric(metricsRecord);
  }
  metricsRecord.update();
}
示例6: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Pushes the registered metrics to the monitoring subsystem each time the
 * metrics framework invokes this updater.
 */
public void doUpdates(MetricsContext context) {
  // Traverse the registry under the monitor so metric values are not
  // mutated concurrently while being published.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();  // flush outside the lock
}
示例7: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds. Publishes every registered metric,
 * then records (and resets) the ls-call counter before flushing.
 */
public void doUpdates(MetricsContext unused) {
  // Push the registry-managed metrics while holding the monitor.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  // The ls-call counter is maintained outside the registry; read-and-reset
  // it atomically via the helper, then publish directly on the record.
  metricsRecord.setMetric("client.ls.calls", getAndResetLsCalls());
  metricsRecord.update();
}
示例8: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds, to publish all registered metrics.
 */
public void doUpdates(MetricsContext unused) {
  // Serialize with other updates on this object during the push.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();  // flush outside the lock
}
示例9: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds, to publish all registered metrics.
 */
@Override
public void doUpdates(MetricsContext unused) {
  // Hold the monitor while the registry is traversed.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();  // flush outside the lock
}
示例10: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
@Override
public void doUpdates(MetricsContext context) {
  // Publish each registered metric under the monitor so values are not
  // mutated concurrently while being pushed.
  synchronized (this) {
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();  // flush outside the lock
}
示例11: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 * We set the metrics value within this function before pushing it out.
 * FSNamesystem updates its own local variables which are
 * light weight compared to Metrics counters.
 *
 * Some of the metrics are explicitly casted to int. Few metrics collectors
 * do not handle long values. It is safe to cast to int for now as all these
 * values fit in int value.
 * Metrics related to DFS capacity are stored in bytes which do not fit in
 * int, so they are rounded to GB.
 */
public void doUpdates(MetricsContext unused) {
/**
 * ToFix
 * If the metrics counter were instead stored in the metrics objects themselves
 * we could avoid copying the values on each update.
 */
// Snapshot the namesystem counters into the metrics gauges and push them,
// all under the monitor so a half-copied set is never published.
synchronized (this) {
// File/block counts (int-cast: see class comment above).
filesTotal.set((int)fsNameSystem.getFilesTotal());
blocksTotal.set((int)fsNameSystem.getBlocksTotal());
// Capacity figures are byte counts rounded to GB to fit in an int.
capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
getCapacityRemaining()));
totalLoad.set(fsNameSystem.getTotalLoad());
// Block-state gauges copied from the namesystem's lightweight counters.
corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
excessBlocks.set((int)fsNameSystem.getExcessBlocks());
pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
pendingReplicationBlocks.set((int)fsNameSystem.
getPendingReplicationBlocks());
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
getScheduledReplicationBlocks());
missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
blockCapacity.set(fsNameSystem.getBlockCapacity());
// Publish every registered metric with the freshly copied values.
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
}
}
// Flush to the metrics subsystem outside the lock.
metricsRecord.update();
}
示例12: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds. Publishes the registry metrics,
 * copies a set of TaskTracker gauges directly onto the record, and emits
 * the interval counters, which are reset after being published.
 */
@Override
public void doUpdates(MetricsContext unused) {
// Everything, including the counter resets, happens under the monitor so
// increments arriving concurrently are not lost between emit and reset.
synchronized (this) {
for (MetricsBase metricsBase : registry.getMetricsList()) {
metricsBase.pushMetric(metricsRecord);
}
// Gauges read straight from the TaskTracker each cycle.
metricsRecord.setMetric("aveMapSlotRefillMsecs",
tt.getAveMapSlotRefillMsecs());
metricsRecord.setMetric("aveReduceSlotRefillMsecs",
tt.getAveReduceSlotRefillMsecs());
metricsRecord.setMetric("maps_running", tt.getRunningMaps());
metricsRecord.setMetric("reduces_running", tt.getRunningReduces());
// Slot counts are narrowed to short; presumably they always fit — the
// collector-side type appears to require it (TODO confirm).
metricsRecord.setMetric("mapTaskSlots", (short)tt.getMaxActualMapTasks());
metricsRecord.setMetric("reduceTaskSlots",
(short)tt.getMaxActualReduceTasks());
// Interval counters: emitted as increments, then zeroed below so each
// update cycle reports only what happened since the previous one.
metricsRecord.incrMetric("map_tasks_completed",
numCompletedMapTasks);
metricsRecord.incrMetric("reduce_tasks_completed",
numCompletedReduceTasks);
metricsRecord.incrMetric("tasks_completed", numCompletedTasks);
metricsRecord.incrMetric("tasks_failed_timeout", timedoutTasks);
metricsRecord.incrMetric("tasks_failed_ping", tasksFailedPing);
metricsRecord.setMetric("unaccounted_memory", unaccountedMemory);
// Reset the interval counters for the next cycle.
numCompletedMapTasks = 0;
numCompletedReduceTasks = 0;
numCompletedTasks = 0;
timedoutTasks = 0;
tasksFailedPing = 0;
}
// Flush to the metrics subsystem outside the lock.
metricsRecord.update();
}
示例13: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Pushes the RPC metrics to the monitoring subsystem on each update cycle.
 * Copies two server-side gauges into their metric objects first, then
 * publishes everything in the registry.
 */
public void doUpdates(MetricsContext context) {
  synchronized (this) {
    // ToFix - fix server to use the following two metrics directly so
    // the metrics do not have be copied here.
    numOpenConnections.set(myServer.getNumOpenConnections());
    callQueueLen.set(myServer.getCallQueueLen());
    // Publish every registered metric with the freshly copied values.
    for (MetricsBase metric : registry.getMetricsList()) {
      metric.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();  // flush outside the lock
}
示例14: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 * We set the metrics value within this function before pushing it out.
 * FSNamesystem updates its own local variables which are
 * light weight compared to Metrics counters.
 *
 * Some of the metrics are explicitly casted to int. Few metrics collectors
 * do not handle long values. It is safe to cast to int for now as all these
 * values fit in int value.
 * Metrics related to DFS capacity are stored in bytes which do not fit in
 * int, so they are rounded to GB.
 */
public void doUpdates(MetricsContext unused) {
/**
 * ToFix
 * If the metrics counter were instead stored in the metrics objects themselves
 * we could avoid copying the values on each update.
 */
// Snapshot the namesystem counters into the metrics gauges and push them,
// all under the monitor so a half-copied set is never published.
synchronized (this) {
// File count includes directories here (getFilesAndDirectoriesTotal).
filesTotal.set((int) fsNameSystem.getFilesAndDirectoriesTotal());
blocksTotal.set((int)fsNameSystem.getBlocksTotal());
// Capacity figures are byte counts rounded to GB to fit in an int.
diskSpaceTotalGB.set(roundBytesToGBytes(fsNameSystem.getDiskSpaceTotal()));
capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
getCapacityRemaining()));
totalLoad.set(fsNameSystem.getTotalLoad());
// Block-state gauges copied from the namesystem's lightweight counters.
corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
excessBlocks.set((int)fsNameSystem.getExcessBlocks());
pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
pendingReplicationBlocks.set((int)fsNameSystem.
getPendingReplicationBlocks());
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
getScheduledReplicationBlocks());
missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
blockCapacity.set(fsNameSystem.getBlockCapacity());
// Lease/upgrade gauges read via the lease manager and namesystem.
numLeases.set(fsNameSystem.leaseManager.countLease());
numUnderConstructionFiles.set(fsNameSystem.leaseManager.countPath());
upgradeTime.set(fsNameSystem.getUpgradeTime());
// Publish every registered metric with the freshly copied values.
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
}
}
// Flush to the metrics subsystem outside the lock.
metricsRecord.update();
}
示例15: doUpdates
import org.apache.hadoop.metrics.util.MetricsBase; //导入方法依赖的package包/类
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 * We set the metrics value within this function before pushing it out.
 * FSNamesystem updates its own local variables which are
 * light weight compared to Metrics counters.
 *
 * Some of the metrics are explicitly casted to int. Few metrics collectors
 * do not handle long values. It is safe to cast to int for now as all these
 * values fit in int value.
 * Metrics related to DFS capacity are stored in bytes which do not fit in
 * int, so they are rounded to GB.
 */
public void doUpdates(MetricsContext unused) {
/**
 * ToFix
 * If the metrics counter were instead stored in the metrics objects themselves
 * we could avoid copying the values on each update.
 */
// Snapshot the namesystem counters into the metrics gauges and push them,
// all under the monitor so a half-copied set is never published.
synchronized (this) {
// Unlike the sibling variants, this one fetches the namesystem singleton
// each cycle rather than holding it in a field.
FSNamesystem fsNameSystem = FSNamesystem.getFSNamesystem();
// File/block counts (int-cast: see class comment above).
filesTotal.set((int)fsNameSystem.getFilesTotal());
blocksTotal.set((int)fsNameSystem.getBlocksTotal());
// Capacity figures are byte counts rounded to GB to fit in an int.
capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
getCapacityRemaining()));
totalLoad.set(fsNameSystem.getTotalLoad());
// Block-state gauges copied from the namesystem's lightweight counters.
corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
excessBlocks.set((int)fsNameSystem.getExcessBlocks());
pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
pendingReplicationBlocks.set((int)fsNameSystem.
getPendingReplicationBlocks());
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
getScheduledReplicationBlocks());
missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
blockCapacity.set(fsNameSystem.getBlockCapacity());
// Publish every registered metric with the freshly copied values.
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
}
}
// Flush to the metrics subsystem outside the lock.
metricsRecord.update();
}