

Java MetricsCollector.addRecord Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.metrics2.MetricsCollector.addRecord. If you are wondering what MetricsCollector.addRecord does, how to use it, or want to see it in context, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.metrics2.MetricsCollector.


The following presents 14 code examples of the MetricsCollector.addRecord method, sorted by popularity by default.
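Every example below follows the same pattern: a class implements org.apache.hadoop.metrics2.MetricsSource, and inside getMetrics it calls collector.addRecord(...) to obtain a MetricsRecordBuilder, then attaches tags, gauges, and counters to that builder. The following minimal sketch illustrates the pattern; the class name MyMetricsSource, the record name, and the metric names are hypothetical placeholders and are not taken from any of the projects referenced below.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical source: shows the addRecord -> builder -> tag/gauge/counter pattern.
public class MyMetricsSource implements MetricsSource {

  private volatile long requestCount = 0;

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // addRecord returns a MetricsRecordBuilder for one named metrics record.
    MetricsRecordBuilder builder = collector.addRecord("MyRecord")
        .setContext("myapp"); // optional metrics context tag
    builder
        .tag(Interns.info("Host", "Host emitting these metrics"), "localhost")
        .addGauge(Interns.info("QueueLength", "Current queue length"), 0)
        .addCounter(Interns.info("RequestCount", "Requests since start"), requestCount);
  }
}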

Example 1: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  StartupProgressView prog = startupProgress.createView();
  MetricsRecordBuilder builder = collector.addRecord(
    STARTUP_PROGRESS_METRICS_INFO);

  builder.addCounter(info("ElapsedTime", "overall elapsed time"),
    prog.getElapsedTime());
  builder.addGauge(info("PercentComplete", "overall percent complete"),
    prog.getPercentComplete());

  for (Phase phase: prog.getPhases()) {
    addCounter(builder, phase, "Count", " count", prog.getCount(phase));
    addCounter(builder, phase, "ElapsedTime", " elapsed time",
      prog.getElapsedTime(phase));
    addCounter(builder, phase, "Total", " total", prog.getTotal(phase));
    addGauge(builder, phase, "PercentComplete", " percent complete",
      prog.getPercentComplete(phase));
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: StartupProgressMetrics.java
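Example 1 relies on two private helpers, addCounter and addGauge, that are not shown in the snippet. A plausible reconstruction (a sketch, not the verbatim Hadoop source) is that they prefix the metric name and description with the phase's own name and description, using the same statically imported Interns.info seen in the example:

private static void addCounter(MetricsRecordBuilder builder, Phase phase,
    String nameSuffix, String descSuffix, long value) {
  // Produces per-phase metric names such as "<PhaseName>Count".
  builder.addCounter(info(phase.getName() + nameSuffix,
      phase.getDescription() + descSuffix), value);
}

private static void addGauge(MetricsRecordBuilder builder, Phase phase,
    String nameSuffix, String descSuffix, float value) {
  builder.addGauge(info(phase.getName() + nameSuffix,
      phase.getDescription() + descSuffix), value);
}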

Example 2: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);

  if (wrapper != null) {
    mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize())
        .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC),
            wrapper.getGeneralQueueLength())
        .addGauge(Interns.info(REPLICATION_QUEUE_NAME,
            REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength())
        .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC),
            wrapper.getPriorityQueueLength())
        .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME,
            NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections())
        .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME,
            NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount());
  }

  metricsRegistry.snapshot(mrb, all);
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: MetricsHBaseServerSourceImpl.java

Example 3: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName);

  if (stochasticCosts != null) {
    synchronized (stochasticCosts) {
      for (Map.Entry<String, Map<String, Double>> tableEntry : stochasticCosts.entrySet()) {
        for (Map.Entry<String, Double> costEntry : tableEntry.getValue().entrySet()) {
          String attrName = tableEntry.getKey() + TABLE_FUNCTION_SEP + costEntry.getKey();
          Double cost = costEntry.getValue();
          String functionDesc = costFunctionDescs.get(costEntry.getKey());
          if (functionDesc == null) functionDesc = costEntry.getKey();
          metricsRecordBuilder.addGauge(Interns.info(attrName, functionDesc), cost);
        }
      }
    }
  }
  metricsRegistry.snapshot(metricsRecordBuilder, all);
}
 
Developer: apache, Project: hbase, Lines: 20, Source: MetricsStochasticBalancerSourceImpl.java

Example 4: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info());
  if (wrapper != null) {
    // Summarize the tables
    Map<String,Entry<Long,Long>> tableUsages = wrapper.getTableSpaceUtilization();
    String tableSummary = "[]";
    if (tableUsages != null && !tableUsages.isEmpty()) {
      tableSummary = generateJsonQuotaSummary(tableUsages.entrySet(), "table");
    }
    record.tag(Interns.info(TABLE_QUOTA_USAGE_NAME, TABLE_QUOTA_USAGE_DESC), tableSummary);

    // Summarize the namespaces
    String nsSummary = "[]";
    Map<String,Entry<Long,Long>> namespaceUsages = wrapper.getNamespaceSpaceUtilization();
    if (namespaceUsages != null && !namespaceUsages.isEmpty()) {
      nsSummary = generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace");
    }
    record.tag(Interns.info(NS_QUOTA_USAGE_NAME, NS_QUOTA_USAGE_DESC), nsSummary);
  }
  metricsRegistry.snapshot(record, all);
}
 
Developer: apache, Project: hbase, Lines: 23, Source: MetricsMasterQuotaSourceImpl.java

Example 5: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {

  MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName);

  // masterWrapper can be null because this function is called inside of init.
  if (masterWrapper != null) {
    metricsRecordBuilder
        .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
            MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime())
        .addGauge(Interns.info(MASTER_START_TIME_NAME,
            MASTER_START_TIME_DESC), masterWrapper.getStartTime())
        .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC),
            masterWrapper.getAverageLoad())
        .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC),
              masterWrapper.getRegionServers())
        .addGauge(Interns.info(NUM_REGION_SERVERS_NAME,
            NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers())
        .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC),
              masterWrapper.getDeadRegionServers())
        .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME,
            NUMBER_OF_DEAD_REGION_SERVERS_DESC),
            masterWrapper.getNumDeadRegionServers())
        .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
            masterWrapper.getZookeeperQuorum())
        .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName())
        .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId())
        .tag(Interns.info(IS_ACTIVE_MASTER_NAME,
            IS_ACTIVE_MASTER_DESC),
            String.valueOf(masterWrapper.getIsActiveMaster()));
  }

  metricsRegistry.snapshot(metricsRecordBuilder, all);
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: MetricsMasterSourceImpl.java

Example 6: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
/**
 * Yes this is a get function that doesn't return anything.  Thanks Hadoop for breaking all
 * expectations of java programmers.  Instead of returning anything Hadoop metrics expects
 * getMetrics to push the metrics into the collector.
 *
 * @param collector the collector
 * @param all       get all the metrics regardless of when they last changed.
 */
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder mrb = collector.addRecord(metricsName);

  if (regionSources != null) {
    for (MetricsRegionSource regionMetricSource : regionSources) {
      if (regionMetricSource instanceof MetricsRegionSourceImpl) {
        ((MetricsRegionSourceImpl) regionMetricSource).snapshot(mrb, all);
      }
    }
    mrb.addGauge(Interns.info(NUM_REGIONS, NUMBER_OF_REGIONS_DESC), regionSources.size());
    metricsRegistry.snapshot(mrb, all);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: MetricsRegionAggregateSourceImpl.java

Example 7: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder builder = collector.addRecord(recordName);
  if (null != context) {
    builder.setContext(context);
  }

  // Synchronizing here ensures that the dropwizard metrics collection side is excluded from executing
  // at the same time we are pulling elements from the queues.
  synchronized (this) {
    snapshotAllMetrics(builder);
  }

  metrics2Registry.snapshot(builder, all);
}
 
Developer: joshelser, Project: dropwizard-hadoop-metrics2, Lines: 15, Source: HadoopMetrics2Reporter.java

Example 8: snapshotAllMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
/**
 * Iterates over the MetricRegistry and adds them to the {@code collector}.
 *
 * @param collector A metrics collector
 */
public void snapshotAllMetrics(MetricRegistry metricRegistry,
                               MetricsCollector collector) {
  MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo();
  MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(),
      info.getMetricsDescription()));
  builder.setContext(info.getMetricsContext());

  snapshotAllMetrics(metricRegistry, builder);
}
 
Developer: apache, Project: hbase, Lines: 15, Source: HBaseMetrics2HadoopMetricsAdapter.java

Example 9: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {

  MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName);

  // masterWrapper can be null because this function is called inside of init.
  if (masterWrapper != null) {
    metricsRecordBuilder
        .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC),
            masterWrapper.getMergePlanCount())
        .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC),
            masterWrapper.getSplitPlanCount())
        .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME,
            MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime())
        .addGauge(Interns.info(MASTER_START_TIME_NAME,
            MASTER_START_TIME_DESC), masterWrapper.getStartTime())
        .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, MASTER_FINISHED_INITIALIZATION_TIME_DESC),
            masterWrapper.getMasterInitializationTime())
        .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC),
            masterWrapper.getAverageLoad())
        .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC),
              masterWrapper.getRegionServers())
        .addGauge(Interns.info(NUM_REGION_SERVERS_NAME,
            NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers())
        .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC),
              masterWrapper.getDeadRegionServers())
        .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME,
            NUMBER_OF_DEAD_REGION_SERVERS_DESC),
            masterWrapper.getNumDeadRegionServers())
        .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC),
            masterWrapper.getZookeeperQuorum())
        .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName())
        .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId())
        .tag(Interns.info(IS_ACTIVE_MASTER_NAME,
            IS_ACTIVE_MASTER_DESC),
            String.valueOf(masterWrapper.getIsActiveMaster()));
  }

  metricsRegistry.snapshot(metricsRecordBuilder, all);
}
 
Developer: apache, Project: hbase, Lines: 41, Source: MetricsMasterSourceImpl.java

Example 10: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName);

  // masterWrapper can be null because this function is called inside of init.
  if (masterWrapper != null) {
    metricsRecordBuilder
        .addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC),
            masterWrapper.getNumWALFiles());
  }

  metricsRegistry.snapshot(metricsRecordBuilder, all);
}
 
Developer: apache, Project: hbase, Lines: 14, Source: MetricsMasterProcSourceImpl.java

Example 11: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
/**
 * Yes this is a get function that doesn't return anything.  Thanks Hadoop for breaking all
 * expectations of java programmers.  Instead of returning anything Hadoop metrics expects
 * getMetrics to push the metrics into the collector.
 *
 * @param collector the collector
 * @param all       get all the metrics regardless of when they last changed.
 */
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  MetricsRecordBuilder mrb = collector.addRecord(metricsName);

  if (tableSources != null) {
    for (MetricsTableSource tableMetricSource : tableSources.values()) {
      if (tableMetricSource instanceof MetricsTableSourceImpl) {
        ((MetricsTableSourceImpl) tableMetricSource).snapshot(mrb, all);
      }
    }
    mrb.addGauge(Interns.info(NUM_TABLES, NUMBER_OF_TABLES_DESC), tableSources.size());
    metricsRegistry.snapshot(mrb, all);
  }
}
 
Developer: apache, Project: hbase, Lines: 23, Source: MetricsTableAggregateSourceImpl.java

Example 12: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);

  if (wrapper != null) {
    mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize())
        .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC),
            wrapper.getGeneralQueueLength())
        .addGauge(Interns.info(REPLICATION_QUEUE_NAME,
            REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength())
        .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC),
            wrapper.getPriorityQueueLength())
        .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME,
            NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections())
        .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME,
            NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount())
        .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME,
            NUM_GENERAL_CALLS_DROPPED_DESC), wrapper.getNumGeneralCallsDropped())
        .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME,
            NUM_LIFO_MODE_SWITCHES_DESC), wrapper.getNumLifoModeSwitches())
        .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC),
            wrapper.getWriteQueueLength())
        .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC),
            wrapper.getReadQueueLength())
        .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC),
            wrapper.getScanQueueLength())
        .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC),
          wrapper.getActiveWriteRpcHandlerCount())
        .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC),
          wrapper.getActiveReadRpcHandlerCount())
        .addGauge(Interns.info(NUM_ACTIVE_SCAN_HANDLER_NAME, NUM_ACTIVE_SCAN_HANDLER_DESC),
          wrapper.getActiveScanRpcHandlerCount());
  }

  metricsRegistry.snapshot(mrb, all);
}
 
Developer: apache, Project: hbase, Lines: 37, Source: MetricsHBaseServerSourceImpl.java

Example 13: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);

  // wrapper can be null because this function is called inside of init.
  if (wrapper != null) {
    mrb.addCounter(Interns.info(CHECKSUM_FAILURES_KEY, CHECKSUM_FAILURES_DESC),
      wrapper.getChecksumFailures());
  }

  metricsRegistry.snapshot(mrb, all);
}
 
Developer: apache, Project: hbase, Lines: 13, Source: MetricsIOSourceImpl.java

Example 14: getMetrics

import org.apache.hadoop.metrics2.MetricsCollector; // import the package/class this method depends on
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  collector.addRecord("foo");
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 5, Source: TestMetricsAnnotations.java
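None of these getMetrics implementations are invoked directly by application code; the Hadoop metrics system calls them when it snapshots registered sources. As a closing usage sketch (assuming the hypothetical MyMetricsSource from the introduction, with placeholder names), a source is typically wired up like this:

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsBootstrap {
  public static void main(String[] args) {
    // Initialize the process-wide metrics system under a prefix (placeholder name).
    MetricsSystem ms = DefaultMetricsSystem.initialize("myapp");
    // Register the source; the metrics system later calls getMetrics(collector, all),
    // and each collector.addRecord(...) call becomes one emitted metrics record.
    ms.register("MyMetricsSource", "Example source that uses addRecord", new MyMetricsSource());
  }
}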


Note: The org.apache.hadoop.metrics2.MetricsCollector.addRecord examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code; please follow each project's license when distributing or using it. Do not reproduce this article without permission.