当前位置: 首页>>代码示例>>Java>>正文


Java MetricsUtil.createRecord方法代码示例

本文整理汇总了Java中org.apache.hadoop.metrics.MetricsUtil.createRecord方法的典型用法代码示例。如果您正苦于以下问题:Java MetricsUtil.createRecord方法的具体用法?Java MetricsUtil.createRecord怎么用?Java MetricsUtil.createRecord使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.metrics.MetricsUtil的用法示例。


在下文中一共展示了MetricsUtil.createRecord方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: RegionServerDynamicMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Wires up the dynamic region-server statistics: creates the metrics
 * record in the "hbase" context, registers this object as a periodic
 * updater, and exposes the statistics bean for JMX.
 *
 * @param regionServer the region server whose dynamic metrics are published
 */
private RegionServerDynamicMetrics(HRegionServer regionServer) {
  this.regionServer = regionServer;
  this.context = MetricsUtil.getContext("hbase");
  this.metricsRecord =
      MetricsUtil.createRecord(this.context, "RegionServerDynamicStatistics");
  context.registerUpdater(this);
  this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry);
  try {
    // The superclass method is not directly accessible from here, so it is
    // looked up reflectively and opened up for later invocation whenever
    // the dynamic metrics list changes.
    updateMbeanInfoIfMetricsListChanged =
        this.rsDynamicStatistics.getClass().getSuperclass()
            .getDeclaredMethod("updateMbeanInfoIfMetricsListChanged");
    updateMbeanInfoIfMetricsListChanged.setAccessible(true);
  } catch (Exception e) {
    LOG.error(e);
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:19,代码来源:RegionServerDynamicMetrics.java

示例2: RegionServerMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the "regionserver" metrics record in the "hbase" context, tags
 * it with the current thread's name, starts JVM and HBase info metrics,
 * and exports the statistics through JMX. An optional extended-statistics
 * period may be supplied via the "hbase.extendedperiod" attribute.
 */
public RegionServerMetrics() {
  MetricsContext ctx = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(ctx, "regionserver");
  String threadName = Thread.currentThread().getName();
  metricsRecord.setTag("RegionServer", threadName);
  ctx.registerUpdater(this);

  // JVM-level metrics for this process.
  JvmMetrics.init("RegionServer", threadName);
  // HBase build/version information metrics.
  HBaseInfo.init();

  // Expose the record contents as a JMX bean.
  statistics = new RegionServerStatistics(this.registry, threadName);

  // Optional override of the extended period; the attribute value is in
  // seconds, the field is kept in milliseconds.
  try {
    Object period = ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
    if (period instanceof String) {
      this.extendedPeriod = Long.parseLong((String) period) * 1000;
    }
  } catch (IOException ioe) {
    LOG.info("Couldn't load ContextFactory for Metrics config info");
  }

  LOG.info("Initialized");
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:27,代码来源:RegionServerMetrics.java

示例3: HBaseRpcMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the RPC "metrics" record in the "rpc" context, tags it with the
 * server port, sets up per-method counters for every exposed HBase RPC
 * interface, and exports the statistics via JMX.
 *
 * @param hostName host the RPC server is bound to
 * @param port     port the RPC server listens on
 */
public HBaseRpcMetrics(String hostName, String port) {
  MetricsContext ctx = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(ctx, "metrics");
  metricsRecord.setTag("port", port);

  LOG.info("Initializing RPC Metrics with hostName="
      + hostName + ", port=" + port);
  ctx.registerUpdater(this);

  // One counter per method of each RPC protocol interface.
  initMethods(HMasterInterface.class);
  initMethods(HMasterRegionInterface.class);
  initMethods(HRegionInterface.class);

  rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port);
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:17,代码来源:HBaseRpcMetrics.java

示例4: ClusterManagerMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the cluster-manager metrics record and all of its counters:
 * per-resource-type request/grant/revoke/release counts, slot and task
 * counts, session-status metrics, and node/session/call gauges. Registry
 * and context names come from the enclosing class.
 * @param types The available resource types.
 */
public ClusterManagerMetrics(Collection<ResourceType> types) {
  context = MetricsUtil.getContext(CONTEXT_NAME);
  metricsRecord = MetricsUtil.createRecord(context, CONTEXT_NAME);
  // Lifecycle counters for resources, one map entry per resource type.
  typeToResourceRequested = createTypeToResourceCountMap(types, "requested");
  typeToResourceGranted = createTypeToResourceCountMap(types, "granted");
  typeToResourceRevoked = createTypeToResourceCountMap(types, "revoked");
  typeToResourceReleased = createTypeToResourceCountMap(types, "released");
  // Scheduler-facing gauges, again keyed by resource type.
  typeToPendingCount = createTypeToCountMap(types, "pending");
  typeToRunningCount = createTypeToCountMap(types, "running");
  typeToTotalSlots = createTypeToCountMap(types, "total");
  typeToFreeSlots = createTypeToCountMap(types, "free");
  typeToSchedulerRunTime = createTypeToCountMap(types, "scheduler_runtime");
  // Per-session-status metric objects (e.g. successful/killed sessions).
  sessionStatusToMetrics = createSessionStatusToMetricsMap();
  // Cluster-wide node and session gauges.
  aliveNodes = new MetricsIntValue("alive_nodes", registry);
  deadNodes = new MetricsIntValue("dead_nodes", registry);
  blacklistedNodes = new MetricsIntValue("blacklisted_nodes", registry);
  numRunningSessions = new MetricsIntValue("num_running_sessions", registry);
  totalSessionCount = new MetricsTimeVaryingInt("total_sessions", registry);
  pendingCallsCount = new MetricsIntValue("num_pending_calls", registry);
  numCJTFailures = new MetricsTimeVaryingInt("num_cjt_failures", registry);
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:26,代码来源:ClusterManagerMetrics.java

示例5: JvmMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the JVM metrics record in the "jvm" context, tags it with the
 * owning process name and session id, and registers this instance as a
 * periodic metrics updater.
 *
 * @param processName name of the owning process, recorded as a tag
 * @param sessionId   session identifier, recorded as a tag
 * @param recordName  name under which the metrics record is created
 */
private JvmMetrics(String processName, String sessionId, String recordName) {
  MetricsContext jvmContext = MetricsUtil.getContext("jvm");
  metrics = MetricsUtil.createRecord(jvmContext, recordName);
  metrics.setTag("processName", processName);
  metrics.setTag("sessionId", sessionId);
  jvmContext.registerUpdater(this);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:10,代码来源:JvmMetrics.java

示例6: LocalJobRunnerMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up metrics for the local job runner: starts JVM metrics and
 * creates the map-reduce metrics record in the "mapred" context, tagged
 * with the configured session id.
 *
 * @param conf job configuration supplying the session id
 */
public LocalJobRunnerMetrics(JobConf conf) {
  String sessionId = conf.getSessionId();
  // Start JVM-level metrics for this process.
  JvmMetrics.init("JobTracker", sessionId);
  MetricsContext ctx = MetricsUtil.getContext("mapred");
  // The record keeps the "jobtracker" name for compatibility with
  // consumers of the real JobTracker's metrics.
  metricsRecord = MetricsUtil.createRecord(ctx, "jobtracker");
  metricsRecord.setTag("sessionId", sessionId);
  ctx.registerUpdater(this);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:LocalJobRunnerMetrics.java

示例7: ShuffleClientMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the "shuffleInput" metrics record in the "mapred" context and
 * tags it so shuffle-input activity can be correlated with its user, job,
 * task attempt, and session.
 *
 * @param reduceId the reduce task attempt this shuffle belongs to
 * @param jobConf  job configuration supplying parallelism and tags
 */
ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
  this.numCopiers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);

  MetricsContext ctx = MetricsUtil.getContext("mapred");
  this.shuffleMetrics = MetricsUtil.createRecord(ctx, "shuffleInput");
  // Identify the owner of this shuffle in the emitted metrics.
  this.shuffleMetrics.setTag("user", jobConf.getUser());
  this.shuffleMetrics.setTag("jobName", jobConf.getJobName());
  this.shuffleMetrics.setTag("jobId", reduceId.getJobID().toString());
  this.shuffleMetrics.setTag("taskId", reduceId.toString());
  this.shuffleMetrics.setTag("sessionId", jobConf.getSessionId());
  ctx.registerUpdater(this);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:ShuffleClientMetrics.java

示例8: ReplicationSourceMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Registers the "replication" metrics record in the "hbase" context,
 * tags it with the current thread's name, and exports a JMX bean named
 * after the monitored source.
 *
 * @param id Name of the source this class is monitoring
 */
public ReplicationSourceMetrics(String id) {
  MetricsContext ctx = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(ctx, "replication");
  metricsRecord.setTag("RegionServer", Thread.currentThread().getName());
  ctx.registerUpdater(this);
  // The id is embedded in the JMX bean name, so percent-encode it; fall
  // back to a sentinel if the encoding is unavailable (should not happen).
  try {
    id = URLEncoder.encode(id, "UTF8");
  } catch (UnsupportedEncodingException e) {
    id = "CAN'T ENCODE UTF8";
  }
  // Export for JMX.
  replicationStatistics = new ReplicationStatistics(this.registry, "ReplicationSource for " + id);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:19,代码来源:ReplicationSourceMetrics.java

示例9: BalancerMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up balancer metrics: starts JVM metrics, registers the balancer
 * activity MBean, and creates the "balancer" record in the "dfs" context
 * tagged with the configured session id.
 *
 * @param conf            configuration supplying "session.id"
 * @param namenodeAddress address of the namenode being balanced against
 */
public BalancerMetrics(Configuration conf, InetSocketAddress namenodeAddress) {
  String sessionId = conf.get("session.id");
  JvmMetrics.init("Balancer", sessionId);
  // JMX bean for balancer activity.
  balancerActivityMBean = new BalancerActivityMBean(registry, conf, namenodeAddress);

  MetricsContext ctx = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(ctx, "balancer");
  metricsRecord.setTag("sessionId", sessionId);
  ctx.registerUpdater(this);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:11,代码来源:BalancerMetrics.java

示例10: ThriftMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the Thrift server metrics record, tags it with the server port,
 * and registers per-method metrics for the given Thrift interface. The
 * slow-response threshold is read from configuration.
 *
 * @param port  port the Thrift server listens on
 * @param conf  configuration supplying the slow-response threshold
 * @param iface Thrift interface whose methods get individual metrics
 */
public ThriftMetrics(int port, Configuration conf, Class<?> iface) {
  slowResponseTime = conf.getLong(
      SLOW_RESPONSE_NANO_SEC, DEFAULT_SLOW_RESPONSE_NANO_SEC);
  context = MetricsUtil.getContext(CONTEXT_NAME);
  metricsRecord = MetricsUtil.createRecord(context, CONTEXT_NAME);
  metricsRecord.setTag("port", port + "");

  LOG.info("Initializing RPC Metrics with port=" + port);
  context.registerUpdater(this);

  // One metric per declared method on the served interface.
  createMetricsForMethods(iface);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:15,代码来源:ThriftMetrics.java

示例11: DataNodeMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up datanode metrics: starts JVM metrics, registers the datanode
 * activity MBean for the given storage, and creates the "datanode" record
 * in the "dfs" context tagged with the configured session id.
 *
 * @param conf      configuration supplying "session.id"
 * @param storageId identifier of this datanode's storage, used for JMX
 */
public DataNodeMetrics(Configuration conf, String storageId) {
  String sessionId = conf.get("session.id");
  // Report Java VM metrics alongside the datanode's own.
  JvmMetrics.init("DataNode", sessionId);

  // JMX bean for datanode activity.
  datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId);

  MetricsContext ctx = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(ctx, "datanode");
  metricsRecord.setTag("sessionId", sessionId);
  ctx.registerUpdater(this);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:16,代码来源:DataNodeMetrics.java

示例12: ShuffleServerMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the "shuffleOutput" metrics record in the "mapred" context,
 * tagged with the session id, and registers this object as an updater.
 *
 * @param conf job configuration supplying the session id
 */
ShuffleServerMetrics(JobConf conf) {
  MetricsContext ctx = MetricsUtil.getContext("mapred");
  shuffleMetricsRecord = MetricsUtil.createRecord(ctx, "shuffleOutput");
  this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  ctx.registerUpdater(this);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:8,代码来源:TaskTracker.java

示例13: ShuffleServerMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up server-side shuffle metrics: a "shuffleOutput" record in the
 * "mapred" context, tagged with this job's session id.
 *
 * @param conf job configuration supplying the session id
 */
ShuffleServerMetrics(JobConf conf) {
  MetricsContext metricsContext = MetricsUtil.getContext("mapred");
  this.shuffleMetricsRecord =
      MetricsUtil.createRecord(metricsContext, "shuffleOutput");
  this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  metricsContext.registerUpdater(this);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:8,代码来源:TaskTracker.java

示例14: TaskTrackerMetricsInst

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up task-tracker metrics: starts JVM metrics and creates the
 * "tasktracker" record in the "mapred" context, tagged with the session
 * id. Also reads the extra-JVMs setting from the tracker's configuration.
 *
 * @param t the task tracker being instrumented
 */
public TaskTrackerMetricsInst(TaskTracker t) {
  super(t);
  JobConf conf = tt.getJobConf();
  extraJvms = conf.getInt(EXTRA_JVMS, 16);
  String sessionId = conf.getSessionId();
  // Report Java VM metrics for this process.
  JvmMetrics.init("TaskTracker", sessionId);
  MetricsContext ctx = MetricsUtil.getContext("mapred");
  // createRecord is guaranteed to never return null.
  metricsRecord = MetricsUtil.createRecord(ctx, "tasktracker");
  metricsRecord.setTag("sessionId", sessionId);
  ctx.registerUpdater(this);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:14,代码来源:TaskTrackerMetricsInst.java

示例15: ReplicationSinkMetrics

import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Registers the replication-sink metrics: a "replication" record in the
 * "hbase" context tagged with the current thread's name, plus a JMX bean
 * for the sink statistics.
 */
public ReplicationSinkMetrics() {
  MetricsContext ctx = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(ctx, "replication");
  metricsRecord.setTag("RegionServer", Thread.currentThread().getName());
  ctx.registerUpdater(this);
  // Export for JMX; the bean registers itself, so no reference is kept.
  new ReplicationStatistics(this.registry, "ReplicationSink");
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:13,代码来源:ReplicationSinkMetrics.java


注:本文中的org.apache.hadoop.metrics.MetricsUtil.createRecord方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。