

Java MetricsRecord.metrics Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.metrics2.MetricsRecord.metrics. If you have been wrestling with questions such as: What exactly does MetricsRecord.metrics do? How is MetricsRecord.metrics called? Where can I find working examples of MetricsRecord.metrics? then the hand-picked code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.metrics2.MetricsRecord.


The sections below present 15 code examples of the MetricsRecord.metrics method, drawn from open-source projects and ordered roughly by popularity.
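
All of the examples share one basic shape: a class (usually a MetricsSink) receives a MetricsRecord and iterates record.metrics() to read each AbstractMetric's name and value, often alongside record.tags(). The following is a minimal, self-contained sketch of that pattern; the LoggingSink class is hypothetical, and the imports assume the Hadoop 2.x-era API used by most of the projects below:

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;

public class LoggingSink implements MetricsSink {
  @Override
  public void init(SubsetConfiguration conf) {
    // this sketch needs no configuration
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    // every record carries a timestamp, a context/name pair,
    // a collection of tags, and an Iterable of metrics
    System.out.println(record.timestamp() + " " + record.context()
        + "." + record.name());
    for (MetricsTag tag : record.tags()) {
      System.out.println("  tag    " + tag.name() + "=" + tag.value());
    }
    for (AbstractMetric metric : record.metrics()) {
      System.out.println("  metric " + metric.name() + "=" + metric.value());
    }
  }

  @Override
  public void flush() {
    // nothing is buffered in this sketch
  }
}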

Example 1: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  String separator = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
  }
  writer.println();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 25, Source: FileSink.java
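
For reference, each record written by this sink becomes one line of the form timestamp context.name: tag=value, ..., metric=value. With hypothetical values:

1454523806000 dfs.datanode: Hostname=node1.example.com, BytesWritten=1024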

Example 2: update

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
/**
 * Update the cache and return the current cached record
 * @param mr the update record
 * @param includingTags cache tag values (for later lookup by name) if true
 * @return the updated cache record
 */
public Record update(MetricsRecord mr, boolean includingTags) {
  String name = mr.name();
  RecordCache recordCache = map.get(name);
  if (recordCache == null) {
    recordCache = new RecordCache();
    map.put(name, recordCache);
  }
  Collection<MetricsTag> tags = mr.tags();
  Record record = recordCache.get(tags);
  if (record == null) {
    record = new Record();
    recordCache.put(tags, record);
  }
  for (AbstractMetric m : mr.metrics()) {
    record.metrics.put(m.name(), m);
  }
  if (includingTags) {
    // mostly for some sinks that include tags as part of a dense schema
    for (MetricsTag t : mr.tags()) {
      record.tags.put(t.name(), t.value());
    }
  }
  return record;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 31, Source: MetricsCache.java
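
A consumer typically keeps one MetricsCache per sink and reads merged values back by name after each update. A minimal usage sketch, assuming the stock org.apache.hadoop.metrics2.util.MetricsCache and hypothetical metric/tag names:

import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.util.MetricsCache;

public class CacheExample {
  private final MetricsCache cache = new MetricsCache();

  public void consume(MetricsRecord record) {
    // merge the incoming record; includingTags=true also caches tag values
    MetricsCache.Record cached = cache.update(record, true);
    Number bytes = cached.getMetric("BytesWritten"); // null if absent
    String host = cached.getTag("Hostname");         // present because includingTags=true
    System.out.println(host + " -> " + bytes);
  }
}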

Example 3: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
  final String prefix = "threadSourceRec";
  if (record.name().startsWith(prefix)) {
    final int recordNumber = Integer.parseInt(
        record.name().substring(prefix.length()));
    ArrayList<String> names = new ArrayList<String>();
    for (AbstractMetric m : record.metrics()) {
      if (m.name().equalsIgnoreCase("g1")) {
        collected[recordNumber].set(m.value().longValue());
        return;
      }
      names.add(m.name());
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 17, Source: TestMetricsSystemImpl.java

Example 4: getLatestMetricValue

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
public Number getLatestMetricValue(String metricName, Number defaultValue)
    throws IndexOutOfBoundsException{
  boolean found = false;
  Number ret = null;
  for (MetricsRecord currentRecord : allMetrics) {
    // First check if this record is coming for my file system.
    if (wasGeneratedByMe(currentRecord)) {
      for (AbstractMetric currentMetric : currentRecord.metrics()) {
        if (currentMetric.name().equalsIgnoreCase(metricName)) {
          found = true;
          ret = currentMetric.value();
          break;
        }
      }
    }
  }
  if (!found) {
    if (defaultValue != null) {
      return defaultValue;
    }
    throw new IndexOutOfBoundsException(metricName);
  }
  return ret;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: AzureBlobStorageTestAccount.java

Example 5: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
/**
 * add a metrics record in the sink
 *
 * @param record the record to add
 */
@Override
public void putMetrics(MetricsRecord record) {
  // let us do this only once, otherwise
  // our count could go out of sync.
  if (count == 0) {
    for (AbstractMetric m : record.metrics()) {
      if (nameMap.contains(m.name())) {
        count++;
      }
    }

    for (MetricsTag t : record.tags()) {
      if (nameMap.contains(t.name())) {
        count++;
      }
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: TestDataNodeFSDataSetSink.java

Example 6: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  String separator = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tag.name());
    writer.print("=");
    writer.print(String.valueOf(tag.value()));
  }
  for (Metric metric : record.metrics()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
  }
  writer.println();
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 25, Source: FileSink.java

Example 7: update

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
/**
 * Update the cache and return the cached record
 * @param mr the update record
 * @param includingTags cache tag values (for later lookup by name) if true
 * @return the updated cached record
 */
public Record update(MetricsRecord mr, boolean includingTags) {
  String name = mr.name();
  RecMap recMap = map.get(name);
  if (recMap == null) {
    recMap = new RecMap();
    map.put(name, recMap);
  }
  Collection<MetricsTag> tags = (Collection<MetricsTag>)mr.tags();
  Record rec = recMap.get(tags);
  if (rec == null) {
    rec = new Record();
    recMap.put(tags, rec);
  }
  for (Metric m : mr.metrics()) {
    rec.metrics.put(m.name(), m);
  }
  if (includingTags) {
    // mostly for some sinks that include tags as part of a dense schema
    for (MetricsTag t : mr.tags()) {
      rec.tags.put(t.name(), t.value());
    }
  }
  return rec;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 31, Source: MetricsCache.java

Example 8: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
  synchronized (lock) {
    rollLogDirIfNeeded();

    if (currentOutStream != null) {
      currentOutStream.printf("%d %s.%s", record.timestamp(),
          record.context(), record.name());

      String separator = ": ";

      for (MetricsTag tag : record.tags()) {
        currentOutStream.printf("%s%s=%s", separator, tag.name(),
            tag.value());
        separator = ", ";
      }

      for (AbstractMetric metric : record.metrics()) {
        currentOutStream.printf("%s%s=%s", separator, metric.name(),
            metric.value());
      }

      currentOutStream.println();

      // If we don't hflush(), the data may not be written until the file is
      // closed. The file won't be closed until the top of the hour *AND*
      // another record is received. Calling hflush() makes sure that the data
      // is complete at the top of the hour.
      try {
        currentFSOutStream.hflush();
      } catch (IOException ex) {
        throwMetricsException("Failed flushing the stream", ex);
      }

      checkForErrors("Unable to write to log file");
    } else if (!ignoreError) {
      throwMetricsException("Unable to write to log file");
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 41, Source: RollingFileSystemSink.java

Example 9: verifyExpectedCalls

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
private void verifyExpectedCalls(long expectedCalls, int memory, int vcores)
  throws InterruptedException {
  boolean verified = false;
  int count = 0;
  while (count < 100) {
    if (scheduler.fsOpDurations.hasUpdateThreadRunChanged()) {
      break;
    }
    count++;
    Thread.sleep(10);
  }
  assertTrue("Update Thread has not run based on its metrics",
      scheduler.fsOpDurations.hasUpdateThreadRunChanged());
  assertEquals("Root queue metrics memory does not have expected value",
      memory, scheduler.getRootQueueMetrics().getAvailableMB());
  assertEquals("Root queue metrics cpu does not have expected value",
      vcores, scheduler.getRootQueueMetrics().getAvailableVirtualCores());

  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  scheduler.fsOpDurations.getMetrics(collector, true);
  MetricsRecord record = collector.getRecords().get(0);
  for (AbstractMetric abstractMetric : record.metrics()) {
    if (abstractMetric.name().contains("UpdateThreadRunNumOps")) {
      assertEquals("Update Thread did not run expected number of times " +
              "based on metric record count",
          expectedCalls,
          abstractMetric.value());
      verified = true;
    }
  }
  assertTrue("Did not find metric for UpdateThreadRunNumOps", verified);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: TestSchedulingUpdate.java

Example 10: recordToJson

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
StringBuilder recordToJson(MetricsRecord record) {
  // Create a json object from a metrics record.
  StringBuilder jsonLines = new StringBuilder();
  Long timestamp = record.timestamp();
  Date currDate = new Date(timestamp);
  SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
  String date = dateFormat.format(currDate);
  SimpleDateFormat timeFormat = new SimpleDateFormat("hh:mm:ss");
  String time = timeFormat.format(currDate);
  String hostname = new String("null");
  try {
    hostname = InetAddress.getLocalHost().getHostName();
  } catch (Exception e) {
    LOG.warn("Error getting Hostname, going to continue");
  }
  jsonLines.append("{\"hostname\": \"" + hostname);
  jsonLines.append("\", \"timestamp\": " + timestamp);
  jsonLines.append(", \"date\": \"" + date);
  jsonLines.append("\",\"time\": \"" + time);
  jsonLines.append("\",\"name\": \"" + record.name() + "\" ");
  for (MetricsTag tag : record.tags()) {
    jsonLines.append(
        ", \"" + tag.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
    jsonLines.append(" \"" + tag.value().toString() + "\"");
  }
  for (AbstractMetric m : record.metrics()) {
    jsonLines.append(
        ", \"" + m.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
    jsonLines.append(" \"" + m.value().toString() + "\"");
  }
  jsonLines.append("}");
  return jsonLines;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 34, Source: TestKafkaMetrics.java
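
With hypothetical values, a record serializes to roughly the following single-line JSON object. Note two quirks of the builder above: metric values are written as JSON strings rather than numbers, and the "hh:mm:ss" pattern is a 12-hour clock with no AM/PM marker, so times are ambiguous:

{"hostname": "node1.example.com", "timestamp": 1454523806000, "date": "2016-02-03","time": "07:43:26","name": "recName" , "Context": "dfs", "BytesWritten": "1024"}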

Example 11: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
    StringBuilder lines = new StringBuilder();
    StringBuilder metricsPathPrefix = new StringBuilder();

    // Configure the hierarchical place to display the graph.
    metricsPathPrefix.append(metricsPrefix).append(".")
            .append(record.context()).append(".").append(record.name());

    for (MetricsTag tag : record.tags()) {
        if (tag.value() != null) {
            metricsPathPrefix.append(".");
            metricsPathPrefix.append(tag.name());
            metricsPathPrefix.append("=");
            metricsPathPrefix.append(tag.value());
        }
    }

    // The record timestamp is in milliseconds while Graphite expects an epoc time in seconds.
    long timestamp = record.timestamp() / 1000L;

    // Collect datapoints.
    for (AbstractMetric metric : record.metrics()) {
        lines.append(
                metricsPathPrefix.toString() + "."
                        + metric.name().replace(' ', '.')).append(" ")
                .append(metric.value()).append(" ").append(timestamp)
                .append("\n");
    }

    try {
        if(writer != null){
          writer.write(lines.toString());
        } else {
          throw new MetricsException("Writer in GraphiteSink is null!");
        }
    } catch (Exception e) {
        throw new MetricsException("Error sending metrics", e);
    }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 41, Source: GraphiteSink.java

Example 12: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
	logger.info("BEGIN RECORD");
	logger.info("BEGIN RECORD STATS");
	logger.info("Record Context: " + record.context());
	logger.info("Record Description: " + record.description());
	logger.info("Record HashCode: " + record.hashCode());
	logger.info("Record Name: " + record.name());
	logger.info("Record String: " + record.toString());
	logger.info("END RECORD STATS");
	
	logger.info("BEGIN RECORD TAGS");
	for (MetricsTag tag : record.tags()) {
		logger.info("Tag Name: " + tag.name() +", Value: " + tag.value());
	} 
	logger.info("END RECORD TAGS");
	
	logger.info("BEGIN METRICS");
	for (AbstractMetric metric : record.metrics()) {
		logger.info("Metric Name: " + metric.name() +", Value: " + metric.value());
		logger.info("Metric Type: " + metric.type());
		logger.info("Metric Description: " + metric.description());
		logger.info("Metric HashCode: " + metric.hashCode());
		logger.info("Metric String: " + metric.toString());
		logger.info("");
	}						
	logger.info("END METRICS");
	logger.info("END RECORD");
}
 
Developer: sschwartzman, Project: newrelic-hadoop-plugin, Lines: 30, Source: SimpleSink.java

Example 13: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {
    StringBuilder lines = new StringBuilder();
    StringBuilder metricsPathPrefix = new StringBuilder();

    // Configure the hierarchical place to display the graph.
    metricsPathPrefix.append(metricsPrefix).append(".")
            .append(record.context()).append(".").append(record.name());

    for (MetricsTag tag : record.tags()) {
        if (tag.value() != null) {
            metricsPathPrefix.append(".");
            metricsPathPrefix.append(tag.name());
            metricsPathPrefix.append("=");
            metricsPathPrefix.append(tag.value());
        }
    }

    // The record timestamp is in milliseconds while Graphite expects an epoc time in seconds.
    long timestamp = record.timestamp() / 1000L;

    // Collect datapoints.
    for (AbstractMetric metric : record.metrics()) {
        lines.append(
                metricsPathPrefix.toString() + "."
                        + metric.name().replace(' ', '.')).append(" ")
                .append(metric.value()).append(" ").append(timestamp)
                .append("\n");
    }

    try {
      graphite.write(lines.toString());
    } catch (Exception e) {
      LOG.warn("Error sending metrics to Graphite", e);
      try {
        graphite.close();
      } catch (Exception e1) {
        throw new MetricsException("Error closing connection to Graphite", e1);
      }
    }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 42, Source: GraphiteSink.java
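
Unlike Example 11, this variant logs write failures and attempts to close the connection instead of rethrowing immediately. Either way, each datapoint becomes one line of Graphite's plaintext protocol, path value epoch_seconds, where the path concatenates the prefix, context, record name, tag pairs, and metric name. With a hypothetical prefix and tag:

hadoop.dfs.datanode.Hostname=node1.BytesWritten 1024 1454523806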

Example 14: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(MetricsRecord record) {

  String hn = hostName;
  String ctx = record.context();
  String sn = serviceName;

  for (MetricsTag tag : record.tags()) {
    if (tag.info().name().equals(MsInfo.Hostname.name())
        && tag.value() != null) {
      hn = tag.value();
    } else if (tag.info().name().equals(MsInfo.Context.name())
        && tag.value() != null) {
      ctx = tag.value();
    } else if (tag.info().name().equals(MsInfo.ProcessName.name())
        && tag.value() != null) {
      sn = tag.value();
    }
  }

  StringBuilder buf = new StringBuilder();
  if (!skipHostname && hn != null) {
    int idx = hn.indexOf(".");
    if (idx == -1) {
      buf.append(hn).append(PERIOD);
    } else {
      buf.append(hn.substring(0, idx)).append(PERIOD);
    }
  }
  buf.append(sn).append(PERIOD);
  buf.append(ctx).append(PERIOD);
  buf.append(record.name().replaceAll("\\.", "-")).append(PERIOD);

  // Collect datapoints.
  for (AbstractMetric metric : record.metrics()) {
    String type = null;
    if (metric.type().equals(MetricType.COUNTER)) {
      type = "c";
    } else if (metric.type().equals(MetricType.GAUGE)) {
      type = "g";
    }
    StringBuilder line = new StringBuilder();
    line.append(buf.toString())
        .append(metric.name().replace(' ', '_'))
        .append(":")
        .append(metric.value())
        .append("|")
        .append(type);
    writeMetric(line.toString());
  }

}
 
Developer: nucypher, Project: hadoop-oss, Lines: 53, Source: StatsDSink.java
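
Each metric becomes one StatsD datagram of the form [shortHostname.]serviceName.context.recordName.metricName:value|type, where type is c for counters and g for gauges; dots in the record name are rewritten as dashes, and spaces in the metric name as underscores. With hypothetical values:

node1.datanode.dfs.datanode-activity.BytesWritten:1024|c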

Example 15: putMetrics

import org.apache.hadoop.metrics2.MetricsRecord; // import the package/class the method depends on
@Override
public void putMetrics(final MetricsRecord metricsRecord) {
    StringBuilder lines = new StringBuilder();
    LOG.debug("########## Start Put metric ##########");
    // Configure the hierarchical place to display the graph.
    LOG.debug("Going to put metricsRecord context: " + metricsRecord.context() + " with name: " + metricsRecord.name());
    StringBuilder tags = new StringBuilder();

    for (MetricsTag tag : metricsRecord.tags()) {
        if (tag.value() != null) {
            tags.append(tag.name())
                .append("=")
                .append(tag.value())
                .append(",");
        }
    }

    // Add the clustername to the tags String if defined
    tags.append("cluster=").append(this.clusterName);

    for (AbstractMetric metric : metricsRecord.metrics()) {

        // Because influx cannot handle -0 floats we are checking and converting them
        // to 0
        float f = metric.value().intValue();
        if (f < 0) {
            f = 0;
        }

        lines.append(metric.name().replace(" ", "_"))
            .append(",")
            .append(tags.toString().trim())
            .append(" ")
            .append("value=")
            .append(f)
            .append(" ")
            .append("\n");
    }
    try {
        if (lines.toString() != null) {
            influxDBService.write(lines.toString());
        }
    }
    catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: arnobroekhof, Project: hadoop-metrics-influxdb, Lines: 48, Source: InfluxdbSink.java
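
Each metric becomes one line of InfluxDB line protocol, measurement,tags value=..., with no timestamp, so the server assigns one on write. Two quirks of this excerpt are worth noting: metric.value().intValue() truncates fractional values before the float conversion, and the negative check zeroes every negative value, not just -0 as the comment suggests. A hypothetical output line:

BytesWritten,Hostname=node1,cluster=prod value=1024.0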


Note: the org.apache.hadoop.metrics2.MetricsRecord.metrics examples in this article were compiled by 纯净天空 (vimsky) from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.