This page collects typical usage examples of the Java class org.apache.kafka.common.metrics.Measurable. If you are unsure what the Measurable class is for or how to use it, the curated examples below should help.
The Measurable class belongs to the org.apache.kafka.common.metrics package. Ten code examples are shown below, ordered by popularity.
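Before the excerpts, here is the pattern they all share: implement Measurable's single method, double measure(MetricConfig config, long now), and register the instance through Metrics.addMetric(...), so the metrics framework (and any attached reporter, such as JmxReporter) polls the current value on demand. The following minimal sketch is illustrative rather than taken from any of the examples below — the class, group, and metric names are made up — and reading the value back via metricValue() assumes a Kafka clients version of 1.0 or newer.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;

public class MeasurableGaugeExample {
    public static void main(String[] args) {
        // Application state we want to expose as a gauge-style metric.
        final AtomicInteger assignedPartitions = new AtomicInteger(0);

        // Metrics implements Closeable, so try-with-resources cleans up the registry.
        try (Metrics metrics = new Metrics()) {
            MetricName name = metrics.metricName(
                "assigned-partitions",                          // metric name (illustrative)
                "example-metrics",                              // metric group (illustrative)
                "The number of partitions currently assigned"); // description

            // The Measurable is invoked whenever the metric is read; keep it cheap and side-effect free.
            metrics.addMetric(name, new Measurable() {
                @Override
                public double measure(MetricConfig config, long now) {
                    return assignedPartitions.get();
                }
            });

            assignedPartitions.set(3);
            System.out.println(metrics.metrics().get(name).metricValue()); // prints 3.0
        }
    }
}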
Example 1: ConsumerCoordinatorMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
private ConsumerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
    this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";

    this.commitLatency = metrics.sensor("commit-latency");
    this.commitLatency.add(metrics.metricName("commit-latency-avg",
        this.metricGrpName,
        "The average time taken for a commit request"), new Avg());
    this.commitLatency.add(metrics.metricName("commit-latency-max",
        this.metricGrpName,
        "The max time taken for a commit request"), new Max());
    this.commitLatency.add(metrics.metricName("commit-rate",
        this.metricGrpName,
        "The number of commit calls per second"), new Rate(new Count()));

    Measurable numParts =
        new Measurable() {
            public double measure(MetricConfig config, long now) {
                return subscriptions.assignedPartitions().size();
            }
        };
    metrics.addMetric(metrics.metricName("assigned-partitions",
        this.metricGrpName,
        "The number of partitions currently assigned to this consumer"), numParts);
}
Example 2: ConsumerCoordinatorMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public ConsumerCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
    this.metrics = metrics;
    this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";

    this.commitLatency = metrics.sensor("commit-latency");
    this.commitLatency.add(metrics.metricName("commit-latency-avg",
        this.metricGrpName,
        "The average time taken for a commit request"), new Avg());
    this.commitLatency.add(metrics.metricName("commit-latency-max",
        this.metricGrpName,
        "The max time taken for a commit request"), new Max());
    this.commitLatency.add(metrics.metricName("commit-rate",
        this.metricGrpName,
        "The number of commit calls per second"), new Rate(new Count()));

    Measurable numParts =
        new Measurable() {
            public double measure(MetricConfig config, long now) {
                return subscriptions.assignedPartitions().size();
            }
        };
    metrics.addMetric(metrics.metricName("assigned-partitions",
        this.metricGrpName,
        "The number of partitions currently assigned to this consumer"), numParts);
}
Example 3: stats
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
@Override
public List<NamedMeasurable> stats() {
    List<NamedMeasurable> ms = new ArrayList<NamedMeasurable>(this.percentiles.length);
    for (Percentile percentile : this.percentiles) {
        final double pct = percentile.percentile();
        ms.add(new NamedMeasurable(percentile.name(), new Measurable() {
            public double measure(MetricConfig config, long now) {
                return value(config, now, pct / 100.0);
            }
        }));
    }
    return ms;
}
Example 4: KafkaMonitor
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public KafkaMonitor(Map<String, Map> testProps) throws Exception {
    _apps = new ConcurrentHashMap<>();
    _services = new ConcurrentHashMap<>();

    for (Map.Entry<String, Map> entry : testProps.entrySet()) {
        String name = entry.getKey();
        Map props = entry.getValue();
        if (!props.containsKey(CLASS_NAME_CONFIG))
            throw new IllegalArgumentException(name + " is not configured with " + CLASS_NAME_CONFIG);
        String className = (String) props.get(CLASS_NAME_CONFIG);

        Class<?> cls = Class.forName(className);
        if (App.class.isAssignableFrom(cls)) {
            App test = (App) Class.forName(className).getConstructor(Map.class, String.class).newInstance(props, name);
            _apps.put(name, test);
        } else if (Service.class.isAssignableFrom(cls)) {
            Service service = (Service) Class.forName(className).getConstructor(Map.class, String.class).newInstance(props, name);
            _services.put(name, service);
        } else {
            throw new IllegalArgumentException(className + " should implement either " + App.class.getSimpleName() + " or " + Service.class.getSimpleName());
        }
    }
    _executor = Executors.newSingleThreadScheduledExecutor();
    _offlineRunnables = new ConcurrentHashMap<>();

    List<MetricsReporter> reporters = new ArrayList<>();
    reporters.add(new JmxReporter(JMX_PREFIX));
    Metrics metrics = new Metrics(new MetricConfig(), reporters, new SystemTime());
    metrics.addMetric(metrics.metricName("offline-runnable-count", METRIC_GROUP_NAME, "The number of Service/App that are not fully running"),
        new Measurable() {
            @Override
            public double measure(MetricConfig config, long now) {
                return _offlineRunnables.size();
            }
        }
    );
}
Example 5: GroupCoordinatorMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public GroupCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
    this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";

    this.heartbeatLatency = metrics.sensor("heartbeat-latency");
    this.heartbeatLatency.add(metrics.metricName("heartbeat-response-time-max",
        this.metricGrpName,
        "The max time taken to receive a response to a heartbeat request"), new Max());
    this.heartbeatLatency.add(metrics.metricName("heartbeat-rate",
        this.metricGrpName,
        "The average number of heartbeats per second"), new Rate(new Count()));

    this.joinLatency = metrics.sensor("join-latency");
    this.joinLatency.add(metrics.metricName("join-time-avg",
        this.metricGrpName,
        "The average time taken for a group rejoin"), new Avg());
    this.joinLatency.add(metrics.metricName("join-time-max",
        this.metricGrpName,
        "The max time taken for a group rejoin"), new Max());
    this.joinLatency.add(metrics.metricName("join-rate",
        this.metricGrpName,
        "The number of group joins per second"), new Rate(new Count()));

    this.syncLatency = metrics.sensor("sync-latency");
    this.syncLatency.add(metrics.metricName("sync-time-avg",
        this.metricGrpName,
        "The average time taken for a group sync"), new Avg());
    this.syncLatency.add(metrics.metricName("sync-time-max",
        this.metricGrpName,
        "The max time taken for a group sync"), new Max());
    this.syncLatency.add(metrics.metricName("sync-rate",
        this.metricGrpName,
        "The number of group syncs per second"), new Rate(new Count()));

    Measurable lastHeartbeat =
        new Measurable() {
            public double measure(MetricConfig config, long now) {
                return TimeUnit.SECONDS.convert(now - heartbeat.lastHeartbeatSend(), TimeUnit.MILLISECONDS);
            }
        };
    metrics.addMetric(metrics.metricName("last-heartbeat-seconds-ago",
        this.metricGrpName,
        "The number of seconds since the last controller heartbeat was sent"),
        lastHeartbeat);
}
Example 6: SelectorMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public SelectorMetrics(Metrics metrics, String metricGrpPrefix, Map<String, String> metricTags, boolean metricsPerConnection) {
    this.metrics = metrics;
    this.metricGrpPrefix = metricGrpPrefix;
    this.metricTags = metricTags;
    this.metricsPerConnection = metricsPerConnection;

    String metricGrpName = metricGrpPrefix + "-metrics";
    StringBuilder tagsSuffix = new StringBuilder();
    for (Map.Entry<String, String> tag: metricTags.entrySet()) {
        tagsSuffix.append(tag.getKey());
        tagsSuffix.append("-");
        tagsSuffix.append(tag.getValue());
    }

    // Connection closes: a Rate records the number of connections closed per second
    this.connectionClosed = sensor("connections-closed:" + tagsSuffix.toString());
    MetricName metricName = metrics.metricName("connection-close-rate", metricGrpName, "Connections closed per second in the window.", metricTags);
    this.connectionClosed.add(metricName, new Rate());

    // Connection creation: a Rate records the number of new connections established per second
    this.connectionCreated = sensor("connections-created:" + tagsSuffix.toString());
    metricName = metrics.metricName("connection-creation-rate", metricGrpName, "New connections established per second in the window.", metricTags);
    this.connectionCreated.add(metricName, new Rate());

    // Network operations: a Rate records the total reads and writes per second across all connections
    this.bytesTransferred = sensor("bytes-sent-received:" + tagsSuffix.toString());
    metricName = metrics.metricName("network-io-rate", metricGrpName, "The average number of network operations (reads or writes) on all connections per second.", metricTags);
    bytesTransferred.add(metricName, new Rate(new Count()));

    // Metrics for outgoing requests
    this.bytesSent = sensor("bytes-sent:" + tagsSuffix.toString(), bytesTransferred);
    metricName = metrics.metricName("outgoing-byte-rate", metricGrpName, "The average number of outgoing bytes sent per second to all servers.", metricTags);
    this.bytesSent.add(metricName, new Rate());
    metricName = metrics.metricName("request-rate", metricGrpName, "The average number of requests sent per second.", metricTags);
    this.bytesSent.add(metricName, new Rate(new Count()));
    metricName = metrics.metricName("request-size-avg", metricGrpName, "The average size of all requests in the window..", metricTags);
    this.bytesSent.add(metricName, new Avg());
    metricName = metrics.metricName("request-size-max", metricGrpName, "The maximum size of any request sent in the window.", metricTags);
    this.bytesSent.add(metricName, new Max());

    // Metrics for received bytes and responses
    this.bytesReceived = sensor("bytes-received:" + tagsSuffix.toString(), bytesTransferred);
    metricName = metrics.metricName("incoming-byte-rate", metricGrpName, "Bytes/second read off all sockets", metricTags);
    this.bytesReceived.add(metricName, new Rate());
    metricName = metrics.metricName("response-rate", metricGrpName, "Responses received sent per second.", metricTags);
    this.bytesReceived.add(metricName, new Rate(new Count()));

    // Metrics for select() calls and I/O wait time
    this.selectTime = sensor("select-time:" + tagsSuffix.toString());
    metricName = metrics.metricName("select-rate", metricGrpName, "Number of times the I/O layer checked for new I/O to perform per second", metricTags);
    this.selectTime.add(metricName, new Rate(new Count()));
    metricName = metrics.metricName("io-wait-time-ns-avg", metricGrpName, "The average length of time the I/O thread spent waiting for a socket ready for reads or writes in nanoseconds.", metricTags);
    this.selectTime.add(metricName, new Avg());
    metricName = metrics.metricName("io-wait-ratio", metricGrpName, "The fraction of time the I/O thread spent waiting.", metricTags);
    this.selectTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));

    // Metrics for time spent doing I/O per select() call
    this.ioTime = sensor("io-time:" + tagsSuffix.toString());
    metricName = metrics.metricName("io-time-ns-avg", metricGrpName, "The average length of time for I/O per select call in nanoseconds.", metricTags);
    this.ioTime.add(metricName, new Avg());
    metricName = metrics.metricName("io-ratio", metricGrpName, "The fraction of time the I/O thread spent doing I/O", metricTags);
    this.ioTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));

    // Current number of active connections, exposed as a Measurable gauge
    metricName = metrics.metricName("connection-count", metricGrpName, "The current number of active connections.", metricTags);
    topLevelMetricNames.add(metricName);
    this.metrics.addMetric(metricName, new Measurable() {
        public double measure(MetricConfig config, long now) {
            return channels.size();
        }
    });
}
Example 7: GroupCoordinatorMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public GroupCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) {
    this.metrics = metrics;
    this.metricGrpName = metricGrpPrefix + "-coordinator-metrics";

    this.heartbeatLatency = metrics.sensor("heartbeat-latency");
    this.heartbeatLatency.add(metrics.metricName("heartbeat-response-time-max",
        this.metricGrpName,
        "The max time taken to receive a response to a heartbeat request"), new Max());
    this.heartbeatLatency.add(metrics.metricName("heartbeat-rate",
        this.metricGrpName,
        "The average number of heartbeats per second"), new Rate(new Count()));

    this.joinLatency = metrics.sensor("join-latency");
    this.joinLatency.add(metrics.metricName("join-time-avg",
        this.metricGrpName,
        "The average time taken for a group rejoin"), new Avg());
    this.joinLatency.add(metrics.metricName("join-time-max",
        this.metricGrpName,
        "The max time taken for a group rejoin"), new Max());
    this.joinLatency.add(metrics.metricName("join-rate",
        this.metricGrpName,
        "The number of group joins per second"), new Rate(new Count()));

    this.syncLatency = metrics.sensor("sync-latency");
    this.syncLatency.add(metrics.metricName("sync-time-avg",
        this.metricGrpName,
        "The average time taken for a group sync"), new Avg());
    this.syncLatency.add(metrics.metricName("sync-time-max",
        this.metricGrpName,
        "The max time taken for a group sync"), new Max());
    this.syncLatency.add(metrics.metricName("sync-rate",
        this.metricGrpName,
        "The number of group syncs per second"), new Rate(new Count()));

    Measurable lastHeartbeat =
        new Measurable() {
            public double measure(MetricConfig config, long now) {
                return TimeUnit.SECONDS.convert(now - heartbeat.lastHeartbeatSend(), TimeUnit.MILLISECONDS);
            }
        };
    metrics.addMetric(metrics.metricName("last-heartbeat-seconds-ago",
        this.metricGrpName,
        "The number of seconds since the last controller heartbeat"),
        lastHeartbeat);
}
Example 8: SelectorMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public SelectorMetrics(Metrics metrics) {
    this.metrics = metrics;
    String metricGrpName = metricGrpPrefix + "-metrics";
    StringBuilder tagsSuffix = new StringBuilder();
    for (Map.Entry<String, String> tag: metricTags.entrySet()) {
        tagsSuffix.append(tag.getKey());
        tagsSuffix.append("-");
        tagsSuffix.append(tag.getValue());
    }

    this.connectionClosed = sensor("connections-closed:" + tagsSuffix.toString());
    MetricName metricName = metrics.metricName("connection-close-rate", metricGrpName, "Connections closed per second in the window.", metricTags);
    this.connectionClosed.add(metricName, new Rate());

    this.connectionCreated = sensor("connections-created:" + tagsSuffix.toString());
    metricName = metrics.metricName("connection-creation-rate", metricGrpName, "New connections established per second in the window.", metricTags);
    this.connectionCreated.add(metricName, new Rate());

    this.bytesTransferred = sensor("bytes-sent-received:" + tagsSuffix.toString());
    metricName = metrics.metricName("network-io-rate", metricGrpName, "The average number of network operations (reads or writes) on all connections per second.", metricTags);
    bytesTransferred.add(metricName, new Rate(new Count()));

    this.bytesSent = sensor("bytes-sent:" + tagsSuffix.toString(), bytesTransferred);
    metricName = metrics.metricName("outgoing-byte-rate", metricGrpName, "The average number of outgoing bytes sent per second to all servers.", metricTags);
    this.bytesSent.add(metricName, new Rate());
    metricName = metrics.metricName("request-rate", metricGrpName, "The average number of requests sent per second.", metricTags);
    this.bytesSent.add(metricName, new Rate(new Count()));
    metricName = metrics.metricName("request-size-avg", metricGrpName, "The average size of all requests in the window..", metricTags);
    this.bytesSent.add(metricName, new Avg());
    metricName = metrics.metricName("request-size-max", metricGrpName, "The maximum size of any request sent in the window.", metricTags);
    this.bytesSent.add(metricName, new Max());

    this.bytesReceived = sensor("bytes-received:" + tagsSuffix.toString(), bytesTransferred);
    metricName = metrics.metricName("incoming-byte-rate", metricGrpName, "Bytes/second read off all sockets", metricTags);
    this.bytesReceived.add(metricName, new Rate());
    metricName = metrics.metricName("response-rate", metricGrpName, "Responses received sent per second.", metricTags);
    this.bytesReceived.add(metricName, new Rate(new Count()));

    this.selectTime = sensor("select-time:" + tagsSuffix.toString());
    metricName = metrics.metricName("select-rate", metricGrpName, "Number of times the I/O layer checked for new I/O to perform per second", metricTags);
    this.selectTime.add(metricName, new Rate(new Count()));
    metricName = metrics.metricName("io-wait-time-ns-avg", metricGrpName, "The average length of time the I/O thread spent waiting for a socket ready for reads or writes in nanoseconds.", metricTags);
    this.selectTime.add(metricName, new Avg());
    metricName = metrics.metricName("io-wait-ratio", metricGrpName, "The fraction of time the I/O thread spent waiting.", metricTags);
    this.selectTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));

    this.ioTime = sensor("io-time:" + tagsSuffix.toString());
    metricName = metrics.metricName("io-time-ns-avg", metricGrpName, "The average length of time for I/O per select call in nanoseconds.", metricTags);
    this.ioTime.add(metricName, new Avg());
    metricName = metrics.metricName("io-ratio", metricGrpName, "The fraction of time the I/O thread spent doing I/O", metricTags);
    this.ioTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));

    metricName = metrics.metricName("connection-count", metricGrpName, "The current number of active connections.", metricTags);
    topLevelMetricNames.add(metricName);
    this.metrics.addMetric(metricName, new Measurable() {
        public double measure(MetricConfig config, long now) {
            return channels.size();
        }
    });
}
Example 9: ConsumeMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public ConsumeMetrics(Metrics metrics, final Map<String, String> tags) {
    this.metrics = metrics;

    _bytesConsumed = metrics.sensor("bytes-consumed");
    _bytesConsumed.add(new MetricName("bytes-consumed-rate", METRIC_GROUP_NAME, "The average number of bytes per second that are consumed", tags), new Rate());

    _consumeError = metrics.sensor("consume-error");
    _consumeError.add(new MetricName("consume-error-rate", METRIC_GROUP_NAME, "The average number of errors per second", tags), new Rate());
    _consumeError.add(new MetricName("consume-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new Total());

    _recordsConsumed = metrics.sensor("records-consumed");
    _recordsConsumed.add(new MetricName("records-consumed-rate", METRIC_GROUP_NAME, "The average number of records per second that are consumed", tags), new Rate());
    _recordsConsumed.add(new MetricName("records-consumed-total", METRIC_GROUP_NAME, "The total number of records that are consumed", tags), new Total());

    _recordsDuplicated = metrics.sensor("records-duplicated");
    _recordsDuplicated.add(new MetricName("records-duplicated-rate", METRIC_GROUP_NAME, "The average number of records per second that are duplicated", tags), new Rate());
    _recordsDuplicated.add(new MetricName("records-duplicated-total", METRIC_GROUP_NAME, "The total number of records that are duplicated", tags), new Total());

    _recordsLost = metrics.sensor("records-lost");
    _recordsLost.add(new MetricName("records-lost-rate", METRIC_GROUP_NAME, "The average number of records per second that are lost", tags), new Rate());
    _recordsLost.add(new MetricName("records-lost-total", METRIC_GROUP_NAME, "The total number of records that are lost", tags), new Total());

    _recordsDelayed = metrics.sensor("records-delayed");
    _recordsDelayed.add(new MetricName("records-delayed-rate", METRIC_GROUP_NAME, "The average number of records per second that are either lost or arrive after maximum allowed latency under SLA", tags), new Rate());
    _recordsDelayed.add(new MetricName("records-delayed-total", METRIC_GROUP_NAME, "The total number of records that are either lost or arrive after maximum allowed latency under SLA", tags), new Total());

    _recordsDelay = metrics.sensor("records-delay");
    _recordsDelay.add(new MetricName("records-delay-ms-avg", METRIC_GROUP_NAME, "The average latency of records from producer to consumer", tags), new Avg());
    _recordsDelay.add(new MetricName("records-delay-ms-max", METRIC_GROUP_NAME, "The maximum latency of records from producer to consumer", tags), new Max());

    // There are 2 extra buckets used for values smaller than 0.0 or larger than max, respectively.
    int bucketNum = _latencyPercentileMaxMs / _latencyPercentileGranularityMs + 2;
    int sizeInBytes = 4 * bucketNum;
    _recordsDelay.add(new Percentiles(sizeInBytes, _latencyPercentileMaxMs, Percentiles.BucketSizing.CONSTANT,
        new Percentile(new MetricName("records-delay-ms-99th", METRIC_GROUP_NAME, "The 99th percentile latency of records from producer to consumer", tags), 99.0),
        new Percentile(new MetricName("records-delay-ms-999th", METRIC_GROUP_NAME, "The 999th percentile latency of records from producer to consumer", tags), 99.9)));

    metrics.addMetric(new MetricName("consume-availability-avg", METRIC_GROUP_NAME, "The average consume availability", tags),
        new Measurable() {
            @Override
            public double measure(MetricConfig config, long now) {
                double recordsConsumedRate = _sensors.metrics.metrics().get(new MetricName("records-consumed-rate", METRIC_GROUP_NAME, tags)).value();
                double recordsLostRate = _sensors.metrics.metrics().get(new MetricName("records-lost-rate", METRIC_GROUP_NAME, tags)).value();
                double recordsDelayedRate = _sensors.metrics.metrics().get(new MetricName("records-delayed-rate", METRIC_GROUP_NAME, tags)).value();
                // If no records are lost or delayed, those rate sensors may expire and report NaN; treat NaN as 0.
                if (Double.isNaN(recordsLostRate))
                    recordsLostRate = 0;
                if (Double.isNaN(recordsDelayedRate))
                    recordsDelayedRate = 0;
                double consumeAvailability = recordsConsumedRate + recordsLostRate > 0
                    ? (recordsConsumedRate - recordsDelayedRate) / (recordsConsumedRate + recordsLostRate) : 0;
                return consumeAvailability;
            }
        }
    );
}
Example 10: ProduceMetrics
import org.apache.kafka.common.metrics.Measurable; // import the required package/class
public ProduceMetrics(Metrics metrics, final Map<String, String> tags) {
    this.metrics = metrics;
    this._tags = tags;

    _recordsProducedPerPartition = new ConcurrentHashMap<>();
    _produceErrorPerPartition = new ConcurrentHashMap<>();

    _recordsProduced = metrics.sensor("records-produced");
    _recordsProduced.add(new MetricName("records-produced-rate", METRIC_GROUP_NAME, "The average number of records per second that are produced", tags), new Rate());
    _recordsProduced.add(new MetricName("records-produced-total", METRIC_GROUP_NAME, "The total number of records that are produced", tags), new Total());

    _produceError = metrics.sensor("produce-error");
    _produceError.add(new MetricName("produce-error-rate", METRIC_GROUP_NAME, "The average number of errors per second", tags), new Rate());
    _produceError.add(new MetricName("produce-error-total", METRIC_GROUP_NAME, "The total number of errors", tags), new Total());

    metrics.addMetric(new MetricName("produce-availability-avg", METRIC_GROUP_NAME, "The average produce availability", tags),
        new Measurable() {
            @Override
            public double measure(MetricConfig config, long now) {
                double availabilitySum = 0.0;
                int partitionNum = _partitionNum.get();
                for (int partition = 0; partition < partitionNum; partition++) {
                    double recordsProduced = _sensors.metrics.metrics().get(new MetricName("records-produced-rate-partition-" + partition, METRIC_GROUP_NAME, tags)).value();
                    double produceError = _sensors.metrics.metrics().get(new MetricName("produce-error-rate-partition-" + partition, METRIC_GROUP_NAME, tags)).value();
                    // If there is no error, the error rate sensor may expire and the value may be NaN. Treat NaN as 0 for error rate.
                    if (Double.isNaN(produceError) || Double.isInfinite(produceError)) {
                        produceError = 0;
                    }
                    // If there was any produce attempt (succeeded or failed) to this partition, its availability is the
                    // fraction of successfully produced records among all attempts.
                    if (recordsProduced + produceError > 0) {
                        availabilitySum += recordsProduced / (recordsProduced + produceError);
                    } else if (!_treatZeroThroughputAsUnavailable) {
                        // If the user configures treatZeroThroughputAsUnavailable to be false, a partition's availability
                        // is 1.0 as long as no exception is thrown from the producer.
                        // This allows a Kafka admin to monitor exactly the availability experienced by Kafka users, whose
                        // producers will block and retry for a certain amount of time based on their configuration (e.g. retries, retry.backoff.ms).
                        // Note that if it takes a long time for messages to be retried and sent, the latency in the ConsumeService
                        // will increase, and ConsumeAvailability will drop if the latency exceeds consume.latency.sla.ms
                        availabilitySum += 1.0;
                    }
                }
                // Assign equal weight to per-partition availability when calculating overall availability
                return availabilitySum / partitionNum;
            }
        }
    );
}