This article collects typical usage examples of the Java class org.apache.kafka.common.MetricName. If you are wondering what exactly the MetricName class does, how to use it, or what working examples look like, the curated code samples below may help.
The MetricName class belongs to the org.apache.kafka.common package. A total of 15 code examples of the MetricName class are shown below, sorted by popularity by default.
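Before the examples, here is a minimal sketch of how a MetricName is typically obtained from a Metrics registry and what it carries; the metric name, group, description, and tag values below are illustrative only, not taken from the examples:

Metrics metrics = new Metrics();
// name, group, description, then optional tag key/value pairs
MetricName metricName = metrics.metricName("request-rate", "producer-metrics",
        "Requests sent per second", "client-id", "demo-client");
// MetricName exposes its parts via name(), group(), and tags()
System.out.println(metricName.group() + ":" + metricName.name() + ":" + metricName.tags());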
Example 1: sendData
import org.apache.kafka.common.MetricName; // import the required package/class
public void sendData(String data) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    // the producer exposes its metrics as a map keyed by MetricName
    Map<MetricName, ? extends Metric> metrics = producer.metrics();
    System.out.println(metrics);
    for (int i = 0; i < 100; i++)
        producer.send(new ProducerRecord<String, String>("video_view", data));
    producer.close();
}
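As a follow-up to Example 1, here is a small sketch of reading individual entries out of the map returned by producer.metrics(), which is keyed by MetricName; the group filter below is illustrative and not part of the original example:

Map<MetricName, ? extends Metric> metrics = producer.metrics();
for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
    MetricName name = entry.getKey();
    // keep only metrics from one (assumed) group and print name, tags and current value
    if ("producer-metrics".equals(name.group()))
        System.out.println(name.name() + " " + name.tags() + " = " + entry.getValue().value());
}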
Example 2: testLatencyMetrics
import org.apache.kafka.common.MetricName; // import the required package/class
@Test
public void testLatencyMetrics() {
    String groupName = "doesNotMatter";
    String scope = "scope";
    String entity = "entity";
    String operation = "put";
    Map<String, String> tags = new HashMap<>();
    StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags);
    Sensor sensor1 = streamsMetrics.addLatencyAndThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG);
    Map<MetricName, ? extends Metric> metrics = streamsMetrics.metrics();
    // 6 metrics plus a common metric that keeps track of total registered metrics in Metrics() constructor
    assertEquals(metrics.size(), 7);
    streamsMetrics.removeSensor(sensor1);
    metrics = streamsMetrics.metrics();
    assertEquals(metrics.size(), 1);
}
Example 3: testThroughputMetrics
import org.apache.kafka.common.MetricName; // import the required package/class
@Test
public void testThroughputMetrics() {
    String groupName = "doesNotMatter";
    String scope = "scope";
    String entity = "entity";
    String operation = "put";
    Map<String, String> tags = new HashMap<>();
    StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags);
    Sensor sensor1 = streamsMetrics.addThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG);
    Map<MetricName, ? extends Metric> metrics = streamsMetrics.metrics();
    // 2 metrics plus a common metric that keeps track of total registered metrics in Metrics() constructor
    assertEquals(metrics.size(), 3);
    streamsMetrics.removeSensor(sensor1);
    metrics = streamsMetrics.metrics();
    assertEquals(metrics.size(), 1);
}
Example 4: BufferPool
import org.apache.kafka.common.MetricName; // import the required package/class
/**
* Create a new buffer pool
*
* @param memory The maximum amount of memory that this buffer pool can allocate
* @param poolableSize The buffer size to cache in the free list rather than deallocating
* @param metrics instance of Metrics
* @param time time instance
* @param metricGrpName logical group name for metrics
*/
public BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName) {
    this.poolableSize = poolableSize;
    this.lock = new ReentrantLock();
    this.free = new ArrayDeque<>();
    this.waiters = new ArrayDeque<>();
    this.totalMemory = memory;
    this.availableMemory = memory;
    this.metrics = metrics;
    this.time = time;
    this.waitTime = this.metrics.sensor(WAIT_TIME_SENSOR_NAME);
    MetricName metricName = metrics.metricName("bufferpool-wait-ratio",
                                               metricGrpName,
                                               "The fraction of time an appender waits for space allocation.");
    this.waitTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));
}
Example 5: getMBeanName
import org.apache.kafka.common.MetricName; // import the required package/class
/**
 * @param prefix     the domain prefix prepended before ":type="
 * @param metricName the Kafka MetricName to convert
 * @return standard JMX MBean name in the following format domainName:type=metricType,key1=val1,key2=val2
 */
static String getMBeanName(String prefix, MetricName metricName) {
    StringBuilder mBeanName = new StringBuilder();
    // prefix
    mBeanName.append(prefix);
    // group taken from the MetricName
    mBeanName.append(":type=");
    mBeanName.append(metricName.group());
    // build the tag list from the MetricName's tags
    for (Map.Entry<String, String> entry : metricName.tags().entrySet()) {
        if (entry.getKey().length() <= 0 || entry.getValue().length() <= 0)
            continue;
        mBeanName.append(",");
        mBeanName.append(entry.getKey());
        mBeanName.append("=");
        mBeanName.append(entry.getValue());
    }
    return mBeanName.toString();
}
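A hypothetical call of the helper above; the Metrics instance, metric identity, prefix, and tag values are made up for illustration:

Metrics metrics = new Metrics();
MetricName metricName = metrics.metricName("bufferpool-wait-ratio", "producer-metrics",
        "The fraction of time an appender waits for space allocation.", "client-id", "demo-client");
// yields something like: kafka.producer:type=producer-metrics,client-id=demo-client
String mBeanName = getMBeanName("kafka.producer", metricName);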
Example 6: testFetchResponseMetrics
import org.apache.kafka.common.MetricName; // import the required package/class
@Test
public void testFetchResponseMetrics() {
    subscriptions.assignFromUser(singleton(tp1));
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();
    fetchRecords(tp1, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(3, recordsCountAverage.value(), EPSILON);
}
Example 7: testFetchResponseMetricsPartialResponse
import org.apache.kafka.common.MetricName; // import the required package/class
@Test
public void testFetchResponseMetricsPartialResponse() {
    subscriptions.assignFromUser(singleton(tp1));
    subscriptions.seek(tp1, 1);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    int expectedBytes = 0;
    for (Record record : records.records()) {
        if (record.offset() >= 1)
            expectedBytes += record.sizeInBytes();
    }
    fetchRecords(tp1, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(2, recordsCountAverage.value(), EPSILON);
}
Example 8: testMetricName
import org.apache.kafka.common.MetricName; // import the required package/class
@Test
public void testMetricName() {
    MetricName n1 = metrics.metricName("name", "group", "description", "key1", "value1", "key2", "value2");
    Map<String, String> tags = new HashMap<String, String>();
    tags.put("key1", "value1");
    tags.put("key2", "value2");
    MetricName n2 = metrics.metricName("name", "group", "description", tags);
    assertEquals("metric names created in two different ways should be equal", n1, n2);
    try {
        metrics.metricName("name", "group", "description", "key1");
        fail("Creating MetricName with an odd number of keyValue should fail");
    } catch (IllegalArgumentException e) {
        // this is expected
    }
}
Example 9: printMetrics
import org.apache.kafka.common.MetricName; // import the required package/class
/**
* print out the metrics in alphabetical order
* @param metrics the metrics to be printed out
*/
public static void printMetrics(Map<MetricName, ? extends Metric> metrics) {
    if (metrics != null && !metrics.isEmpty()) {
        int maxLengthOfDisplayName = 0;
        TreeMap<String, Double> sortedMetrics = new TreeMap<>(new Comparator<String>() {
            @Override
            public int compare(String o1, String o2) {
                return o1.compareTo(o2);
            }
        });
        for (Metric metric : metrics.values()) {
            MetricName mName = metric.metricName();
            String mergedName = mName.group() + ":" + mName.name() + ":" + mName.tags();
            maxLengthOfDisplayName = maxLengthOfDisplayName < mergedName.length() ? mergedName.length() : maxLengthOfDisplayName;
            sortedMetrics.put(mergedName, metric.value());
        }
        String outputFormat = "%-" + maxLengthOfDisplayName + "s : %.3f";
        System.out.println(String.format("\n%-" + maxLengthOfDisplayName + "s %s", "Metric Name", "Value"));
        for (Map.Entry<String, Double> entry : sortedMetrics.entrySet()) {
            System.out.println(String.format(outputFormat, entry.getKey(), entry.getValue()));
        }
    }
}
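One possible way to drive the helper above, assuming a producer configured with the props from Example 1; this call site is a sketch and not part of the original example:

Producer<String, String> producer = new KafkaProducer<>(props);
producer.send(new ProducerRecord<String, String>("video_view", "some data"));
// print all producer metrics, sorted by group:name:tags
printMetrics(producer.metrics());
producer.close();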
Example 10: BufferPool
import org.apache.kafka.common.MetricName; // import the required package/class
/**
* Create a new buffer pool
*
* @param memory The maximum amount of memory that this buffer pool can allocate
* @param poolableSize The buffer size to cache in the free list rather than deallocating
* @param metrics instance of Metrics
* @param time time instance
* @param metricGrpName logical group name for metrics
*/
public BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName) {
    this.poolableSize = poolableSize;
    this.lock = new ReentrantLock();
    this.free = new ArrayDeque<ByteBuffer>();
    this.waiters = new ArrayDeque<Condition>();
    this.totalMemory = memory;
    this.availableMemory = memory;
    this.metrics = metrics;
    this.time = time;
    this.waitTime = this.metrics.sensor("bufferpool-wait-time");
    MetricName metricName = metrics.metricName("bufferpool-wait-ratio",
                                               metricGrpName,
                                               "The fraction of time an appender waits for space allocation.");
    this.waitTime.add(metricName, new Rate(TimeUnit.NANOSECONDS));
}
Example 11: getMBeanName
import org.apache.kafka.common.MetricName; // import the required package/class
/**
* @param metricName
* @return standard JMX MBean name in the following format domainName:type=metricType,key1=val1,key2=val2
*/
private String getMBeanName(MetricName metricName) {
    StringBuilder mBeanName = new StringBuilder();
    mBeanName.append(prefix);
    mBeanName.append(":type=");
    mBeanName.append(metricName.group());
    for (Map.Entry<String, String> entry : metricName.tags().entrySet()) {
        if (entry.getKey().length() <= 0 || entry.getValue().length() <= 0)
            continue;
        mBeanName.append(",");
        mBeanName.append(entry.getKey());
        mBeanName.append("=");
        mBeanName.append(entry.getValue());
    }
    return mBeanName.toString();
}
Example 12: dropwizardMetricName
import org.apache.kafka.common.MetricName; // import the required package/class
private static String dropwizardMetricName(KafkaMetric kafkaMetric) {
    MetricName name = kafkaMetric.metricName();
    List<String> nameParts = new ArrayList<String>(2);
    nameParts.add(name.group());
    nameParts.addAll(name.tags().values());
    nameParts.add(name.name());
    StringBuilder builder = new StringBuilder();
    for (String namePart : nameParts) {
        builder.append(namePart);
        builder.append(".");
    }
    builder.setLength(builder.length() - 1); // Remove the trailing dot.
    String processedName = builder.toString().replace(' ', '_').replace("\\.", "_");
    return MetricRegistry.name(METRIC_PREFIX, processedName);
}
Example 13: testMetricChange
import org.apache.kafka.common.MetricName; // import the required package/class
@Test
public void testMetricChange() throws Exception {
    Metrics metrics = new Metrics();
    DropwizardReporter reporter = new DropwizardReporter();
    reporter.configure(new HashMap<String, Object>());
    metrics.addReporter(reporter);
    Sensor sensor = metrics.sensor("kafka.requests");
    sensor.add(new MetricName("pack.bean1.avg", "grp1"), new Avg());
    Map<String, Gauge> gauges = SharedMetricRegistries.getOrCreate("default").getGauges();
    String expectedName = "org.apache.kafka.common.metrics.grp1.pack.bean1.avg";
    Assert.assertEquals(1, gauges.size());
    Assert.assertEquals(expectedName, gauges.keySet().toArray()[0]);
    sensor.record(2.1);
    sensor.record(2.2);
    sensor.record(2.6);
    Assert.assertEquals(2.3, (Double) gauges.get(expectedName).getValue(), 0.001);
}
Example 14: metricName
import org.apache.kafka.common.MetricName; // import the required package/class
protected String metricName(Metric metric) {
    MetricName name = metric.metricName();
    StringBuilder builder = new StringBuilder();
    builder.append(this.prefix);
    builder.append('.');
    for (Map.Entry<String, String> entry : new TreeMap<>(name.tags()).entrySet()) {
        if (!entry.getKey().isEmpty() && !entry.getValue().isEmpty()) {
            builder.append(entry.getValue());
            builder.append('.');
        }
    }
    builder.append(name.group());
    builder.append('.');
    builder.append(name.name());
    return builder.toString();
}
Example 15: mirrorFrom
import org.apache.kafka.common.MetricName; // import the required package/class
public void mirrorFrom(final ConcurrentMap<MetricName, KafkaMetric> kafkaMetrics,
                       final Predicate<String> metricFilter,
                       final Function<MetricName, String> metricNameAdjuster) {
    for (final Map.Entry<MetricName, KafkaMetric> entry : filter(kafkaMetrics, metricFilter)) {
        try {
            final MetricName metricName = entry.getKey();
            final String realMetricName = metricNameAdjuster.apply(metricName);
            metricsFactory.createGauge(metricName.group(), realMetricName, new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return entry.getValue().value();
                }
            });
        } catch (final Exception e) {
            log.error("Could not process metrics", e);
        }
    }
}