本文整理汇总了Java中kafka.metrics.KafkaMetricsReporter类的典型用法代码示例。如果您正苦于以下问题:Java KafkaMetricsReporter类的具体用法?Java KafkaMetricsReporter怎么用?Java KafkaMetricsReporter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
KafkaMetricsReporter类属于kafka.metrics包,在下文中一共展示了KafkaMetricsReporter类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: SupportedServerStartable
import kafka.metrics.KafkaMetricsReporter; //导入依赖的package包/类
/**
 * Starts the configured Kafka metrics reporters, builds the broker from the given
 * properties, and — when proactive support is enabled — initializes and prepares the
 * Confluent support metrics agent thread.
 *
 * @param brokerConfiguration broker properties used for the KafkaConfig, the metrics
 *                            reporters, and the support configuration
 */
public SupportedServerStartable(Properties brokerConfiguration) {
Seq<KafkaMetricsReporter>
reporters =
KafkaMetricsReporter$.MODULE$.startReporters(new VerifiableProperties(brokerConfiguration));
KafkaConfig serverConfig = KafkaConfig.fromProps(brokerConfiguration);
Option<String> noThreadNamePrefix = Option.empty();
server = new KafkaServer(serverConfig, Time.SYSTEM, noThreadNamePrefix, reporters);
KafkaSupportConfig kafkaSupportConfig = new KafkaSupportConfig(brokerConfiguration);
if (kafkaSupportConfig.isProactiveSupportEnabled()) {
try {
Runtime serverRuntime = Runtime.getRuntime();
metricsReporter =
new MetricsReporter(server, kafkaSupportConfig, serverRuntime);
metricsReporter.init();
// Daemon thread so the agent never blocks JVM shutdown.
metricsThread = newThread("ConfluentProactiveSupportMetricsAgent", metricsReporter, true);
long reportIntervalMs = kafkaSupportConfig.getReportIntervalMs();
long reportIntervalHours = reportIntervalMs / (60 * 60 * 1000);
// We log at WARN level to increase the visibility of this information.
log.warn(legalDisclaimerProactiveSupportEnabled(reportIntervalHours));
} catch (Exception e) {
// We catch any exceptions to prevent collateral damage to the more important broker
// threads that are running in the same JVM.
// Pass the exception itself so the full stack trace is preserved in the log,
// instead of only e.getMessage() (which may even be null).
log.error("Failed to start Proactive Support Metrics agent: {}", e.getMessage(), e);
}
} else {
// We log at WARN level to increase the visibility of this information.
log.warn(legalDisclaimerProactiveSupportDisabled());
}
}
示例2: init
import kafka.metrics.KafkaMetricsReporter; //导入依赖的package包/类
/**
 * Initializes the consumer connector: derives a unique consumer id, connects to
 * ZooKeeper, creates the fetcher, optionally starts the auto-commit scheduler,
 * and finally starts the configured Kafka metrics reporters.
 */
private void init() {
final String consumerUuid;
if (config.consumerId != null) {
// An explicitly configured id is intended for testing only.
consumerUuid = config.consumerId;
} else {
// Otherwise derive a unique id from host, timestamp, and random bits.
UUID uuid = UUID.randomUUID();
consumerUuid = String.format("%s-%d-%s", Utils.getHostName(),
System.currentTimeMillis(),
Long.toHexString(uuid.getMostSignificantBits()).substring(0, 8));
}
consumerIdString = config.groupId + "_" + consumerUuid;
logger = LoggerFactory.getLogger(ZookeeperConsumerConnector.class + "[" + consumerIdString + "]");
connectZk();
createFetcher();
if (config.autoCommitEnable) {
scheduler.startup();
logger.info("starting auto committer every {} ms", config.autoCommitIntervalMs);
Runnable autoCommitTask = new Runnable() {
@Override
public void run() {
autoCommit();
}
};
// Initial delay and period are both the configured auto-commit interval.
scheduler.schedule("kafka-consumer-autocommit", autoCommitTask,
config.autoCommitIntervalMs, config.autoCommitIntervalMs, TimeUnit.MILLISECONDS);
}
KafkaMetricsReporter.startReporters(config.props);
}
示例3: startBroker
import kafka.metrics.KafkaMetricsReporter; //导入依赖的package包/类
/**
 * Starts an embedded Kafka broker configured from the given properties,
 * with no metrics reporters and no thread-name prefix.
 */
private KafkaServer startBroker(Properties props) {
// The broker constructor requires a Scala Seq; hand it an empty one.
Buffer<KafkaMetricsReporter> noReporters =
scala.collection.JavaConversions.asScalaBuffer(new ArrayList<KafkaMetricsReporter>());
KafkaServer broker =
new KafkaServer(new KafkaConfig(props), new SystemTime(), Option.<String>empty(), noReporters);
broker.startup();
return broker;
}
示例4: startBroker
import kafka.metrics.KafkaMetricsReporter; //导入依赖的package包/类
/** Boots a Kafka broker from {@code props}; the caller owns its lifecycle. */
private KafkaServer startBroker(Properties props) {
List<KafkaMetricsReporter> emptyReporters = new ArrayList<>();
// Convert the (empty) Java list to the Scala Buffer the constructor expects.
Buffer<KafkaMetricsReporter> scalaReporters =
scala.collection.JavaConversions.asScalaBuffer(emptyReporters);
KafkaServer broker = new KafkaServer(
new KafkaConfig(props), new SystemTime(), Option.<String>empty(), scalaReporters);
broker.startup();
return broker;
}
示例5: getKafkaServer
import kafka.metrics.KafkaMetricsReporter; //导入依赖的package包/类
/**
* Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
*/
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 *
 * Starts an embedded Kafka broker on an automatically chosen free port, retrying on
 * port-bind conflicts up to a fixed number of attempts.
 *
 * @param brokerId  broker.id to assign to this broker
 * @param tmpFolder directory to use as the broker's log.dir
 * @return the started KafkaServer
 * @throws Exception if the broker could not be started after all retries
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
Properties kafkaProperties = new Properties();
// properties have to be Strings
kafkaProperties.put("advertised.host.name", KAFKA_HOST);
kafkaProperties.put("broker.id", Integer.toString(brokerId));
kafkaProperties.put("log.dir", tmpFolder.toString());
kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
kafkaProperties.put("transaction.max.timeout.ms", Integer.toString(1000 * 60 * 60 * 2)); // 2hours
// for CI stability, increase zookeeper session timeout
kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
if (config.getKafkaServerProperties() != null) {
kafkaProperties.putAll(config.getKafkaServerProperties());
}
final int numTries = 5;
for (int i = 1; i <= numTries; i++) {
int kafkaPort = NetUtils.getAvailablePort();
kafkaProperties.put("port", Integer.toString(kafkaPort));
if (config.isHideKafkaBehindProxy()) {
NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
// FIX: convert to String — the original stored an Integer, violating the
// "properties have to be Strings" rule every other entry follows.
kafkaProperties.put("advertised.port", Integer.toString(proxy.getLocalPort()));
}
//to support secure kafka cluster
if (config.isSecureMode()) {
LOG.info("Adding Kafka secure configurations");
kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
kafkaProperties.putAll(getSecureProperties());
}
KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);
try {
scala.Option<String> stringNone = scala.Option.apply(null);
KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
server.startup();
return server;
}
catch (KafkaException e) {
if (e.getCause() instanceof BindException) {
// port conflict, retry with a newly chosen free port
LOG.info("Port conflict when starting Kafka Broker. Retrying...");
}
else {
throw e;
}
}
}
throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
示例6: getKafkaServer
import kafka.metrics.KafkaMetricsReporter; //导入依赖的package包/类
/**
* Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
*/
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 *
 * Starts an embedded Kafka broker on a free port, retrying when the port is already
 * bound; gives up after a fixed number of attempts.
 *
 * @param brokerId  broker.id for this broker instance
 * @param tmpFolder directory used as the broker's log.dir
 * @return the started KafkaServer
 * @throws Exception if no attempt succeeds within the retry budget
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
Properties kafkaProperties = new Properties();
// properties have to be Strings
kafkaProperties.put("advertised.host.name", KAFKA_HOST);
kafkaProperties.put("broker.id", Integer.toString(brokerId));
kafkaProperties.put("log.dir", tmpFolder.toString());
kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
// for CI stability, increase zookeeper session timeout
kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
if (config.getKafkaServerProperties() != null) {
kafkaProperties.putAll(config.getKafkaServerProperties());
}
final int numTries = 5;
for (int i = 1; i <= numTries; i++) {
int kafkaPort = NetUtils.getAvailablePort();
kafkaProperties.put("port", Integer.toString(kafkaPort));
if (config.isHideKafkaBehindProxy()) {
NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
// FIX: store as String — the original put an Integer into Properties,
// contradicting the "properties have to be Strings" comment above.
kafkaProperties.put("advertised.port", Integer.toString(proxy.getLocalPort()));
}
//to support secure kafka cluster
if (config.isSecureMode()) {
LOG.info("Adding Kafka secure configurations");
kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
kafkaProperties.putAll(getSecureProperties());
}
KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);
try {
scala.Option<String> stringNone = scala.Option.apply(null);
KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
server.startup();
return server;
}
catch (KafkaException e) {
if (e.getCause() instanceof BindException) {
// port conflict, retry with a freshly allocated port
LOG.info("Port conflict when starting Kafka Broker. Retrying...");
}
else {
throw e;
}
}
}
throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}