This article collects typical usage examples of the Java class org.apache.kafka.common.utils.AppInfoParser. If you are wondering what AppInfoParser does, how to use it, or where to find working examples, the curated code samples below should help.
The AppInfoParser class belongs to the org.apache.kafka.common.utils package. Fifteen code examples of the class are shown below, sorted by popularity by default.
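Before walking through the examples, it helps to know what AppInfoParser actually offers: static accessors for the client library's version string and git commit id (loaded from a properties resource bundled in the kafka-clients jar), plus a pair of methods that register and unregister a per-client "app-info" JMX MBean. Below is a minimal sketch of that API, using the two-argument register/unregister signatures that appear in the examples on this page (newer Kafka releases add further parameters); the class name AppInfoParserDemo is just for illustration.

import org.apache.kafka.common.utils.AppInfoParser;

public class AppInfoParserDemo {
    public static void main(String[] args) {
        // Static metadata about the kafka-clients library on the classpath.
        System.out.println("Kafka version: " + AppInfoParser.getVersion());
        System.out.println("Git commit id: " + AppInfoParser.getCommitId());

        // Register an app-info MBean under a JMX prefix and client id, then
        // remove it -- the same call pair the clients below use on startup
        // and in their close()/stop() methods.
        String jmxPrefix = "kafka.consumer";
        String clientId = "demo-client-1";
        AppInfoParser.registerAppInfo(jmxPrefix, clientId);
        AppInfoParser.unregisterAppInfo(jmxPrefix, clientId);
    }
}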
Example 1: close
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void close(boolean swallowException) {
    log.trace("Closing the Kafka consumer.");
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    this.closed = true;
    ClientUtils.closeQuietly(coordinator, "coordinator", firstException);
    ClientUtils.closeQuietly(interceptors, "consumer interceptors", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    ClientUtils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException);
    ClientUtils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    log.debug("The Kafka consumer has closed.");
    if (firstException.get() != null && !swallowException) {
        throw new KafkaException("Failed to close kafka consumer", firstException.get());
    }
}
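The ClientUtils.closeQuietly helper that appears throughout these close()/stop() methods is an internal Kafka utility: it closes one resource, records only the first failure into the shared AtomicReference, and never throws itself, so every shutdown step runs and exactly one representative exception can be rethrown at the end. A rough sketch of that behavior (not the actual Kafka source) under those assumptions:

import java.util.concurrent.atomic.AtomicReference;

public final class QuietCloser {
    // Close the resource; remember only the first error; never throw from here.
    public static void closeQuietly(AutoCloseable closeable,
                                    String name,
                                    AtomicReference<Throwable> firstException) {
        if (closeable == null)
            return;
        try {
            closeable.close();
        } catch (Throwable t) {
            firstException.compareAndSet(null, t); // keep only the first failure
            System.err.println("Failed to close " + name + ": " + t);
        }
    }
}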
Example 2: testCollectMetrics
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Test
public void testCollectMetrics() {
    // Given
    TimeUtils time = new TimeUtils();
    Uuid uuid = new Uuid();
    long unixTimeAtTestStart = time.nowInUnixTime();
    Collector metricsCollector = new BasicCollector(mockServer, time, uuid);

    // When
    GenericContainer metricsRecord = metricsCollector.collectMetrics();

    // Then
    assertThat(metricsRecord).isInstanceOf(SupportKafkaMetricsBasic.class);
    assertThat(metricsRecord.getSchema()).isEqualTo(SupportKafkaMetricsBasic.getClassSchema());
    SupportKafkaMetricsBasic basicRecord = (SupportKafkaMetricsBasic) metricsRecord;
    assertThat(basicRecord.getTimestamp()).isBetween(unixTimeAtTestStart, time.nowInUnixTime());
    assertThat(basicRecord.getKafkaVersion()).isEqualTo(AppInfoParser.getVersion());
    assertThat(basicRecord.getConfluentPlatformVersion()).isEqualTo(Version.getVersion());
    assertThat(basicRecord.getCollectorState()).isEqualTo(metricsCollector.getRuntimeState().stateId());
    assertThat(basicRecord.getBrokerProcessUUID()).isEqualTo(uuid.toString());
}
Example 3: stop
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void stop(boolean swallowException) {
    log.trace("Stopping the Connect group member.");
    AtomicReference<Throwable> firstException = new AtomicReference<Throwable>();
    this.stopped = true;
    ClientUtils.closeQuietly(coordinator, "coordinator", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    if (firstException.get() != null && !swallowException)
        throw new KafkaException("Failed to stop the Connect group member", firstException.get());
    else
        log.debug("The Connect group member has stopped.");
}
Example 4: close
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void close(long timeoutMs, boolean swallowException) {
    log.trace("Closing the Kafka consumer.");
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    this.closed = true;
    try {
        if (coordinator != null)
            coordinator.close(Math.min(timeoutMs, requestTimeoutMs));
    } catch (Throwable t) {
        firstException.compareAndSet(null, t);
        log.error("Failed to close coordinator", t);
    }
    ClientUtils.closeQuietly(fetcher, "fetcher", firstException);
    ClientUtils.closeQuietly(interceptors, "consumer interceptors", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    ClientUtils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException);
    ClientUtils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    log.debug("The Kafka consumer has closed.");
    Throwable exception = firstException.get();
    if (exception != null && !swallowException) {
        if (exception instanceof InterruptException) {
            throw (InterruptException) exception;
        }
        throw new KafkaException("Failed to close kafka consumer", exception);
    }
}
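Compared with Example 1, this overload bounds the coordinator shutdown by Math.min(timeoutMs, requestTimeoutMs) and rethrows an InterruptException unchanged, so callers can still distinguish thread interruption from an ordinary close failure.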
Example 5: collectMetrics
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
/**
 * @return A new metrics record, or null in case of any errors.
 */
@Override
public GenericContainer collectMetrics() {
    SupportKafkaMetricsBasic metricsRecord = new SupportKafkaMetricsBasic();
    metricsRecord.setTimestamp(time.nowInUnixTime());
    metricsRecord.setKafkaVersion(AppInfoParser.getVersion());
    metricsRecord.setConfluentPlatformVersion(Version.getVersion());
    metricsRecord.setCollectorState(this.getRuntimeState().stateId());
    metricsRecord.setBrokerProcessUUID(uuid.toString());
    metricsRecord.setClusterId(server.clusterId());
    return metricsRecord;
}
Example 6: verifyBasicMetrics
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private static void verifyBasicMetrics(SupportKafkaMetricsBasic basicRecord) {
    TimeUtils time = new TimeUtils();
    assertThat(basicRecord.getTimestamp()).isLessThanOrEqualTo(time.nowInUnixTime());
    assertThat(basicRecord.getKafkaVersion()).isEqualTo(AppInfoParser.getVersion());
    assertThat(basicRecord.getConfluentPlatformVersion()).isEqualTo(Version.getVersion());
    assertThat(basicRecord.getBrokerProcessUUID()).isNotEmpty();
}
Example 7: version
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
    return AppInfoParser.getVersion();
}
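This version() override is the method Kafka Connect requires of every plugin (connectors, tasks, converters, and transformations); delegating to AppInfoParser.getVersion() simply reports the Kafka version the plugin was built against. Examples 8 through 11 and 15 follow the same pattern. A self-contained sketch with a hypothetical class name:

import org.apache.kafka.common.utils.AppInfoParser;

public class PluginVersionDemo {
    // The same one-liner used by the version() overrides on this page.
    public String version() {
        return AppInfoParser.getVersion();
    }

    public static void main(String[] args) {
        System.out.println("Plugin reports version: " + new PluginVersionDemo().version());
    }
}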
Example 8: version
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
    return AppInfoParser.getVersion();
}
Example 9: version
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
    // Currently using the Kafka version; a future release will use the Kinesis-Kafka connector version
    return AppInfoParser.getVersion();
}
Example 10: version
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
    // Currently using the Kafka version; a future release will use the Kinesis-Kafka connector version
    return AppInfoParser.getVersion();
}
Example 11: version
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version()
{
    return AppInfoParser.getVersion();
}
Example 12: ServerInfo
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
public ServerInfo() {
    version = AppInfoParser.getVersion();
    commit = AppInfoParser.getCommitId();
}
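Here getCommitId() complements getVersion(): it returns the git commit hash recorded when the kafka-clients jar was built, which pins down the exact build in diagnostics. Both values appear to come from the same bundled properties resource and fall back to "unknown" when it cannot be read.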
Example 13: WorkerGroupMember
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
public WorkerGroupMember(DistributedConfig config,
                         String restUrl,
                         ConfigBackingStore configStorage,
                         WorkerRebalanceListener listener,
                         Time time) {
    try {
        this.time = time;

        String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        clientId = clientIdConfig.length() <= 0 ? "connect-" + CONNECT_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
                .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG), true);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), 0);
        String metricGrpPrefix = "connect";
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config);
        NetworkClient netClient = new NetworkClient(
                new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder),
                this.metadata,
                clientId,
                100, // a fixed large enough value will suffice
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
                config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
                time,
                true,
                new ApiVersions());
        this.client = new ConsumerNetworkClient(netClient, metadata, time, retryBackoffMs,
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG));
        this.coordinator = new WorkerCoordinator(this.client,
                config.getString(DistributedConfig.GROUP_ID_CONFIG),
                config.getInt(DistributedConfig.REBALANCE_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.SESSION_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
                metrics,
                metricGrpPrefix,
                this.time,
                retryBackoffMs,
                restUrl,
                configStorage,
                listener);

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        log.debug("Connect group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // to prevent a resource leak; see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KafkaException("Failed to construct kafka consumer", t);
    }
}
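The catch block above illustrates a construct-or-clean-up idiom (see KAFKA-2121): if the constructor fails partway through, close whatever was already built before propagating the exception, so no threads or sockets leak. A minimal sketch of the idiom with hypothetical resources:

import java.util.concurrent.atomic.AtomicReference;

public class PartialInitDemo {
    private Thread worker;          // created in step 1
    private AutoCloseable channel;  // created in step 2, may fail

    public PartialInitDemo() {
        try {
            worker = new Thread(() -> { /* event loop */ });
            worker.start();
            channel = openChannel(); // may throw halfway through construction
        } catch (Throwable t) {
            stop(true);              // best-effort cleanup of whatever exists
            throw new RuntimeException("Failed to construct PartialInitDemo", t);
        }
    }

    private AutoCloseable openChannel() {
        return () -> { }; // placeholder for a real resource acquisition
    }

    private void stop(boolean swallowException) {
        AtomicReference<Throwable> firstException = new AtomicReference<>();
        if (worker != null)
            worker.interrupt();
        if (channel != null) {
            try {
                channel.close();
            } catch (Throwable t) {
                firstException.compareAndSet(null, t);
            }
        }
        if (firstException.get() != null && !swallowException)
            throw new RuntimeException("Failed to stop", firstException.get());
    }
}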
Example 14: close
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void close(long timeout, TimeUnit timeUnit, boolean swallowException) {
    if (timeout < 0)
        throw new IllegalArgumentException("The timeout cannot be negative.");
    log.info("Closing the Kafka producer with timeoutMillis = {} ms.", timeUnit.toMillis(timeout));

    // this will keep track of the first encountered exception
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    boolean invokedFromCallback = Thread.currentThread() == this.ioThread;
    if (timeout > 0) {
        if (invokedFromCallback) {
            log.warn("Overriding close timeout {} ms to 0 ms in order to prevent useless blocking due to self-join. " +
                    "This means you have incorrectly invoked close with a non-zero timeout from the producer call-back.", timeout);
        } else {
            // Try to close gracefully.
            if (this.sender != null)
                this.sender.initiateClose();
            if (this.ioThread != null) {
                try {
                    this.ioThread.join(timeUnit.toMillis(timeout));
                } catch (InterruptedException t) {
                    firstException.compareAndSet(null, t);
                    log.error("Interrupted while joining ioThread", t);
                }
            }
        }
    }

    if (this.sender != null && this.ioThread != null && this.ioThread.isAlive()) {
        log.info("Proceeding to force close the producer since pending requests could not be completed " +
                "within timeout {} ms.", timeout);
        this.sender.forceClose();
        // Only join the sender thread when not calling from callback.
        if (!invokedFromCallback) {
            try {
                this.ioThread.join();
            } catch (InterruptedException e) {
                firstException.compareAndSet(null, e);
            }
        }
    }

    ClientUtils.closeQuietly(interceptors, "producer interceptors", firstException);
    ClientUtils.closeQuietly(metrics, "producer metrics", firstException);
    ClientUtils.closeQuietly(keySerializer, "producer keySerializer", firstException);
    ClientUtils.closeQuietly(valueSerializer, "producer valueSerializer", firstException);
    ClientUtils.closeQuietly(partitioner, "producer partitioner", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    log.debug("The Kafka producer has closed.");
    if (firstException.get() != null && !swallowException)
        throw new KafkaException("Failed to close kafka producer", firstException.get());
}
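The invokedFromCallback check above exists because ioThread.join() from the I/O thread itself would be a self-join: a thread cannot terminate while it is waiting, so an untimed join would block forever and a timed one always burns the full timeout. A tiny demonstration of the timed case:

public class SelfJoinDemo {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        // Joining the current thread can never succeed early: the thread
        // cannot die while it is blocked here, so this waits the full 100 ms.
        Thread.currentThread().join(100);
        System.out.printf("self-join returned after ~%d ms%n",
                (System.nanoTime() - start) / 1_000_000);
    }
}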
Example 15: version
import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
public String version() {
    return AppInfoParser.getVersion();
}