
Java AppInfoParser Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.common.utils.AppInfoParser. If you are wondering what the AppInfoParser class is for, how to use it, or what it looks like in practice, the curated code examples below may help.


The AppInfoParser class belongs to the org.apache.kafka.common.utils package. The sections below present 15 code examples of the AppInfoParser class, sorted by popularity by default.
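Before the individual examples, here is a minimal, self-contained sketch of the AppInfoParser lifecycle as it appears in the snippets below: register an AppInfo MBean when a client starts, query the client library's version and commit id, and unregister the MBean on shutdown. The JMX prefix and client id used here are made-up illustration values, and the two-argument register/unregister signatures match the Kafka 0.11-era code shown in these examples.

import org.apache.kafka.common.utils.AppInfoParser;

public class AppInfoParserSketch {

    // Hypothetical values for illustration only; real clients use prefixes such as
    // "kafka.consumer" or "kafka.producer" together with their configured client.id.
    private static final String JMX_PREFIX = "example.client";

    public static void main(String[] args) {
        String clientId = "example-client-1";

        // Register an AppInfo MBean (exposing version and commit id) under the prefix,
        // as the WorkerGroupMember constructor in Example 13 does.
        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);

        // Static lookups used by the version() and collectMetrics() examples below.
        System.out.println("Kafka client version: " + AppInfoParser.getVersion());
        System.out.println("Kafka commit id: " + AppInfoParser.getCommitId());

        // Unregister the MBean again, as the close()/stop() examples below do.
        AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    }
}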

Example 1: close

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void close(boolean swallowException) {
    log.trace("Closing the Kafka consumer.");
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    this.closed = true;
    ClientUtils.closeQuietly(coordinator, "coordinator", firstException);
    ClientUtils.closeQuietly(interceptors, "consumer interceptors", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    ClientUtils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException);
    ClientUtils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    log.debug("The Kafka consumer has closed.");
    if (firstException.get() != null && !swallowException) {
        throw new KafkaException("Failed to close kafka consumer", firstException.get());
    }
}
 
Developer: txazo, Project: kafka, Lines: 17, Source: KafkaConsumer.java

Example 2: testCollectMetrics

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Test
public void testCollectMetrics() {
  // Given
  TimeUtils time = new TimeUtils();
  Uuid uuid = new Uuid();
  long unixTimeAtTestStart = time.nowInUnixTime();
  Collector metricsCollector = new BasicCollector(mockServer, time, uuid);

  // When
  GenericContainer metricsRecord = metricsCollector.collectMetrics();

  // Then
  assertThat(metricsRecord).isInstanceOf(SupportKafkaMetricsBasic.class);
  assertThat(metricsRecord.getSchema()).isEqualTo(SupportKafkaMetricsBasic.getClassSchema());
  SupportKafkaMetricsBasic basicRecord = (SupportKafkaMetricsBasic) metricsRecord;
  assertThat(basicRecord.getTimestamp()).isBetween(unixTimeAtTestStart, time.nowInUnixTime());
  assertThat(basicRecord.getKafkaVersion()).isEqualTo(AppInfoParser.getVersion());
  assertThat(basicRecord.getConfluentPlatformVersion()).isEqualTo(Version.getVersion());
  assertThat(basicRecord.getCollectorState()).isEqualTo(metricsCollector.getRuntimeState().stateId());
  assertThat(basicRecord.getBrokerProcessUUID()).isEqualTo(uuid.toString());
}
 
Developer: confluentinc, Project: support-metrics-client, Lines: 22, Source: BasicCollectorTest.java

Example 3: stop

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void stop(boolean swallowException) {
    log.trace("Stopping the Connect group member.");
    AtomicReference<Throwable> firstException = new AtomicReference<Throwable>();
    this.stopped = true;
    ClientUtils.closeQuietly(coordinator, "coordinator", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    if (firstException.get() != null && !swallowException)
        throw new KafkaException("Failed to stop the Connect group member", firstException.get());
    else
        log.debug("The Connect group member has stopped.");
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 14, Source: WorkerGroupMember.java

Example 4: close

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void close(long timeoutMs, boolean swallowException) {
    log.trace("Closing the Kafka consumer.");
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    this.closed = true;
    try {
        if (coordinator != null)
            coordinator.close(Math.min(timeoutMs, requestTimeoutMs));
    } catch (Throwable t) {
        firstException.compareAndSet(null, t);
        log.error("Failed to close coordinator", t);
    }
    ClientUtils.closeQuietly(fetcher, "fetcher", firstException);
    ClientUtils.closeQuietly(interceptors, "consumer interceptors", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    ClientUtils.closeQuietly(keyDeserializer, "consumer key deserializer", firstException);
    ClientUtils.closeQuietly(valueDeserializer, "consumer value deserializer", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    log.debug("The Kafka consumer has closed.");
    Throwable exception = firstException.get();
    if (exception != null && !swallowException) {
        if (exception instanceof InterruptException) {
            throw (InterruptException) exception;
        }
        throw new KafkaException("Failed to close kafka consumer", exception);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 28, Source: KafkaConsumer.java

Example 5: collectMetrics

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
/**
 * @return A new metrics record, or null in case of any errors.
 */
@Override
public GenericContainer collectMetrics() {
  SupportKafkaMetricsBasic metricsRecord = new SupportKafkaMetricsBasic();
  metricsRecord.setTimestamp(time.nowInUnixTime());
  metricsRecord.setKafkaVersion(AppInfoParser.getVersion());
  metricsRecord.setConfluentPlatformVersion(Version.getVersion());
  metricsRecord.setCollectorState(this.getRuntimeState().stateId());
  metricsRecord.setBrokerProcessUUID(uuid.toString());
  metricsRecord.setClusterId(server.clusterId());
  return metricsRecord;
}
 
Developer: confluentinc, Project: support-metrics-client, Lines: 15, Source: BasicCollector.java

Example 6: verifyBasicMetrics

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private static void verifyBasicMetrics(SupportKafkaMetricsBasic basicRecord) {
  TimeUtils time = new TimeUtils();
  assertThat(basicRecord.getTimestamp()).isLessThanOrEqualTo(time.nowInUnixTime());
  assertThat(basicRecord.getKafkaVersion()).isEqualTo(AppInfoParser.getVersion());
  assertThat(basicRecord.getConfluentPlatformVersion()).isEqualTo(Version.getVersion());
  assertThat(basicRecord.getBrokerProcessUUID()).isNotEmpty();
}
 
Developer: confluentinc, Project: support-metrics-client, Lines: 8, Source: MetricsToKafkaTest.java

Example 7: version

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
  return AppInfoParser.getVersion();
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 5, Source: SchemaSourceConnector.java

Example 8: version

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 5, Source: FileStreamSourceConnector.java

Example 9: version

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
	// Currently using Kafka version, in future release use Kinesis-Kafka version
	return AppInfoParser.getVersion();

}
 
Developer: awslabs, Project: kinesis-kafka-connector, Lines: 7, Source: AmazonKinesisSinkConnector.java

Example 10: version

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version() {
	// Currently using Kafka version, in future release use Kinesis-Kafka version
	return AppInfoParser.getVersion();
}
 
Developer: awslabs, Project: kinesis-kafka-connector, Lines: 6, Source: FirehoseSinkConnector.java

Example 11: version

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
@Override
public String version()
{
  return AppInfoParser.getVersion();
}
 
Developer: ampool, Project: monarch, Lines: 6, Source: AmpoolSinkConnector.java

Example 12: ServerInfo

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
public ServerInfo() {
    version = AppInfoParser.getVersion();
    commit = AppInfoParser.getCommitId();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 5, Source: ServerInfo.java

Example 13: WorkerGroupMember

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
public WorkerGroupMember(DistributedConfig config,
                         String restUrl,
                         ConfigBackingStore configStorage,
                         WorkerRebalanceListener listener,
                         Time time) {
    try {
        this.time = time;

        String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        clientId = clientIdConfig.length() <= 0 ? "connect-" + CONNECT_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
                .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG), true);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), 0);
        String metricGrpPrefix = "connect";
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config);
        NetworkClient netClient = new NetworkClient(
                new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder),
                this.metadata,
                clientId,
                100, // a fixed large enough value will suffice
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
                config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
                time,
                true,
                new ApiVersions());
        this.client = new ConsumerNetworkClient(netClient, metadata, time, retryBackoffMs,
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG));
        this.coordinator = new WorkerCoordinator(this.client,
                config.getString(DistributedConfig.GROUP_ID_CONFIG),
                config.getInt(DistributedConfig.REBALANCE_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.SESSION_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
                metrics,
                metricGrpPrefix,
                this.time,
                retryBackoffMs,
                restUrl,
                configStorage,
                listener);

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        log.debug("Connect group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KafkaException("Failed to construct kafka consumer", t);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 63, Source: WorkerGroupMember.java

Example 14: close

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
private void close(long timeout, TimeUnit timeUnit, boolean swallowException) {
    if (timeout < 0)
        throw new IllegalArgumentException("The timeout cannot be negative.");

    log.info("Closing the Kafka producer with timeoutMillis = {} ms.", timeUnit.toMillis(timeout));
    // this will keep track of the first encountered exception
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    boolean invokedFromCallback = Thread.currentThread() == this.ioThread;
    if (timeout > 0) {
        if (invokedFromCallback) {
            log.warn("Overriding close timeout {} ms to 0 ms in order to prevent useless blocking due to self-join. " +
                    "This means you have incorrectly invoked close with a non-zero timeout from the producer call-back.", timeout);
        } else {
            // Try to close gracefully.
            if (this.sender != null)
                this.sender.initiateClose();
            if (this.ioThread != null) {
                try {
                    this.ioThread.join(timeUnit.toMillis(timeout));
                } catch (InterruptedException t) {
                    firstException.compareAndSet(null, t);
                    log.error("Interrupted while joining ioThread", t);
                }
            }
        }
    }

    if (this.sender != null && this.ioThread != null && this.ioThread.isAlive()) {
        log.info("Proceeding to force close the producer since pending requests could not be completed " +
                "within timeout {} ms.", timeout);
        this.sender.forceClose();
        // Only join the sender thread when not calling from callback.
        if (!invokedFromCallback) {
            try {
                this.ioThread.join();
            } catch (InterruptedException e) {
                firstException.compareAndSet(null, e);
            }
        }
    }

    ClientUtils.closeQuietly(interceptors, "producer interceptors", firstException);
    ClientUtils.closeQuietly(metrics, "producer metrics", firstException);
    ClientUtils.closeQuietly(keySerializer, "producer keySerializer", firstException);
    ClientUtils.closeQuietly(valueSerializer, "producer valueSerializer", firstException);
    ClientUtils.closeQuietly(partitioner, "producer partitioner", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    log.debug("The Kafka producer has closed.");
    if (firstException.get() != null && !swallowException)
        throw new KafkaException("Failed to close kafka producer", firstException.get());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 52, Source: KafkaProducer.java

Example 15: version

import org.apache.kafka.common.utils.AppInfoParser; // import the required package/class
public String version() {
  return AppInfoParser.getVersion();
}
 
Developer: oystparis, Project: kafka-connect-nats, Lines: 4, Source: NatsSourceTask.java


Note: The org.apache.kafka.common.utils.AppInfoParser class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; consult each project's license before distributing or using the code, and do not reproduce this article without permission.