Java Selector Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.common.network.Selector. If you are wondering what the Selector class is for, how to use it, or what real-world code that uses it looks like, the hand-picked examples below should help.


The Selector class belongs to the org.apache.kafka.common.network package. Three code examples of the class are shown below, ordered by popularity by default.
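
All three examples share the same transport-setup pattern: build a security-aware ChannelBuilder from the client configuration, wrap it in a Selector (connection-idle timeout, metrics registry, clock, and a metric group prefix), and then hand that Selector to a NetworkClient. Below is a minimal sketch of just that shared step, assuming the Kafka 0.11-era client classes these examples compile against; the class name SelectorSetup and the metric prefix "example-client" are placeholders, and older client versions expose the createChannelBuilder overload on the config's value map rather than on the config object.

import org.apache.kafka.clients.ClientUtils;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.network.ChannelBuilder;
import org.apache.kafka.common.network.Selector;
import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;

public class SelectorSetup {
    // Shared transport-setup step used by all three examples: ChannelBuilder -> Selector.
    public static Selector buildSelector(AbstractConfig clientConfig, long connectionsMaxIdleMs) {
        Time time = new SystemTime();      // clock abstraction used throughout the Kafka clients
        Metrics metrics = new Metrics();   // registry the Selector reports its connection metrics into
        // Chooses the PLAINTEXT/SSL/SASL transport according to security.protocol in the config.
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(clientConfig);
        return new Selector(
                connectionsMaxIdleMs,      // connections idle longer than this are closed
                metrics,
                time,
                "example-client",          // metric group prefix (placeholder name)
                channelBuilder);
    }
}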

Example 1: MetadataClient

import org.apache.kafka.common.network.Selector; // import the required package/class
public MetadataClient(KafkaCruiseControlConfig config,
                      Metadata metadata,
                      long metadataTTL,
                      Time time) {
  _metadataGeneration = new AtomicInteger(0);
  _metadata = metadata;
  _time = time;
  List<InetSocketAddress> addresses =
      ClientUtils.parseAndValidateAddresses(config.getList(KafkaCruiseControlConfig.BOOTSTRAP_SERVERS_CONFIG));
  _metadata.update(Cluster.bootstrap(addresses), Collections.emptySet(), time.milliseconds());
  ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values());
  _networkClient = new NetworkClient(
      new Selector(config.getLong(KafkaCruiseControlConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), new Metrics(), time, "load-monitor", channelBuilder),
      _metadata,
      config.getString(KafkaCruiseControlConfig.CLIENT_ID_CONFIG),
      DEFAULT_MAX_IN_FLIGHT_REQUEST,
      config.getLong(KafkaCruiseControlConfig.RECONNECT_BACKOFF_MS_CONFIG),
      config.getInt(KafkaCruiseControlConfig.SEND_BUFFER_CONFIG),
      config.getInt(KafkaCruiseControlConfig.RECEIVE_BUFFER_CONFIG),
      config.getInt(KafkaCruiseControlConfig.REQUEST_TIMEOUT_MS_CONFIG),
      _time,
      true);
  _metadataTTL = metadataTTL;
  // This is a super confusing interface in the Metadata. If we don't set this to false, the metadata.update()
  // will remove all the topics that are not in the metadata interested topics list.
  _metadata.addListener((cluster, unavailableTopics) -> _metadata.needMetadataForAllTopics(false));
}
 
Developer: linkedin, Project: cruise-control, Lines of code: 28, Source file: MetadataClient.java
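
For context, the NetworkClient built here is what later services metadata refreshes: the Metadata instance is marked stale and the client is polled until a newer metadata version arrives. The loop below is a hedged, illustrative sketch of that pattern; it is not copied from cruise-control, it reuses the fields from the constructor above, and the timeout handling is simplified.

// Illustrative only: request a metadata refresh and let the NetworkClient service it.
void refreshMetadata(long timeoutMs) {
  int priorVersion = _metadata.requestUpdate();        // mark the cached cluster metadata as stale
  long deadlineMs = _time.milliseconds() + timeoutMs;
  while (_metadata.version() <= priorVersion && _time.milliseconds() < deadlineMs) {
    // poll() sends the metadata request once a broker is ready and handles the response
    _networkClient.poll(Math.max(0L, deadlineMs - _time.milliseconds()), _time.milliseconds());
  }
}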

Example 2: WorkerGroupMember

import org.apache.kafka.common.network.Selector; // import the required package/class
public WorkerGroupMember(DistributedConfig config,
                         String restUrl,
                         ConfigBackingStore configStorage,
                         WorkerRebalanceListener listener,
                         Time time) {
    try {
        this.time = time;

        String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        clientId = clientIdConfig.length() <= 0 ? "connect-" + CONNECT_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
                .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG), true);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), 0);
        String metricGrpPrefix = "connect";
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config);
        NetworkClient netClient = new NetworkClient(
                new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder),
                this.metadata,
                clientId,
                100, // a fixed large enough value will suffice
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
                config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
                time,
                true,
                new ApiVersions());
        this.client = new ConsumerNetworkClient(netClient, metadata, time, retryBackoffMs,
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG));
        this.coordinator = new WorkerCoordinator(this.client,
                config.getString(DistributedConfig.GROUP_ID_CONFIG),
                config.getInt(DistributedConfig.REBALANCE_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.SESSION_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
                metrics,
                metricGrpPrefix,
                this.time,
                retryBackoffMs,
                restUrl,
                configStorage,
                listener);

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        log.debug("Connect group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KafkaException("Failed to construct kafka consumer", t);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 63, Source file: WorkerGroupMember.java
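
Once constructed, a WorkerGroupMember is not used directly by connectors; the Connect herder drives it from its main loop. A hedged sketch of that lifecycle (method names as in the Kafka 0.11 Connect runtime; the timeout value is a placeholder):

// Illustrative driving loop for a member built as above.
member.ensureActive();   // join the Connect group; the WorkerRebalanceListener fires on assignment
member.poll(1000L);      // service heartbeats and pending group I/O for up to one second
// ... run the assigned connectors and tasks ...
member.stop();           // leave the group and release the underlying Selector and NetworkClient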

Example 3: StreamsKafkaClient

import org.apache.kafka.common.network.Selector; // import the required package/class
public StreamsKafkaClient(final Config streamsConfig) {
    this.streamsConfig = streamsConfig;

    final Time time = new SystemTime();

    final Map<String, String> metricTags = new LinkedHashMap<>();
    metricTags.put("client-id", StreamsConfig.CLIENT_ID_CONFIG);

    final Metadata metadata = new Metadata(streamsConfig.getLong(
        StreamsConfig.RETRY_BACKOFF_MS_CONFIG),
        streamsConfig.getLong(StreamsConfig.METADATA_MAX_AGE_CONFIG),
        false
    );
    final List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(streamsConfig.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
    metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), time.milliseconds());

    final MetricConfig metricConfig = new MetricConfig().samples(streamsConfig.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
            .timeWindow(streamsConfig.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
            .tags(metricTags);
    reporters = streamsConfig.getConfiguredInstances(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG,
            MetricsReporter.class);
    // TODO: This should come from the KafkaStream
    reporters.add(new JmxReporter("kafka.admin"));
    final Metrics metrics = new Metrics(metricConfig, reporters, time);

    final ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(streamsConfig);

    final Selector selector = new Selector(
        streamsConfig.getLong(StreamsConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
        metrics,
        time,
        "kafka-client",
        channelBuilder);

    kafkaClient = new NetworkClient(
        selector,
        metadata,
        streamsConfig.getString(StreamsConfig.CLIENT_ID_CONFIG),
        MAX_INFLIGHT_REQUESTS, // a fixed large enough value will suffice
        streamsConfig.getLong(StreamsConfig.RECONNECT_BACKOFF_MS_CONFIG),
        streamsConfig.getLong(StreamsConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
        streamsConfig.getInt(StreamsConfig.SEND_BUFFER_CONFIG),
        streamsConfig.getInt(StreamsConfig.RECEIVE_BUFFER_CONFIG),
        streamsConfig.getInt(StreamsConfig.REQUEST_TIMEOUT_MS_CONFIG),
        time,
        true,
        new ApiVersions());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 49, Source file: StreamsKafkaClient.java
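
All three examples hand the Selector to a NetworkClient, but the class can also be driven directly, which helps clarify what NetworkClient does underneath. The following is a hedged sketch against the Kafka 0.11-era Selector API; the node id "0", the broker address, the buffer sizes, and the empty payload are placeholders (a real client would serialize an actual Kafka request, and NetworkSend prepends the length prefix itself).

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import org.apache.kafka.common.network.NetworkReceive;
import org.apache.kafka.common.network.NetworkSend;
import org.apache.kafka.common.network.Selector;

void pollOnce(Selector selector) throws IOException {
    // Open a non-blocking connection to one broker; the id names the connection inside the Selector.
    selector.connect("0", new InetSocketAddress("localhost", 9092), 64 * 1024, 64 * 1024);
    while (!selector.isChannelReady("0")) {
        selector.poll(100);                              // finish the TCP connect and any security handshake
    }
    ByteBuffer payload = ByteBuffer.wrap(new byte[0]);   // placeholder; put a serialized Kafka request here
    selector.send(new NetworkSend("0", payload));        // queue the send on that connection
    selector.poll(100);                                  // perform the actual network I/O
    for (NetworkReceive receive : selector.completedReceives()) {
        System.out.println("received " + receive.payload().limit() + " bytes from " + receive.source());
    }
    selector.close();
}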


Note: The org.apache.kafka.common.network.Selector examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; the source code copyright belongs to the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.