This page collects typical usage examples of the Java class org.apache.kafka.common.network.Selector. If you are wondering what the Selector class is for or how to use it in practice, the curated examples here should help.
The Selector class lives in the org.apache.kafka.common.network package. Three code examples are shown below, drawn from open-source projects.
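Before the examples, here is a minimal, hedged sketch of constructing and driving a Selector on its own. The 5-argument constructor mirrors the one used by all three examples below; the buildSelector/pollOnce helpers, the "example" metrics prefix, the node id, the address, and the buffer sizes are placeholders invented for illustration, and the AbstractConfig passed in is assumed to carry the usual client security settings.

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.kafka.clients.ClientUtils;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.network.ChannelBuilder;
import org.apache.kafka.common.network.Selector;
import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;

// Hypothetical helper, not taken from any of the projects below.
static Selector buildSelector(AbstractConfig clientConfig) {
    Time time = new SystemTime();
    Metrics metrics = new Metrics();
    ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(clientConfig);
    // Constructor arguments: (connectionsMaxIdleMs, metrics, time, metricGrpPrefix, channelBuilder)
    return new Selector(30_000L, metrics, time, "example", channelBuilder);
}

// Typical driving pattern: open a non-blocking connection, then poll() to make progress on I/O.
static void pollOnce(Selector selector) throws IOException {
    selector.connect("node-1", new InetSocketAddress("localhost", 9092), 64 * 1024, 64 * 1024);
    selector.poll(1_000L);
    selector.close();
}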
Example 1: MetadataClient
import org.apache.kafka.common.network.Selector; // import the required package/class
public MetadataClient(KafkaCruiseControlConfig config,
                      Metadata metadata,
                      long metadataTTL,
                      Time time) {
  _metadataGeneration = new AtomicInteger(0);
  _metadata = metadata;
  _time = time;
  List<InetSocketAddress> addresses =
      ClientUtils.parseAndValidateAddresses(config.getList(KafkaCruiseControlConfig.BOOTSTRAP_SERVERS_CONFIG));
  _metadata.update(Cluster.bootstrap(addresses), Collections.emptySet(), time.milliseconds());
  ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values());
  _networkClient = new NetworkClient(
      new Selector(config.getLong(KafkaCruiseControlConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
                   new Metrics(),
                   time,
                   "load-monitor",
                   channelBuilder),
      _metadata,
      config.getString(KafkaCruiseControlConfig.CLIENT_ID_CONFIG),
      DEFAULT_MAX_IN_FLIGHT_REQUEST,
      config.getLong(KafkaCruiseControlConfig.RECONNECT_BACKOFF_MS_CONFIG),
      config.getInt(KafkaCruiseControlConfig.SEND_BUFFER_CONFIG),
      config.getInt(KafkaCruiseControlConfig.RECEIVE_BUFFER_CONFIG),
      config.getInt(KafkaCruiseControlConfig.REQUEST_TIMEOUT_MS_CONFIG),
      _time,
      true);
  _metadataTTL = metadataTTL;
  // This is a super confusing interface in the Metadata. If we don't set this to false, the metadata.update()
  // will remove all the topics that are not in the metadata interested topics list.
  _metadata.addListener((cluster, unavailableTopics) -> _metadata.needMetadataForAllTopics(false));
}
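In this example (judging by KafkaCruiseControlConfig, it comes from Kafka Cruise Control's load monitor), the Selector backs a NetworkClient used to refresh cluster metadata. The listener registered at the end keeps needMetadataForAllTopics set to false so that metadata updates do not drop topics outside the "interested" topic set, as the inline comment explains.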
Example 2: WorkerGroupMember
import org.apache.kafka.common.network.Selector; // import the required package/class
public WorkerGroupMember(DistributedConfig config,
                         String restUrl,
                         ConfigBackingStore configStorage,
                         WorkerRebalanceListener listener,
                         Time time) {
    try {
        this.time = time;

        String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        clientId = clientIdConfig.length() <= 0 ? "connect-" + CONNECT_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
                .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG), true);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), 0);
        String metricGrpPrefix = "connect";
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config);
        NetworkClient netClient = new NetworkClient(
                new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder),
                this.metadata,
                clientId,
                100, // a fixed large enough value will suffice
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
                config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
                time,
                true,
                new ApiVersions());
        this.client = new ConsumerNetworkClient(netClient, metadata, time, retryBackoffMs,
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG));
        this.coordinator = new WorkerCoordinator(this.client,
                config.getString(DistributedConfig.GROUP_ID_CONFIG),
                config.getInt(DistributedConfig.REBALANCE_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.SESSION_TIMEOUT_MS_CONFIG),
                config.getInt(DistributedConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
                metrics,
                metricGrpPrefix,
                this.time,
                retryBackoffMs,
                restUrl,
                configStorage,
                listener);

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        log.debug("Connect group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KafkaException("Failed to construct kafka consumer", t);
    }
}
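Here the Selector is one layer of Kafka Connect's distributed worker stack: it feeds a NetworkClient, which is wrapped in a ConsumerNetworkClient and finally handed to the WorkerCoordinator that manages group membership and rebalancing. The catch block stops whatever was already constructed so a failed constructor does not leak resources (see KAFKA-2121, referenced in the comment).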
Example 3: StreamsKafkaClient
import org.apache.kafka.common.network.Selector; // import the required package/class
public StreamsKafkaClient(final Config streamsConfig) {
    this.streamsConfig = streamsConfig;
    final Time time = new SystemTime();

    final Map<String, String> metricTags = new LinkedHashMap<>();
    metricTags.put("client-id", StreamsConfig.CLIENT_ID_CONFIG);

    final Metadata metadata = new Metadata(streamsConfig.getLong(StreamsConfig.RETRY_BACKOFF_MS_CONFIG),
                                           streamsConfig.getLong(StreamsConfig.METADATA_MAX_AGE_CONFIG),
                                           false);
    final List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(streamsConfig.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
    metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), time.milliseconds());

    final MetricConfig metricConfig = new MetricConfig().samples(streamsConfig.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
            .timeWindow(streamsConfig.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
            .tags(metricTags);
    reporters = streamsConfig.getConfiguredInstances(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG,
                                                     MetricsReporter.class);
    // TODO: This should come from the KafkaStream
    reporters.add(new JmxReporter("kafka.admin"));
    final Metrics metrics = new Metrics(metricConfig, reporters, time);

    final ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(streamsConfig);

    final Selector selector = new Selector(
            streamsConfig.getLong(StreamsConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
            metrics,
            time,
            "kafka-client",
            channelBuilder);

    kafkaClient = new NetworkClient(
            selector,
            metadata,
            streamsConfig.getString(StreamsConfig.CLIENT_ID_CONFIG),
            MAX_INFLIGHT_REQUESTS, // a fixed large enough value will suffice
            streamsConfig.getLong(StreamsConfig.RECONNECT_BACKOFF_MS_CONFIG),
            streamsConfig.getLong(StreamsConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            streamsConfig.getInt(StreamsConfig.SEND_BUFFER_CONFIG),
            streamsConfig.getInt(StreamsConfig.RECEIVE_BUFFER_CONFIG),
            streamsConfig.getInt(StreamsConfig.REQUEST_TIMEOUT_MS_CONFIG),
            time,
            true,
            new ApiVersions());
}
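In this last example, taken from Kafka Streams' internal StreamsKafkaClient, the Selector is built directly from the streams configuration and passed into a NetworkClient that the class then uses for its own broker requests; the surrounding metrics, metadata, and reporter setup mirrors the pattern of the previous two examples.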