This article collects typical usage examples of the KafkaClient.check_version method from Python's kafka.client_async module. If you are wondering what KafkaClient.check_version does, how to call it, or want examples of it in use, the curated code samples below should help. You can also explore the containing class, kafka.client_async.KafkaClient, for more context.

Four code examples of KafkaClient.check_version are shown below, ordered by popularity by default.
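As quick orientation before the examples, here is a minimal, hypothetical sketch of calling check_version directly. The bootstrap address is a placeholder, and the return value depends on the kafka-python release: older releases return a version string such as '0.9', newer ones a tuple such as (0, 10, 1).

from kafka.client_async import KafkaClient

# Placeholder bootstrap address; point this at a reachable broker.
client = KafkaClient(bootstrap_servers='localhost:9092')

# Probe a connected broker and infer which Kafka version it is running.
broker_version = client.check_version()
print(broker_version)

client.close()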
Example 1: __init__

# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import check_version [as alias]
import copy
import itertools

from kafka.client_async import KafkaClient
# NOTE: the original example does not show its protocol imports; the aliased
# request/response classes below are assumed to come from kafka.protocol
# (kafka.protocol.admin and kafka.protocol.commit in kafka-python).
from kafka.protocol.admin import (ListGroupsRequest as _ListGroupsRequest,
                                  ListGroupsResponse as _ListGroupsResponse)
from kafka.protocol.commit import (GroupCoordinatorRequest as _GroupCoordinatorRequest,
                                   GroupCoordinatorResponse as _GroupCoordinatorResponse)


class KafkaConsumerLag:

    def __init__(self, bootstrap_servers):
        self.client = KafkaClient(bootstrap_servers=bootstrap_servers)
        self.client.check_version()

    def _send(self, broker_id, request, response_type=None):
        f = self.client.send(broker_id, request)
        response = self.client.poll(future=f)

        if response_type:
            if response and len(response) > 0:
                for r in response:
                    if isinstance(r, response_type):
                        return r
        else:
            if response and len(response) > 0:
                return response[0]
        return None

    def check(self, group_topics=None, discovery=None):
        """Collect offset and lag statistics per consumer group, returned as:

        {
            "<group>": {
                "state": <str>,
                "topics": {
                    "<topic>": {
                        "consumer_lag": <int>,
                        "partitions": {
                            "<partition>": {
                                "offset_first": <int>,
                                "offset_consumed": <int>,
                                "offset_last": <int>,
                                "lag": <int>
                            }
                        }
                    }
                }
            }
        }

        :param group_topics: optional mapping of consumer group ID to a list
            of topics to inspect for that group.
        :param discovery: if True, discover active consumer groups from the
            brokers; defaults to True when group_topics is not given.
        :return: consumer statistics
        """
        cluster = self.client.cluster
        brokers = cluster.brokers()

        # Consumer group ID -> list(topics)
        if group_topics is None:
            group_topics = {}
            if discovery is None:
                discovery = True
        else:
            group_topics = copy.deepcopy(group_topics)

        # Set of consumer group IDs
        consumer_groups = set(group_topics)

        # Set of all known topics
        topics = set(itertools.chain(*group_topics.values()))

        # Consumer group ID -> coordinating broker
        consumer_coordinator = {}

        # Coordinating broker -> list(consumer group IDs)
        coordinator_consumers = {}

        results = {}
        for consumer_group in group_topics:
            results[consumer_group] = {'state': None, 'topics': {}}

        # Ensure connections to all brokers
        for broker in brokers:
            while not self.client.is_ready(broker.nodeId):
                self.client.ready(broker.nodeId)

        # Collect all active consumer groups
        if discovery:
            for broker in brokers:
                response = self._send(broker.nodeId, _ListGroupsRequest(), _ListGroupsResponse)
                if response:
                    for group in response.groups:
                        consumer_groups.add(group[0])

        # Identify which broker is coordinating each consumer group
        for group in consumer_groups:
            response = self._send(next(iter(brokers)).nodeId,
                                  _GroupCoordinatorRequest(group), _GroupCoordinatorResponse)
            if response:
                consumer_coordinator[group] = response.coordinator_id
                if response.coordinator_id not in coordinator_consumers:
                    coordinator_consumers[response.coordinator_id] = []
# ... (part of the code omitted here) ...
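Assuming the omitted remainder of check() populates and returns the structure documented in its docstring, usage of this helper might look like the following hypothetical sketch (the bootstrap address, group name, and topic name are placeholders):

lag_checker = KafkaConsumerLag(bootstrap_servers='localhost:9092')

# Pin specific groups to topics, or pass nothing and rely on discovery.
stats = lag_checker.check(group_topics={'my-group': ['my-topic']})

for group, group_stats in stats.items():
    for topic, topic_stats in group_stats['topics'].items():
        print(group, topic, topic_stats['consumer_lag'])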
Example 2: KafkaConsumer

# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import check_version [as alias]
# ... (part of the code omitted here) ...
        'security_protocol': 'PLAINTEXT',
        'ssl_context': None,
        'ssl_check_hostname': True,
        'ssl_cafile': None,
        'ssl_certfile': None,
        'ssl_keyfile': None,
        'api_version': 'auto',
        'connections_max_idle_ms': 9 * 60 * 1000,  # not implemented yet
        #'metric_reporters': None,
        #'metrics_num_samples': 2,
        #'metrics_sample_window_ms': 30000,
    }

    def __init__(self, *topics, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, 'Unrecognized configs: %s' % configs

        deprecated = {'smallest': 'earliest', 'largest': 'latest'}
        if self.config['auto_offset_reset'] in deprecated:
            new_config = deprecated[self.config['auto_offset_reset']]
            log.warning('use auto_offset_reset=%s (%s is deprecated)',
                        new_config, self.config['auto_offset_reset'])
            self.config['auto_offset_reset'] = new_config

        self._client = KafkaClient(**self.config)

        # Check Broker Version if not set explicitly
        if self.config['api_version'] == 'auto':
            self.config['api_version'] = self._client.check_version()
        assert self.config['api_version'] in ('0.9', '0.8.2', '0.8.1', '0.8.0'), 'Unrecognized api version'

        # Convert api_version config to tuple for easy comparisons
        self.config['api_version'] = tuple(
            map(int, self.config['api_version'].split('.')))

        self._subscription = SubscriptionState(self.config['auto_offset_reset'])
        self._fetcher = Fetcher(
            self._client, self._subscription, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription,
            assignors=self.config['partition_assignment_strategy'],
            **self.config)
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float('inf')
        #self.metrics = None

        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)

    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().
# ... (part of the code omitted here) ...
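The api_version handling in __init__ above reduces to a small normalization step; here is a standalone, illustrative sketch of the same logic (the function name is made up for illustration, and '0.9' stands in for the value check_version() would return on this library version):

def normalize_api_version(configured, probed='0.9'):
    # An 'auto' setting is replaced by the broker version probed via
    # KafkaClient.check_version().
    if configured == 'auto':
        configured = probed
    assert configured in ('0.9', '0.8.2', '0.8.1', '0.8.0'), 'Unrecognized api version'
    # Convert to a tuple for easy comparisons, e.g. '0.8.2' -> (0, 8, 2).
    return tuple(map(int, configured.split('.')))

print(normalize_api_version('auto'))   # (0, 9)
print(normalize_api_version('0.8.2'))  # (0, 8, 2)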
Example 3: KafkaConsumer

# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import check_version [as alias]
# ... (part of the code omitted here) ...
        "metadata_max_age_ms": 5 * 60 * 1000,
        "partition_assignment_strategy": (RoundRobinPartitionAssignor,),
        "heartbeat_interval_ms": 3000,
        "session_timeout_ms": 30000,
        "send_buffer_bytes": 128 * 1024,
        "receive_buffer_bytes": 32 * 1024,
        "consumer_timeout_ms": -1,
        "api_version": "auto",
        "connections_max_idle_ms": 9 * 60 * 1000,  # not implemented yet
        #'metric_reporters': None,
        #'metrics_num_samples': 2,
        #'metrics_sample_window_ms': 30000,
    }

    def __init__(self, *topics, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, "Unrecognized configs: %s" % configs

        deprecated = {"smallest": "earliest", "largest": "latest"}
        if self.config["auto_offset_reset"] in deprecated:
            new_config = deprecated[self.config["auto_offset_reset"]]
            log.warning("use auto_offset_reset=%s (%s is deprecated)", new_config, self.config["auto_offset_reset"])
            self.config["auto_offset_reset"] = new_config

        self._client = KafkaClient(**self.config)

        # Check Broker Version if not set explicitly
        if self.config["api_version"] == "auto":
            self.config["api_version"] = self._client.check_version()
        assert self.config["api_version"] in ("0.9", "0.8.2", "0.8.1", "0.8.0")

        # Convert api_version config to tuple for easy comparisons
        self.config["api_version"] = tuple(map(int, self.config["api_version"].split(".")))

        self._subscription = SubscriptionState(self.config["auto_offset_reset"])
        self._fetcher = Fetcher(self._client, self._subscription, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription, assignors=self.config["partition_assignment_strategy"], **self.config
        )
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float("inf")
        # self.metrics = None

        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)

    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().
# ... (part of the code omitted here) ...
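The Warning above can be observed directly: subscribing to topics and then calling assign() raises IllegalStateError. A hedged sketch follows (the broker address and topic name are placeholders; in current kafka-python releases IllegalStateError is importable from kafka.errors):

from kafka import KafkaConsumer, TopicPartition
from kafka.errors import IllegalStateError

consumer = KafkaConsumer(bootstrap_servers='localhost:9092', group_id='demo-group')
consumer.subscribe(topics=['my-topic'])

try:
    # Mixing group subscription with manual assignment is rejected.
    consumer.assign([TopicPartition('my-topic', 0)])
except IllegalStateError as err:
    print('as documented:', err)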
Example 4: KafkaAdminClient

# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import check_version [as alias]
# ... (part of the code omitted here) ...
        This resolves to the lesser of either the latest api version this
        library supports, or the max version supported by the broker.

        :param operation: A list of protocol operation versions from kafka.protocol.
        :return: The max matching version number between client and broker.
        """
        version = min(len(operation) - 1,
                      self._client.get_api_versions()[operation[0].API_KEY][1])
        if version < self._client.get_api_versions()[operation[0].API_KEY][0]:
            # max library version is less than min broker version. Currently,
            # no Kafka versions specify a min msg version. Maybe in the future?
            raise IncompatibleBrokerVersion(
                "No version of the '{}' Kafka protocol is supported by both the client and broker."
                .format(operation[0].__name__))
        return version

    def _validate_timeout(self, timeout_ms):
        """Validate the timeout is set or use the configuration default.

        :param timeout_ms: The timeout provided by api call, in milliseconds.
        :return: The timeout to use for the operation.
        """
        return timeout_ms or self.config['request_timeout_ms']

    def _refresh_controller_id(self):
        """Determine the Kafka cluster controller."""
        version = self._matching_api_version(MetadataRequest)
        if 1 <= version <= 6:
            request = MetadataRequest[version]()
            response = self._send_request_to_node(self._client.least_loaded_node(), request)
            controller_id = response.controller_id
            # verify the controller is new enough to support our requests
            controller_version = self._client.check_version(controller_id)
            if controller_version < (0, 10, 0):
                raise IncompatibleBrokerVersion(
                    "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
                    .format(controller_version))
            self._controller_id = controller_id
        else:
            raise UnrecognizedBrokerVersion(
                "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}."
                .format(version))

    def _find_group_coordinator_id(self, group_id):
        """Find the broker node_id of the coordinator of the given group.

        Sends a FindCoordinatorRequest message to the cluster. Will block until
        the FindCoordinatorResponse is received. Any errors are immediately
        raised.

        :param group_id: The consumer group ID. This is typically the group
            name as a string.
        :return: The node_id of the broker that is the coordinator.
        """
        # Note: Java may change how this is implemented in KAFKA-6791.
        #
        # TODO add support for dynamically picking version of
        # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest.
        # When I experimented with this, GroupCoordinatorResponse_v1 didn't
        # match GroupCoordinatorResponse_v0 and I couldn't figure out why.
        gc_request = GroupCoordinatorRequest[0](group_id)
        gc_response = self._send_request_to_node(self._client.least_loaded_node(), gc_request)

        # use the extra error checking in add_group_coordinator() rather than
        # immediately returning the group coordinator.
        success = self._client.cluster.add_group_coordinator(group_id, gc_response)
# ... (part of the code omitted here) ...
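To make the negotiation in _matching_api_version concrete, here is a toy, self-contained reconstruction of the same min/max rule with made-up numbers (in the real method the broker's range comes from get_api_versions() keyed by the operation's API_KEY; the function and parameter names here are illustrative):

def matching_api_version(num_library_versions, broker_range):
    # broker_range mimics get_api_versions()[API_KEY]: the (min, max)
    # protocol versions the broker accepts for one API key.
    broker_min, broker_max = broker_range
    version = min(num_library_versions - 1, broker_max)
    if version < broker_min:
        raise RuntimeError('No mutually supported protocol version')
    return version

# Library ships v0..v3 (4 versions); broker supports v1..v5 -> pick v3.
print(matching_api_version(4, (1, 5)))   # 3
# Library ships only v0; broker requires at least v2 -> incompatible:
# matching_api_version(1, (2, 5)) raises RuntimeError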