This article collects typical usage examples of the Python method kafka.client_async.KafkaClient.least_loaded_node. If you are wondering what KafkaClient.least_loaded_node does or how to call it, the curated code examples below may help. You can also look further into usage examples of its containing class, kafka.client_async.KafkaClient.
Two code examples of the KafkaClient.least_loaded_node method are shown below, sorted by popularity by default.
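For orientation, here is a minimal, self-contained sketch of calling least_loaded_node directly on a KafkaClient. The broker address localhost:9092 and the timeout value are assumptions for illustration only and are not part of the examples below.

from kafka.client_async import KafkaClient

# Assumption: a broker is reachable at localhost:9092.
client = KafkaClient(bootstrap_servers='localhost:9092')

# Drive the client's I/O loop briefly so connection and metadata state
# are populated before asking for a node.
client.poll(timeout_ms=500)

# least_loaded_node() returns the node id with the fewest in-flight
# requests (preferring connected nodes), or None if no node is usable.
node_id = client.least_loaded_node()
if node_id is not None and client.ready(node_id):
    print("least loaded node:", node_id)

client.close()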
Example 1: OffsetsFetcherAsync
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import least_loaded_node [as alias]
# Imports used by this excerpt
import copy
import logging

from kafka.client_async import KafkaClient
from kafka.future import Future
from kafka.protocol.commit import GroupCoordinatorRequest
import kafka.errors as Errors

log = logging.getLogger(__name__)


class OffsetsFetcherAsync(object):

    DEFAULT_CONFIG = {
        'session_timeout_ms': 30000,
        'heartbeat_interval_ms': 3000,
        'retry_backoff_ms': 100,
        'api_version': (0, 9),
        'metric_group_prefix': ''
    }

    def __init__(self, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)
        self._client = KafkaClient(**self.config)
        self._coordinator_id = None
        self.group_id = configs['group_id']
        self.topic = configs['topic']

    def _ensure_coordinator_known(self):
        """Block until the coordinator for this group is known
        (and we have an active connection -- java client uses unsent queue).
        """
        while self._coordinator_unknown():
            # Prior to 0.8.2 there was no group coordinator
            # so we will just pick a node at random and treat
            # it as the "coordinator"
            if self.config['api_version'] < (0, 8, 2):
                self._coordinator_id = self._client.least_loaded_node()
                self._client.ready(self._coordinator_id)
                continue

            future = self._send_group_coordinator_request()
            self._client.poll(future=future)

            if future.failed():
                if isinstance(future.exception,
                              Errors.GroupCoordinatorNotAvailableError):
                    continue
                elif future.retriable():
                    metadata_update = self._client.cluster.request_update()
                    self._client.poll(future=metadata_update)
                else:
                    raise future.exception  # pylint: disable-msg=raising-bad-type

    def _coordinator_unknown(self):
        """Check if we know who the coordinator is and have an active connection

        Side-effect: reset _coordinator_id to None if connection failed

        Returns:
            bool: True if the coordinator is unknown
        """
        if self._coordinator_id is None:
            return True

        if self._client.is_disconnected(self._coordinator_id):
            self._coordinator_dead()
            return True

        return False

    def _coordinator_dead(self, error=None):
        """Mark the current coordinator as dead."""
        if self._coordinator_id is not None:
            log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                        self._coordinator_id, self.group_id, error)
            self._coordinator_id = None

    def _send_group_coordinator_request(self):
        """Discover the current coordinator for the group.

        Returns:
            Future: resolves to the node id of the coordinator
        """
        node_id = self._client.least_loaded_node()
        if node_id is None:
            return Future().failure(Errors.NoBrokersAvailable())

        log.debug("Sending group coordinator request for group %s to broker %s",
                  self.group_id, node_id)
        request = GroupCoordinatorRequest[0](self.group_id)
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_group_coordinator_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_group_coordinator_response(self, future, response):
        log.debug("Received group coordinator response %s", response)
        if not self._coordinator_unknown():
            # We already found the coordinator, so ignore the request
            log.debug("Coordinator already known -- ignoring metadata response")
            future.success(self._coordinator_id)
            return

        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            ok = self._client.cluster.add_group_coordinator(self.group_id, response)
            if not ok:
#......... the rest of this code is omitted here .........
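As a hypothetical usage sketch of Example 1: the broker address, group name, and topic name below are placeholders, and the omitted part of the class may require additional configuration.

# Hypothetical usage; requires a reachable broker at the given address.
fetcher = OffsetsFetcherAsync(bootstrap_servers='localhost:9092',
                              group_id='my-consumer-group',
                              topic='my-topic')
fetcher._ensure_coordinator_known()   # blocks until a coordinator is found
print(fetcher._coordinator_id)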
Example 2: KafkaAdminClient
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import least_loaded_node [as alias]
#......... part of this code is omitted here .........

    def _matching_api_version(self, operation):
        """Find the latest version of the protocol operation supported by both
        this library and the broker.

        This resolves to the lesser of either the latest api version this
        library supports, or the max version supported by the broker.

        :param operation: A list of protocol operation versions from kafka.protocol.
        :return: The max matching version number between client and broker.
        """
        version = min(len(operation) - 1,
                      self._client.get_api_versions()[operation[0].API_KEY][1])
        if version < self._client.get_api_versions()[operation[0].API_KEY][0]:
            # max library version is less than min broker version. Currently,
            # no Kafka versions specify a min msg version. Maybe in the future?
            raise IncompatibleBrokerVersion(
                "No version of the '{}' Kafka protocol is supported by both the client and broker."
                .format(operation[0].__name__))
        return version

    def _validate_timeout(self, timeout_ms):
        """Validate the timeout is set or use the configuration default.

        :param timeout_ms: The timeout provided by api call, in milliseconds.
        :return: The timeout to use for the operation.
        """
        return timeout_ms or self.config['request_timeout_ms']

    def _refresh_controller_id(self):
        """Determine the Kafka cluster controller."""
        version = self._matching_api_version(MetadataRequest)
        if 1 <= version <= 6:
            request = MetadataRequest[version]()
            response = self._send_request_to_node(self._client.least_loaded_node(), request)
            controller_id = response.controller_id
            # verify the controller is new enough to support our requests
            controller_version = self._client.check_version(controller_id)
            if controller_version < (0, 10, 0):
                raise IncompatibleBrokerVersion(
                    "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
                    .format(controller_version))
            self._controller_id = controller_id
        else:
            raise UnrecognizedBrokerVersion(
                "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}."
                .format(version))

    def _find_group_coordinator_id(self, group_id):
        """Find the broker node_id of the coordinator of the given group.

        Sends a FindCoordinatorRequest message to the cluster. Will block until
        the FindCoordinatorResponse is received. Any errors are immediately
        raised.

        :param group_id: The consumer group ID. This is typically the group
            name as a string.
        :return: The node_id of the broker that is the coordinator.
        """
        # Note: Java may change how this is implemented in KAFKA-6791.
        #
        # TODO add support for dynamically picking version of
        # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest.
        # When I experimented with this, GroupCoordinatorResponse_v1 didn't
        # match GroupCoordinatorResponse_v0 and I couldn't figure out why.
        gc_request = GroupCoordinatorRequest[0](group_id)
        gc_response = self._send_request_to_node(self._client.least_loaded_node(), gc_request)
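Both methods in Example 2 call a _send_request_to_node helper that falls outside this excerpt. The sketch below shows one way such a blocking send could be written on top of the async client calls used above; it is an illustration under that assumption, not the library's exact implementation.

def send_request_to_node(client, node_id, request):
    """Blocking send helper, analogous to the _send_request_to_node used above."""
    while not client.ready(node_id):
        client.poll()                      # wait for the connection to become ready
    future = client.send(node_id, request)
    client.poll(future=future)             # drive I/O until the response arrives
    if future.succeeded():
        return future.value
    raise future.exception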