This article collects typical usage examples of the Python method kafka.client_async.KafkaClient.send. If you are wondering what KafkaClient.send does, how to call it, or what real uses of it look like, the hand-picked examples below should help. You can also explore further usage examples of its containing class, kafka.client_async.KafkaClient.
The following 5 code examples of KafkaClient.send are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
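All five examples share the same asynchronous pattern: KafkaClient.send(node_id, request) queues a request on an already-connected node and returns a kafka.future.Future, which is only resolved once KafkaClient.poll() drives the I/O loop. Here is a minimal sketch of that round trip; the broker address is a placeholder, and this is an illustration of the pattern rather than code from any of the examples below.

from kafka.client_async import KafkaClient
from kafka.protocol.metadata import MetadataRequest

client = KafkaClient(bootstrap_servers='localhost:9092')  # placeholder address
node_id = client.least_loaded_node()
while not client.ready(node_id):   # send() fails unless the node is connected
    client.poll()

future = client.send(node_id, MetadataRequest[0]([]))  # empty list = all topics in v0
client.poll(future=future)         # drive I/O until the future is resolved
if future.succeeded():
    print(future.value)            # MetadataResponse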
Example 1: test_send
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import send [as alias]
def test_send(conn):
    cli = KafkaClient()
    try:
        cli.send(2, None)
    except Errors.NodeNotReadyError:
        pass
    else:
        assert False, 'NodeNotReadyError not raised'

    cli._initiate_connect(0)

    # ProduceRequest w/ 0 required_acks -> no response
    request = ProduceRequest(0, 0, [])
    ret = cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=False)
    assert isinstance(ret, Future)

    request = MetadataRequest([])
    cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=True)
Example 2: test_send
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import send [as alias]
def test_send(conn):
    cli = KafkaClient()

    # Send to unknown node => raises AssertionError
    with pytest.raises(AssertionError):
        cli.send(2, None)

    # Send to disconnected node => NodeNotReady
    conn.state = ConnectionStates.DISCONNECTED
    f = cli.send(0, None)
    assert f.failed()
    assert isinstance(f.exception, Errors.NodeNotReadyError)

    conn.state = ConnectionStates.CONNECTED
    cli._maybe_connect(0)

    # ProduceRequest w/ 0 required_acks -> no response
    request = ProduceRequest[0](0, 0, [])
    ret = cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=False)
    assert isinstance(ret, Future)

    request = MetadataRequest[0]([])
    cli.send(0, request)
    conn.send.assert_called_with(request, expect_response=True)
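Both test_send variants above receive a pytest fixture named conn that this page does not show. The sketch below is a reconstruction of its likely shape, assuming pytest-mock (the mocker fixture); it is not the project's actual fixture. It patches BrokerConnection where kafka.client_async resolves it, so KafkaClient never opens a real socket.

import pytest
from kafka.conn import ConnectionStates
from kafka.future import Future

@pytest.fixture
def conn(mocker):
    # The class mock doubles as every "instance" KafkaClient constructs.
    conn = mocker.patch('kafka.client_async.BrokerConnection')
    conn.return_value = conn
    conn.state = ConnectionStates.CONNECTED
    conn.connected = lambda: conn.state is ConnectionStates.CONNECTED
    conn.disconnected = lambda: conn.state is ConnectionStates.DISCONNECTED
    conn.blacked_out.return_value = False
    conn.send.return_value = Future().success(True)  # every send "succeeds"
    return conn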
Example 3: OffsetsFetcherAsync
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import send [as alias]
class OffsetsFetcherAsync(object):

    DEFAULT_CONFIG = {
        'session_timeout_ms': 30000,
        'heartbeat_interval_ms': 3000,
        'retry_backoff_ms': 100,
        'api_version': (0, 9),
        'metric_group_prefix': ''
    }

    def __init__(self, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)
        self._client = KafkaClient(**self.config)
        self._coordinator_id = None
        self.group_id = configs['group_id']
        self.topic = configs['topic']

    def _ensure_coordinator_known(self):
        """Block until the coordinator for this group is known
        (and we have an active connection -- java client uses unsent queue).
        """
        while self._coordinator_unknown():
            # Prior to 0.8.2 there was no group coordinator
            # so we will just pick a node at random and treat
            # it as the "coordinator"
            if self.config['api_version'] < (0, 8, 2):
                self._coordinator_id = self._client.least_loaded_node()
                self._client.ready(self._coordinator_id)
                continue

            future = self._send_group_coordinator_request()
            self._client.poll(future=future)

            if future.failed():
                if isinstance(future.exception,
                              Errors.GroupCoordinatorNotAvailableError):
                    continue
                elif future.retriable():
                    metadata_update = self._client.cluster.request_update()
                    self._client.poll(future=metadata_update)
                else:
                    raise future.exception  # pylint: disable-msg=raising-bad-type

    def _coordinator_unknown(self):
        """Check if we know who the coordinator is and have an active connection

        Side-effect: reset _coordinator_id to None if connection failed

        Returns:
            bool: True if the coordinator is unknown
        """
        if self._coordinator_id is None:
            return True

        if self._client.is_disconnected(self._coordinator_id):
            self._coordinator_dead()
            return True

        return False

    def _coordinator_dead(self, error=None):
        """Mark the current coordinator as dead."""
        if self._coordinator_id is not None:
            log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                        self._coordinator_id, self.group_id, error)
            self._coordinator_id = None

    def _send_group_coordinator_request(self):
        """Discover the current coordinator for the group.

        Returns:
            Future: resolves to the node id of the coordinator
        """
        node_id = self._client.least_loaded_node()
        if node_id is None:
            return Future().failure(Errors.NoBrokersAvailable())

        log.debug("Sending group coordinator request for group %s to broker %s",
                  self.group_id, node_id)
        request = GroupCoordinatorRequest[0](self.group_id)
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_group_coordinator_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_group_coordinator_response(self, future, response):
        log.debug("Received group coordinator response %s", response)

        if not self._coordinator_unknown():
            # We already found the coordinator, so ignore the request
            log.debug("Coordinator already known -- ignoring metadata response")
            future.success(self._coordinator_id)
            return

        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            ok = self._client.cluster.add_group_coordinator(self.group_id, response)
            if not ok:
#......... remainder of the code omitted .........
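The send idiom worth lifting out of this class is in _send_group_coordinator_request: chain add_callback/add_errback onto the Future returned by KafkaClient.send, then drive the event loop with poll. The standalone sketch below reproduces that lookup outside the class; the broker address and group name are placeholders, and it assumes a kafka-python version where GroupCoordinatorRequest lives in kafka.protocol.commit, as the example above does.

from kafka.client_async import KafkaClient
from kafka.protocol.commit import GroupCoordinatorRequest

client = KafkaClient(bootstrap_servers='localhost:9092')  # placeholder address
node_id = client.least_loaded_node()
while not client.ready(node_id):
    client.poll()

future = client.send(node_id, GroupCoordinatorRequest[0]('my-group'))
future.add_callback(lambda resp: print('coordinator:', resp.coordinator_id))
future.add_errback(lambda exc: print('lookup failed:', exc))
client.poll(future=future)  # runs the callback or errback once resolved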
Example 4: __init__
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import send [as alias]
class KafkaConsumerLag:

    def __init__(self, bootstrap_servers):
        self.client = KafkaClient(bootstrap_servers=bootstrap_servers)
        self.client.check_version()

    def _send(self, broker_id, request, response_type=None):
        f = self.client.send(broker_id, request)
        response = self.client.poll(future=f)

        if response_type:
            if response and len(response) > 0:
                for r in response:
                    if isinstance(r, response_type):
                        return r
        else:
            if response and len(response) > 0:
                return response[0]

        return None

    def check(self, group_topics=None, discovery=None):
        """
        {
          "<group>": {
            "state": <str>,
            "topics": {
              "<topic>": {
                "consumer_lag": <int>,
                "partitions": {
                  "<partition>": {
                    "offset_first": <int>,
                    "offset_consumed": <int>,
                    "offset_last": <int>,
                    "lag": <int>
                  }
                }
              }
            }
          }
        }
        :param group_topics: consumer group ID -> list(topics) mapping to check;
            if None, active groups are discovered from the brokers
        :param discovery: discover active consumer groups from the brokers
            (defaults to True when group_topics is not given)
        :return: consumer statistics
        """
        cluster = self.client.cluster
        brokers = cluster.brokers()

        # Consumer group ID -> list(topics)
        if group_topics is None:
            group_topics = {}
            if discovery is None:
                discovery = True
        else:
            group_topics = copy.deepcopy(group_topics)

        # Set of consumer group IDs
        consumer_groups = set(group_topics.keys())

        # Set of all known topics
        topics = set(itertools.chain(*group_topics.values()))

        # Consumer group ID -> coordinating broker
        consumer_coordinator = {}

        # Coordinating broker -> list(consumer group IDs)
        coordinator_consumers = {}

        results = {}

        for consumer_group in group_topics.keys():
            results[consumer_group] = {'state': None, 'topics': {}}

        # Ensure connections to all brokers
        for broker in brokers:
            while not self.client.is_ready(broker.nodeId):
                self.client.ready(broker.nodeId)

        # Collect all active consumer groups
        if discovery:
            for broker in brokers:
                response = self._send(broker.nodeId, _ListGroupsRequest(), _ListGroupsResponse)
                if response:
                    for group in response.groups:
                        consumer_groups.add(group[0])

        # Identify which broker is coordinating each consumer group
        for group in consumer_groups:
            response = self._send(next(iter(brokers)).nodeId, _GroupCoordinatorRequest(group), _GroupCoordinatorResponse)
            if response:
                consumer_coordinator[group] = response.coordinator_id
                if response.coordinator_id not in coordinator_consumers:
                    coordinator_consumers[response.coordinator_id] = []
#......... remainder of the code omitted .........
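Assuming the elided remainder of check() fills in the results dict exactly as its docstring describes, using the class would look roughly like the sketch below; the broker address, group, and topic names are placeholders.

# Hypothetical usage of KafkaConsumerLag; output shape taken from the docstring.
lag_checker = KafkaConsumerLag(bootstrap_servers='localhost:9092')
stats = lag_checker.check(group_topics={'my-group': ['my-topic']}, discovery=False)
for group, info in stats.items():
    for topic, data in info['topics'].items():
        print(group, topic, 'lag =', data['consumer_lag'])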
Example 5: KafkaAdminClient
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import send [as alias]
class KafkaAdminClient(object):
    """A class for administering the Kafka cluster.

    Warning:
        This is an unstable interface that was recently added and is subject to
        change without warning. In particular, many methods currently return
        raw protocol tuples. In future releases, we plan to make these into
        nicer, more pythonic objects. Unfortunately, this will likely break
        those interfaces.

    The KafkaAdminClient class will negotiate for the latest version of each message
    protocol format supported by both the kafka-python client library and the
    Kafka broker. Usage of optional fields from protocol versions that are not
    supported by the broker will result in IncompatibleBrokerVersion exceptions.

    Use of this class requires a minimum broker version >= 0.10.0.0.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'kafka-python-{version}'
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        reconnect_backoff_max_ms (int): The maximum amount of time in
            milliseconds to wait when reconnecting to a broker that has
            repeatedly failed to connect. If provided, the backoff per host
            will increase exponentially for each consecutive connection
            failure, up to this maximum. To avoid connection storms, a
            randomization factor of 0.2 will be applied to the backoff
            resulting in a random range between 20% below and 20% above
            the computed value. Default: 1000.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 30000.
        connections_max_idle_ms: Close idle connections after the number of
            milliseconds specified by this config. The broker closes idle
            connections after connections.max.idle.ms, so this avoids hitting
            unexpected socket disconnected errors on the client.
            Default: 540000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Default: 5.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        send_buffer_bytes (int): The size of the TCP send buffer
            (SO_SNDBUF) to use when sending data. Default: None (relies on
            system defaults). Java client defaults to 131072.
        socket_options (list): List of tuple-arguments to socket.setsockopt
            to apply to broker connection sockets. Default:
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): Flag to configure whether SSL handshake
            should verify that the certificate matches the broker's hostname.
            Default: True.
        ssl_cafile (str): Optional filename of CA file to use in certificate
            verification. Default: None.
        ssl_certfile (str): Optional filename of file in PEM format containing
            the client certificate, as well as any CA certificates needed to
            establish the certificate's authenticity. Default: None.
        ssl_keyfile (str): Optional filename containing the client private key.
            Default: None.
        ssl_password (str): Optional password to be used when loading the
            certificate chain. Default: None.
        ssl_crlfile (str): Optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked against
            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
            Default: None.
        api_version (tuple): Specify which Kafka API version to use. If set
            to None, KafkaClient will attempt to infer the broker version by
            probing various APIs. Example: (0, 10, 2). Default: None
        api_version_auto_timeout_ms (int): number of milliseconds to throw a
            timeout exception from the constructor when checking the broker
            api version. Only applies if api_version is None
        selector (selectors.BaseSelector): Provide a specific selector
            implementation to use for I/O multiplexing.
            Default: selectors.DefaultSelector
        metrics (kafka.metrics.Metrics): Optionally provide a metrics
            instance for capturing network IO stats. Default: None.
        metric_group_prefix (str): Prefix for metric names. Default: ''
        sasl_mechanism (str): Authentication mechanism when security_protocol
#......... remainder of the code omitted .........
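The excerpt stops inside the docstring, before any methods are shown. For context, here is a minimal usage sketch against kafka-python's released admin API; the bootstrap address, client id, topic name, and partition counts are placeholders, and you should confirm your installed kafka-python version provides these methods.

from kafka.admin import KafkaAdminClient, NewTopic

admin = KafkaAdminClient(
    bootstrap_servers='localhost:9092',  # placeholder address
    client_id='example-admin',           # placeholder client id
)
# Create a topic, then list the consumer groups the cluster knows about.
admin.create_topics([NewTopic(name='demo-topic', num_partitions=3, replication_factor=1)])
print(admin.list_consumer_groups())
admin.close()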