This page collects typical usage examples of the Python method kafka.client_async.KafkaClient.close. If you are wondering what KafkaClient.close does or how to call it, the hand-picked code samples below should help. You can also look further into usage examples of the class that defines the method, kafka.client_async.KafkaClient.
The following shows seven code examples of KafkaClient.close, sorted by popularity by default.
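Before the collected examples, here is a minimal sketch of the call this page covers: constructing a kafka.client_async.KafkaClient and shutting it down with close(). The bootstrap address is an assumption for illustration only; close() also accepts a node_id to close a single broker connection, which the test examples below exercise.

from kafka.client_async import KafkaClient

# Assumed bootstrap address, for illustration only.
client = KafkaClient(bootstrap_servers='localhost:9092')
try:
    pass  # use the client here (poll, send, metadata lookups, ...)
finally:
    client.close()  # close all broker connections; client.close(node_id) closes just one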
Example 1: test_close
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
def test_close(conn):
    cli = KafkaClient()

    # Unknown node - silent
    cli.close(2)

    # Single node close
    cli._initiate_connect(0)
    assert not conn.close.call_count
    cli.close(0)
    assert conn.close.call_count == 1

    # All node close
    cli._initiate_connect(1)
    cli.close()
    assert conn.close.call_count == 3
Example 2: test_close
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
def test_close(mocker, conn):
    cli = KafkaClient()
    mocker.patch.object(cli, '_selector')

    # Unknown node - silent
    cli.close(2)

    # Single node close
    cli._maybe_connect(0)
    assert not conn.close.call_count
    cli.close(0)
    assert conn.close.call_count == 1

    # All node close
    cli._maybe_connect(1)
    cli.close()
    assert conn.close.call_count == 3
Example 3: test_close
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
def test_close(mocker, conn):
    cli = KafkaClient()
    mocker.patch.object(cli, '_selector')

    # bootstrap connection should have been closed
    assert conn.close.call_count == 1

    # Unknown node - silent
    cli.close(2)

    # Single node close
    cli._maybe_connect(0)
    assert conn.close.call_count == 1
    cli.close(0)
    assert conn.close.call_count == 2

    # All node close
    cli._maybe_connect(1)
    cli.close()
    assert conn.close.call_count == 4
Example 4: KafkaConsumer
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
# ......... part of this code omitted .........
            self.config['auto_offset_reset'] = new_config

        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)
        # TODO _metrics likely needs to be passed to KafkaClient, etc.

        # api_version was previously a str. accept old format for now
        if isinstance(self.config['api_version'], str):
            str_version = self.config['api_version']
            if str_version == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(map(int, str_version.split('.')))
            log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
                        str(self.config['api_version']), str_version)

        self._client = KafkaClient(metrics=self._metrics, **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._subscription = SubscriptionState(self.config['auto_offset_reset'])
        self._fetcher = Fetcher(
            self._client, self._subscription, self._metrics, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription, self._metrics,
            assignors=self.config['partition_assignment_strategy'],
            **self.config)
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float('inf')

        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)

    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            This interface does not support incremental assignment and will
            replace the previous assignment (if there was one).

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            no rebalance operation triggered when group membership or cluster
            and topic metadata change.
        """
        self._subscription.assign_from_user(partitions)
        self._client.set_topics([tp.topic for tp in partitions])
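The assign() docstring above contrasts manual partition assignment with group-managed subscribe(). As a rough usage sketch with the public kafka-python consumer API (broker address, topic name, and partition numbers are placeholders, not taken from the example above):

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')  # assumed broker address
# Manual assignment: no consumer-group rebalance is triggered.
consumer.assign([TopicPartition('my-topic', 0), TopicPartition('my-topic', 1)])
for record in consumer:
    # iterates indefinitely; break or poll with a timeout in real code
    print(record.topic, record.partition, record.offset)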
Example 5: __init__
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
# ......... part of this code omitted .........
                if tp.topic not in request_partitions:
                    request_partitions[tp.topic] = []
                # Time value '-2' is to get the offset for first available message
                request_partitions[tp.topic].append((tp.partition, -2, 1))

            # List(topic, List(partition, time, max_offsets))
            topic_partitions = []
            for tp in request_partitions.iteritems():
                topic_partitions.append(tp)

            # Request partition start offsets
            response = self._send(broker, _OffsetRequest(-1, topic_partitions), _OffsetResponse)
            if response:
                for offset in response.topics:
                    topic = offset[0]
                    if topic not in start_offsets:
                        start_offsets[topic] = {}
                    for p in offset[1]:
                        start_offsets[topic][p[0]] = p[2][0]

            for tp in topic_partitions:
                for i, ptm in enumerate(tp[1]):
                    # Time value '-1' is to get the offset for next new message
                    tp[1][i] = (ptm[0], -1, 1)

            # Request partition end offsets
            response = self._send(broker, _OffsetRequest(-1, topic_partitions), _OffsetResponse)
            if response:
                for offset in response.topics:
                    topic = offset[0]
                    if topic not in end_offsets:
                        end_offsets[topic] = {}
                    for p in offset[1]:
                        end_offsets[topic][p[0]] = p[2][0]

        # Populate with offset values
        for group, topics in group_topics.iteritems():
            coordinator = consumer_coordinator[group]

            # topic -> list(partition)
            request_partitions = {}
            for topic in topics:
                results[group]['topics'][topic]['consumer_lag'] = 0
                results[group]['topics'][topic]['partitions'] = {}
                if topic in start_offsets:
                    for p in start_offsets[topic]:
                        results[group]['topics'][topic]['partitions'][p] = {
                            'offset_first': start_offsets[topic][p],
                            'offset_last': end_offsets[topic][p],
                            'offset_consumed': 0,
                            'lag': 0}
                        if topic not in request_partitions:
                            request_partitions[topic] = []
                        request_partitions[topic].append(p)

            # List(topic -> list(partition))
            topic_partitions = []
            for tp in request_partitions.iteritems():
                topic_partitions.append(tp)

            response = self._send(coordinator, _OffsetFetchRequest(group, topic_partitions), _OffsetFetchResponse)
            if response:
                for offset in response.topics:
                    topic = offset[0]
                    offsets = offset[1]
                    if topic not in results[group]['topics']:
                        continue
                    for p_offset in offsets:
                        partition = p_offset[0]
                        offset_consumed = p_offset[1]
                        p_results = results[group]['topics'][topic]['partitions'][partition]
                        if offset_consumed != -1:
                            p_results['offset_consumed'] = offset_consumed
                            p_results['lag'] = p_results['offset_last'] - offset_consumed
                        else:
                            p_results['offset_consumed'] = 0
                            p_results['lag'] = p_results['offset_last'] - p_results['offset_first']
                        results[group]['topics'][topic]['consumer_lag'] += p_results['lag']

        return results

    def close(self):
        if self.client:
            self.client.close()
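Example 5 derives consumer lag from raw _OffsetRequest/_OffsetFetchRequest tuples (time -2 for the earliest available offset, -1 for the next offset to be written). On recent kafka-python releases the same figures can be read through the public KafkaConsumer API; the following is a hedged sketch of that route, with group, topic, and broker address assumed for illustration:

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092', group_id='my-group')  # assumed values
tp = TopicPartition('my-topic', 0)
consumer.assign([tp])

first = consumer.beginning_offsets([tp])[tp]  # earliest available offset (the '-2' query above)
last = consumer.end_offsets([tp])[tp]         # next offset to be written (the '-1' query above)
committed = consumer.committed(tp)            # last committed offset, or None if nothing committed

# Mirrors the lag arithmetic in Example 5.
lag = (last - committed) if committed is not None else (last - first)
print('partition 0 lag:', lag)
consumer.close()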
Example 6: KafkaConsumer
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
# ......... part of this code omitted .........
        #'metrics_sample_window_ms': 30000,
    }

    def __init__(self, *topics, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, "Unrecognized configs: %s" % configs

        deprecated = {"smallest": "earliest", "largest": "latest"}
        if self.config["auto_offset_reset"] in deprecated:
            new_config = deprecated[self.config["auto_offset_reset"]]
            log.warning("use auto_offset_reset=%s (%s is deprecated)", new_config, self.config["auto_offset_reset"])
            self.config["auto_offset_reset"] = new_config

        self._client = KafkaClient(**self.config)

        # Check Broker Version if not set explicitly
        if self.config["api_version"] == "auto":
            self.config["api_version"] = self._client.check_version()
        assert self.config["api_version"] in ("0.9", "0.8.2", "0.8.1", "0.8.0")

        # Convert api_version config to tuple for easy comparisons
        self.config["api_version"] = tuple(map(int, self.config["api_version"].split(".")))

        self._subscription = SubscriptionState(self.config["auto_offset_reset"])
        self._fetcher = Fetcher(self._client, self._subscription, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription, assignors=self.config["partition_assignment_strategy"], **self.config
        )
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float("inf")

        # self.metrics = None
        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)
    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            This interface does not support incremental assignment and will
            replace the previous assignment (if there was one).

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            no rebalance operation triggered when group membership or cluster
            and topic metadata change.
        """
        self._subscription.assign_from_user(partitions)
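Per the Warning and Raises sections of the docstring above, subscribe() and assign() are mutually exclusive on a single consumer instance; mixing them should raise IllegalStateError. A small hedged sketch of that behaviour (broker address and topic name are placeholders):

from kafka import KafkaConsumer, TopicPartition
from kafka.errors import IllegalStateError

consumer = KafkaConsumer(bootstrap_servers='localhost:9092', group_id='my-group')
consumer.subscribe(topics=['my-topic'])                # group-managed assignment
try:
    consumer.assign([TopicPartition('my-topic', 0)])   # manual assignment on the same instance
except IllegalStateError:
    print('subscribe() and assign() cannot be mixed on one consumer')
finally:
    consumer.close()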
Example 7: KafkaAdminClient
# Required import: from kafka.client_async import KafkaClient [as alias]
# Or: from kafka.client_async.KafkaClient import close [as alias]
class KafkaAdminClient(object):
    """A class for administering the Kafka cluster.

    Warning:
        This is an unstable interface that was recently added and is subject to
        change without warning. In particular, many methods currently return
        raw protocol tuples. In future releases, we plan to make these into
        nicer, more pythonic objects. Unfortunately, this will likely break
        those interfaces.

    The KafkaAdminClient class will negotiate for the latest version of each message
    protocol format supported by both the kafka-python client library and the
    Kafka broker. Usage of optional fields from protocol versions that are not
    supported by the broker will result in IncompatibleBrokerVersion exceptions.

    Use of this class requires a minimum broker version >= 0.10.0.0.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'kafka-python-{version}'
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        reconnect_backoff_max_ms (int): The maximum amount of time in
            milliseconds to wait when reconnecting to a broker that has
            repeatedly failed to connect. If provided, the backoff per host
            will increase exponentially for each consecutive connection
            failure, up to this maximum. To avoid connection storms, a
            randomization factor of 0.2 will be applied to the backoff
            resulting in a random range between 20% below and 20% above
            the computed value. Default: 1000.
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 30000.
        connections_max_idle_ms: Close idle connections after the number of
            milliseconds specified by this config. The broker closes idle
            connections after connections.max.idle.ms, so this avoids hitting
            unexpected socket disconnected errors on the client.
            Default: 540000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Default: 5.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        send_buffer_bytes (int): The size of the TCP send buffer
            (SO_SNDBUF) to use when sending data. Default: None (relies on
            system defaults). Java client defaults to 131072.
        socket_options (list): List of tuple-arguments to socket.setsockopt
            to apply to broker connection sockets. Default:
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): Flag to configure whether SSL handshake
            should verify that the certificate matches the broker's hostname.
            Default: True.
        ssl_cafile (str): Optional filename of CA file to use in certificate
            verification. Default: None.
        ssl_certfile (str): Optional filename of file in PEM format containing
            the client certificate, as well as any CA certificates needed to
            establish the certificate's authenticity. Default: None.
        ssl_keyfile (str): Optional filename containing the client private key.
            Default: None.
        ssl_password (str): Optional password to be used when loading the
            certificate chain. Default: None.
        ssl_crlfile (str): Optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked against
            this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
            Default: None.
        api_version (tuple): Specify which Kafka API version to use. If set
            to None, KafkaClient will attempt to infer the broker version by
            probing various APIs. Example: (0, 10, 2). Default: None
        api_version_auto_timeout_ms (int): number of milliseconds to throw a
            timeout exception from the constructor when checking the broker
            api version. Only applies if api_version is None
        selector (selectors.BaseSelector): Provide a specific selector
            implementation to use for I/O multiplexing.
            Default: selectors.DefaultSelector
        metrics (kafka.metrics.Metrics): Optionally provide a metrics
            instance for capturing network IO stats. Default: None.
        metric_group_prefix (str): Prefix for metric names. Default: ''
        sasl_mechanism (str): Authentication mechanism when security_protocol
    # ......... part of this code omitted .........
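The KafkaAdminClient snippet above is cut off inside its docstring, but a minimal, hedged sketch of constructing the client and releasing its connections with close() (bootstrap address assumed, brokers >= 0.10.0.0 required per the docstring) could look like this:

from kafka.admin import KafkaAdminClient

admin = KafkaAdminClient(bootstrap_servers='localhost:9092', client_id='example-admin')  # assumed address
try:
    print(admin.list_consumer_groups())  # e.g. list consumer groups known to the cluster
finally:
    admin.close()                        # closes the underlying KafkaClient connections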