This article collects typical usage examples of the Python kafka.client_async.KafkaClient class. If you are wondering what the KafkaClient class does, how to use it, or where to find working examples, the curated class examples below may help.
Fifteen KafkaClient code examples are shown below, ordered by popularity by default.
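Before the excerpts, here is a minimal sketch of how a KafkaClient is typically constructed and driven, assembled from the calls that appear in the examples below. The bootstrap_servers value, the ready() check, and the MetadataRequest import path are assumptions for illustration and may differ between kafka-python versions.

from kafka.client_async import KafkaClient
from kafka.protocol.metadata import MetadataRequest

# Connect to an assumed local broker; api_version can also be left unset for auto-detection.
cli = KafkaClient(bootstrap_servers='localhost:9092', api_version=(0, 10))

# Request a metadata refresh for a set of topics, then poll so the client
# makes progress on connections and pending requests.
fut = cli.set_topics(['my-topic'])
cli.poll(timeout_ms=500)

# Send a request to the least-loaded node once it is ready.
node_id = cli.least_loaded_node()
if node_id is not None and cli.ready(node_id):
    future = cli.send(node_id, MetadataRequest[0]([]))
    cli.poll(timeout_ms=500)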
Example 1: test_send
def test_send(conn):
    cli = KafkaClient()

    # Send to unknown node => raises AssertionError
    try:
        cli.send(2, None)
        assert False, 'Exception not raised'
    except AssertionError:
        pass

    # Send to disconnected node => NodeNotReady
    conn.state = ConnectionStates.DISCONNECTED
    f = cli.send(0, None)
    assert f.failed()
    assert isinstance(f.exception, Errors.NodeNotReadyError)

    conn.state = ConnectionStates.CONNECTED
    cli._maybe_connect(0)

    # ProduceRequest w/ 0 required_acks -> no response
    request = ProduceRequest[0](0, 0, [])
    ret = cli.send(0, request)
    assert conn.send.called_with(request, expect_response=False)
    assert isinstance(ret, Future)

    request = MetadataRequest[0]([])
    cli.send(0, request)
    assert conn.send.called_with(request, expect_response=True)
Example 2: test_poll
def test_poll(mocker):
    mocker.patch.object(KafkaClient, '_bootstrap')
    metadata = mocker.patch.object(KafkaClient, '_maybe_refresh_metadata')
    _poll = mocker.patch.object(KafkaClient, '_poll')
    cli = KafkaClient(api_version=(0, 9))
    tasks = mocker.patch.object(cli._delayed_tasks, 'next_at')

    # metadata timeout wins
    metadata.return_value = 1000
    tasks.return_value = 2
    cli.poll()
    _poll.assert_called_with(1.0, sleep=True)

    # user timeout wins
    cli.poll(250)
    _poll.assert_called_with(0.25, sleep=True)

    # tasks timeout wins
    tasks.return_value = 0
    cli.poll(250)
    _poll.assert_called_with(0, sleep=True)

    # default is request_timeout_ms
    metadata.return_value = 1000000
    tasks.return_value = 10000
    cli.poll()
    _poll.assert_called_with(cli.config['request_timeout_ms'] / 1000.0,
                             sleep=True)
Example 3: test_initiate_connect
def test_initiate_connect(conn):
    cli = KafkaClient()
    try:
        # Node not in metadata, raises AssertionError
        cli._initiate_connect(2)
    except AssertionError:
        pass
    else:
        assert False, 'Exception not raised'

    assert 0 not in cli._conns
    state = cli._initiate_connect(0)
    assert cli._conns[0] is conn
    assert state is conn.state
Example 4: test_finish_connect
def test_finish_connect(conn):
    cli = KafkaClient()
    try:
        # Node not in metadata, raises AssertionError
        cli._initiate_connect(2)
    except AssertionError:
        pass
    else:
        assert False, 'Exception not raised'

    assert 0 not in cli._conns
    cli._initiate_connect(0)

    conn.connect.return_value = ConnectionStates.CONNECTING
    state = cli._finish_connect(0)
    assert 0 in cli._connecting
    assert state is ConnectionStates.CONNECTING

    conn.connect.return_value = ConnectionStates.CONNECTED
    state = cli._finish_connect(0)
    assert 0 not in cli._connecting
    assert state is ConnectionStates.CONNECTED

    # Failure to connect should trigger metadata update
    assert not cli.cluster._need_update
    cli._connecting.add(0)
    conn.connect.return_value = ConnectionStates.DISCONNECTED
    state = cli._finish_connect(0)
    assert 0 not in cli._connecting
    assert state is ConnectionStates.DISCONNECTED
    assert cli.cluster._need_update
Example 5: test_maybe_refresh_metadata_ttl
def test_maybe_refresh_metadata_ttl(mocker):
    mocker.patch.object(KafkaClient, '_bootstrap')
    _poll = mocker.patch.object(KafkaClient, '_poll')

    cli = KafkaClient(request_timeout_ms=9999999, retry_backoff_ms=2222)

    tasks = mocker.patch.object(cli._delayed_tasks, 'next_at')
    tasks.return_value = 9999999

    ttl = mocker.patch.object(cli.cluster, 'ttl')
    ttl.return_value = 1234

    # The metadata ttl (1234ms) is the smallest pending timeout, so poll sleeps 1.234s
    cli.poll(timeout_ms=9999999, sleep=True)
    _poll.assert_called_with(1.234, sleep=True)
Example 6: __init__
def __init__(self, **configs):
    log.debug("Starting KafkaAdminClient with configuration: %s", configs)
    extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
    if extra_configs:
        raise KafkaConfigurationError("Unrecognized configs: {}".format(extra_configs))

    self.config = copy.copy(self.DEFAULT_CONFIG)
    self.config.update(configs)

    # Configure metrics
    metrics_tags = {'client-id': self.config['client_id']}
    metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                 time_window_ms=self.config['metrics_sample_window_ms'],
                                 tags=metrics_tags)
    reporters = [reporter() for reporter in self.config['metric_reporters']]
    self._metrics = Metrics(metric_config, reporters)

    self._client = KafkaClient(metrics=self._metrics,
                               metric_group_prefix='admin',
                               **self.config)

    # Get auto-discovered version from client if necessary
    if self.config['api_version'] is None:
        self.config['api_version'] = self._client.config['api_version']

    self._closed = False
    self._refresh_controller_id()
    log.debug("KafkaAdminClient started.")
Example 7: __init__
def __init__(self, **configs):
    self.config = copy.copy(self.DEFAULT_CONFIG)
    self.config.update(configs)

    self._client = KafkaClient(**self.config)
    self._coordinator_id = None
    self.group_id = configs['group_id']
    self.topic = configs['topic']
Example 8: test_maybe_connect
def test_maybe_connect(conn):
    cli = KafkaClient()
    try:
        # Node not in metadata, raises AssertionError
        cli._maybe_connect(2)
    except AssertionError:
        pass
    else:
        assert False, 'Exception not raised'

    # New node_id creates a conn object
    assert 0 not in cli._conns
    conn.state = ConnectionStates.DISCONNECTED
    conn.connect.side_effect = lambda: conn._set_conn_state(ConnectionStates.CONNECTING)
    assert cli._maybe_connect(0) is False
    assert cli._conns[0] is conn
Example 9: test_maybe_refresh_metadata_backoff
def test_maybe_refresh_metadata_backoff(mocker):
    mocker.patch.object(KafkaClient, '_bootstrap')
    _poll = mocker.patch.object(KafkaClient, '_poll')

    cli = KafkaClient(request_timeout_ms=9999999, retry_backoff_ms=2222)

    tasks = mocker.patch.object(cli._delayed_tasks, 'next_at')
    tasks.return_value = 9999999

    ttl = mocker.patch.object(cli.cluster, 'ttl')
    ttl.return_value = 0

    now = time.time()
    t = mocker.patch('time.time')
    t.return_value = now
    cli._last_no_node_available_ms = now * 1000

    # Metadata is stale (ttl=0) but no node was recently available,
    # so poll backs off for retry_backoff_ms (2.222s)
    cli.poll(timeout_ms=9999999, sleep=True)
    _poll.assert_called_with(2.222, sleep=True)
Example 10: __init__
def __init__(self, *topics, **configs):
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs.pop(key)

    # Only check for extra config keys in top-level class
    assert not configs, 'Unrecognized configs: %s' % configs

    deprecated = {'smallest': 'earliest', 'largest': 'latest'}
    if self.config['auto_offset_reset'] in deprecated:
        new_config = deprecated[self.config['auto_offset_reset']]
        log.warning('use auto_offset_reset=%s (%s is deprecated)',
                    new_config, self.config['auto_offset_reset'])
        self.config['auto_offset_reset'] = new_config

    metrics_tags = {'client-id': self.config['client_id']}
    metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                 time_window_ms=self.config['metrics_sample_window_ms'],
                                 tags=metrics_tags)
    reporters = [reporter() for reporter in self.config['metric_reporters']]
    self._metrics = Metrics(metric_config, reporters)
    metric_group_prefix = 'consumer'
    # TODO _metrics likely needs to be passed to KafkaClient, etc.

    # api_version was previously a str. accept old format for now
    if isinstance(self.config['api_version'], str):
        str_version = self.config['api_version']
        if str_version == 'auto':
            self.config['api_version'] = None
        else:
            self.config['api_version'] = tuple(map(int, str_version.split('.')))
        log.warning('use api_version=%s (%s is deprecated)',
                    str(self.config['api_version']), str_version)

    self._client = KafkaClient(**self.config)

    # Get auto-discovered version from client if necessary
    if self.config['api_version'] is None:
        self.config['api_version'] = self._client.config['api_version']

    self._subscription = SubscriptionState(self.config['auto_offset_reset'])
    self._fetcher = Fetcher(
        self._client, self._subscription, self._metrics, metric_group_prefix, **self.config)
    self._coordinator = ConsumerCoordinator(
        self._client, self._subscription, self._metrics, metric_group_prefix,
        assignors=self.config['partition_assignment_strategy'],
        **self.config)
    self._closed = False
    self._iterator = None
    self._consumer_timeout = float('inf')

    if topics:
        self._subscription.subscribe(topics=topics)
        self._client.set_topics(topics)
Example 11: test_maybe_refresh_metadata_update
def test_maybe_refresh_metadata_update(mocker):
    mocker.patch.object(KafkaClient, '_bootstrap')
    _poll = mocker.patch.object(KafkaClient, '_poll')

    cli = KafkaClient(request_timeout_ms=9999999, retry_backoff_ms=2222)

    tasks = mocker.patch.object(cli._delayed_tasks, 'next_at')
    tasks.return_value = 9999999

    ttl = mocker.patch.object(cli.cluster, 'ttl')
    ttl.return_value = 0

    mocker.patch.object(cli, 'least_loaded_node', return_value='foobar')
    mocker.patch.object(cli, '_can_send_request', return_value=True)
    send = mocker.patch.object(cli, 'send')

    # Metadata is stale and a node can accept requests, so a MetadataRequest
    # is sent immediately and poll does not sleep
    cli.poll(timeout_ms=9999999, sleep=True)
    _poll.assert_called_with(0, sleep=True)
    assert cli._metadata_refresh_in_progress
    request = MetadataRequest[0]([])
    send.assert_called_with('foobar', request)
Example 12: test_maybe_connect
def test_maybe_connect(conn):
    cli = KafkaClient()
    try:
        # Node not in metadata, raises AssertionError
        cli._maybe_connect(2)
    except AssertionError:
        pass
    else:
        assert False, 'Exception not raised'

    assert 0 not in cli._conns
    conn.state = ConnectionStates.DISCONNECTED
    conn.connect.side_effect = lambda: ConnectionStates.CONNECTING
    assert cli._maybe_connect(0) is False
    assert cli._conns[0] is conn
    assert 0 in cli._connecting

    conn.state = ConnectionStates.CONNECTING
    conn.connect.side_effect = lambda: ConnectionStates.CONNECTED
    assert cli._maybe_connect(0) is True
    assert 0 not in cli._connecting

    # Failure to connect should trigger metadata update
    assert cli.cluster._need_update is False
    cli._connecting.add(0)
    conn.state = ConnectionStates.CONNECTING
    conn.connect.side_effect = lambda: ConnectionStates.DISCONNECTED
    assert cli._maybe_connect(0) is False
    assert 0 not in cli._connecting
    assert cli.cluster._need_update is True
Example 13: test_maybe_refresh_metadata_failure
def test_maybe_refresh_metadata_failure(mocker):
    mocker.patch.object(KafkaClient, '_bootstrap')
    _poll = mocker.patch.object(KafkaClient, '_poll')

    cli = KafkaClient(request_timeout_ms=9999999, retry_backoff_ms=2222)

    tasks = mocker.patch.object(cli._delayed_tasks, 'next_at')
    tasks.return_value = 9999999

    ttl = mocker.patch.object(cli.cluster, 'ttl')
    ttl.return_value = 0

    mocker.patch.object(cli, 'least_loaded_node', return_value='foobar')

    now = time.time()
    t = mocker.patch('time.time')
    t.return_value = now

    # Metadata is stale but the refresh request cannot be sent, so the client
    # records when no node was available and no refresh is left in flight
    cli.poll(timeout_ms=9999999, sleep=True)
    _poll.assert_called_with(0, sleep=True)
    assert cli._last_no_node_available_ms == now * 1000
    assert not cli._metadata_refresh_in_progress
Example 14: __init__
def __init__(self, *topics, **configs):
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs.pop(key)

    # Only check for extra config keys in top-level class
    assert not configs, 'Unrecognized configs: %s' % configs

    deprecated = {'smallest': 'earliest', 'largest': 'latest'}
    if self.config['auto_offset_reset'] in deprecated:
        new_config = deprecated[self.config['auto_offset_reset']]
        log.warning('use auto_offset_reset=%s (%s is deprecated)',
                    new_config, self.config['auto_offset_reset'])
        self.config['auto_offset_reset'] = new_config

    metrics_tags = {'client-id': self.config['client_id']}
    metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                 time_window_ms=self.config['metrics_sample_window_ms'],
                                 tags=metrics_tags)
    reporters = [reporter() for reporter in self.config['metric_reporters']]
    reporters.append(DictReporter('kafka.consumer'))
    self._metrics = Metrics(metric_config, reporters)
    metric_group_prefix = 'consumer'
    # TODO _metrics likely needs to be passed to KafkaClient, etc.

    self._client = KafkaClient(**self.config)

    # Check Broker Version if not set explicitly
    if self.config['api_version'] == 'auto':
        self.config['api_version'] = self._client.check_version()
    assert self.config['api_version'] in ('0.10', '0.9', '0.8.2', '0.8.1', '0.8.0'), 'Unrecognized api version'

    # Convert api_version config to tuple for easy comparisons
    self.config['api_version'] = tuple(
        map(int, self.config['api_version'].split('.')))

    self._subscription = SubscriptionState(self.config['auto_offset_reset'])
    self._fetcher = Fetcher(
        self._client, self._subscription, self._metrics, metric_group_prefix, **self.config)
    self._coordinator = ConsumerCoordinator(
        self._client, self._subscription, self._metrics, metric_group_prefix,
        assignors=self.config['partition_assignment_strategy'],
        **self.config)
    self._closed = False
    self._iterator = None
    self._consumer_timeout = float('inf')

    if topics:
        self._subscription.subscribe(topics=topics)
        self._client.set_topics(topics)
Example 15: test_set_topics
def test_set_topics(mocker):
    request_update = mocker.patch.object(ClusterMetadata, 'request_update')
    request_update.side_effect = lambda: Future()
    cli = KafkaClient(api_version=(0, 10))

    # replace 'empty' with 'non empty'
    request_update.reset_mock()
    fut = cli.set_topics(['t1', 't2'])
    assert not fut.is_done
    request_update.assert_called_with()

    # replace 'non empty' with 'same'
    request_update.reset_mock()
    fut = cli.set_topics(['t1', 't2'])
    assert fut.is_done
    assert fut.value == set(['t1', 't2'])
    request_update.assert_not_called()

    # replace 'non empty' with 'empty'
    request_update.reset_mock()
    fut = cli.set_topics([])
    assert fut.is_done
    assert fut.value == set()
    request_update.assert_not_called()