本文整理汇总了Python中kafka.consumer.subscription_state.SubscriptionState类的典型用法代码示例。如果您正苦于以下问题:Python SubscriptionState类的具体用法?Python SubscriptionState怎么用?Python SubscriptionState使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了SubscriptionState类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_failed_sync_group
def test_failed_sync_group(self):
    """Exercise error handling in ``GroupCoordinator._on_join_follower``.

    Each error injected through the mocked ``send`` must surface as the
    matching exception, and the coordinator's member/coordinator state
    must be reset accordingly.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState("latest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        heartbeat_interval_ms=20000)

    # The client was never bootstrapped, so the group coordinator
    # cannot be located yet.
    with self.assertRaises(GroupCoordinatorNotAvailableError):
        yield from coordinator._on_join_follower()

    # Replace the real client with a mock so send() failures can be
    # injected one by one.
    mocked = mock.MagicMock()
    coordinator._client = mocked
    coordinator.member_id = "some_invalid_member_id"
    coordinator.coordinator_unknown = asyncio.coroutine(lambda: False)

    # UnknownMemberIdError re-raises and resets member_id.
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    with self.assertRaises(Errors.UnknownMemberIdError):
        yield from coordinator._on_join_follower()
    self.assertEqual(coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)

    # NotCoordinatorForGroupError re-raises and drops the cached
    # coordinator_id.
    mocked.send.side_effect = Errors.NotCoordinatorForGroupError()
    coordinator.coordinator_id = "some_id"
    with self.assertRaises(Errors.NotCoordinatorForGroupError):
        yield from coordinator._on_join_follower()
    self.assertEqual(coordinator.coordinator_id, None)

    # A generic KafkaError propagates unchanged.
    mocked.send.side_effect = KafkaError()
    with self.assertRaises(KafkaError):
        yield from coordinator._on_join_follower()

    # client sends LeaveGroupRequest to group coordinator
    # if generation > 0 (means that client is a member of group)
    # expecting no exception in this case (error should be ignored in close
    # method)
    coordinator.generation = 33
    yield from coordinator.close()
示例2: test_failed_group_join
def test_failed_group_join(self):
    """Error handling in ``GroupCoordinator._perform_group_join``.

    Retriable join errors must not raise; instead the coordinator keeps
    signalling that a rejoin is needed and, depending on the error,
    resets ``member_id`` or ``coordinator_id``.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState("latest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop, retry_backoff_ms=10)
    yield from client.bootstrap()
    yield from self.wait_topic(client, "topic1")

    # Inject failures through a mocked client.
    mocked = mock.MagicMock()
    coordinator._client = mocked

    # no exception expected, just wait and retry later
    mocked.send.side_effect = Errors.GroupLoadInProgressError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    mocked.send.side_effect = Errors.InvalidGroupIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    # no exception expected, member_id should be reset
    coordinator.member_id = "some_invalid_member_id"
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)

    # no exception expected, coordinator_id should be reset
    coordinator.coordinator_id = "some_id"
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.coordinator_id, None)

    yield from client.close()
示例3: test_subscribe_pattern
def test_subscribe_pattern(self):
    """Group join with a pattern subscription.

    Subscribing with ``pattern='st-topic*'`` should result in all
    partitions of both matching topics being assigned, and the
    rebalance listener should see exactly one revoke (empty set) and
    one assign callback.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState('latest')
    subscription.subscribe(pattern='st-topic*', listener=test_listener)
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='subs-pattern-group')
    yield from self.wait_topic(client, 'st-topic1')
    yield from self.wait_topic(client, 'st-topic2')

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    # NOTE(review): expected assignment implies each test topic has two
    # partitions — presumably the broker fixture is configured that way.
    tp_list = subscription.assigned_partitions()
    assigned = set([('st-topic1', 0), ('st-topic1', 1),
                    ('st-topic2', 0), ('st-topic2', 1)])
    self.assertEqual(tp_list, assigned)

    # Listener fired once per phase: revoke with nothing, then assign.
    self.assertEqual(test_listener.revoked, [set([])])
    self.assertEqual(test_listener.assigned, [assigned])
    yield from coordinator.close()
    yield from client.close()
示例4: test_coordinator_workflow
def test_coordinator_workflow(self):
    """Happy-path group workflow with a rebalance.

    A single member first gets all four partitions; when a second
    member joins, the group rebalances so each holds two; when the
    first member leaves, the survivor gets all four back.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    yield from self.wait_topic(client, 'topic2')
    subscription = SubscriptionState('latest')
    subscription.subscribe(topics=('topic1', 'topic2'))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)

    # Initially nothing is known and a (re)join is pending.
    self.assertEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, True)

    yield from coordinator.ensure_coordinator_known()
    self.assertNotEqual(coordinator.coordinator_id, None)

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    # Sole member: owns all four partitions.
    tp_list = subscription.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    # start second coordinator
    client2 = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client2.bootstrap()
    subscription2 = SubscriptionState('latest')
    subscription2.subscribe(topics=('topic1', 'topic2'))
    coordinator2 = GroupCoordinator(
        client2, subscription2, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    yield from coordinator2.ensure_active_group()
    yield from coordinator.ensure_active_group()

    # After the rebalance the partitions are split 2/2 and together
    # still cover the full set.
    tp_list = subscription.assigned_partitions()
    self.assertEqual(len(tp_list), 2)
    tp_list2 = subscription2.assigned_partitions()
    self.assertEqual(len(tp_list2), 2)
    tp_list |= tp_list2
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    # First member leaves; the survivor should regain everything.
    yield from coordinator.close()
    yield from client.close()

    yield from asyncio.sleep(0.6, loop=self.loop)  # wait heartbeat
    yield from coordinator2.ensure_active_group()
    tp_list = subscription2.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))
    yield from coordinator2.close()
    yield from client2.close()
示例5: test_failed_broker_conn
def test_failed_broker_conn(self):
    """Coordinator lookup must fail when no broker is reachable."""
    kafka_client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=self.hosts)
    state = SubscriptionState("latest")
    state.subscribe(topics=("topic1",))
    group = GroupCoordinator(kafka_client, state, loop=self.loop)
    # The client was never bootstrapped, so no broker connection exists.
    with self.assertRaises(NoBrokersAvailable):
        yield from group.ensure_coordinator_known()
示例6: test_with_nocommit_support
def test_with_nocommit_support(self):
    """With auto commit disabled no auto-commit task must be created."""
    kafka_client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=self.hosts)
    state = SubscriptionState('latest')
    state.subscribe(topics=('topic1',))
    group = GroupCoordinator(
        kafka_client, state,
        loop=self.loop, enable_auto_commit=False)
    self.assertEqual(group._auto_commit_task, None)
示例7: test_offsets_failed_scenarios
def test_offsets_failed_scenarios(self):
    """Map OffsetCommit error codes to the exceptions commit raises.

    ``kafka.common.for_code`` is patched so each commit response is
    interpreted as a different error class in turn.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='test-offsets-group')
    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    # Sanity check: an unmocked commit succeeds.
    yield from coordinator.commit_offsets(offsets)
    with mock.patch('kafka.common.for_code') as mocked:
        # Authorization and commit-size errors are re-raised as-is.
        mocked.return_value = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.GroupLoadInProgressError
        with self.assertRaises(Errors.GroupLoadInProgressError):
            yield from coordinator.commit_offsets(offsets)

        # RebalanceInProgress additionally flags that partitions need
        # to be reassigned.
        mocked.return_value = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.RebalanceInProgressError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.commit_offsets(offsets)

        # NotCoordinatorForGroup drops the cached coordinator; the next
        # commit then fails because no coordinator is known.
        mocked.return_value = Errors.NotCoordinatorForGroupError
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(coordinator.coordinator_id, None)

        with self.assertRaises(
                Errors.GroupCoordinatorNotAvailableError):
            yield from coordinator.commit_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
示例8: test_update_fetch_positions
def test_update_fetch_positions(self):
    """Offset-reset handling in ``Fetcher.update_fetch_positions``."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    subscriptions = SubscriptionState('latest')
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    partition = TopicPartition('test', 0)

    # partition is not assigned, should be ignored
    yield from fetcher.update_fetch_positions([partition])

    state = TopicPartitionState()
    state.seek(0)
    subscriptions.assignment[partition] = state
    # partition is fetchable, no need to update position
    yield from fetcher.update_fetch_positions([partition])

    # Stub the client so an OffsetRequest "returns" offset 4.
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, 0, [4])])]))
    state.await_reset(OffsetResetStrategy.LATEST)
    # Leader lookup fails first (None), then is unavailable (-1),
    # and finally succeeds (node 0); the reset must retry until then.
    client.cluster.leader_for_partition = mock.MagicMock()
    client.cluster.leader_for_partition.side_effect = [None, -1, 0]
    yield from fetcher.update_fetch_positions([partition])
    self.assertEqual(state.position, 4)

    # Error code 3 in the response is raised as
    # UnknownTopicOrPartitionError; an unmapped code (-1) as UnknownError.
    client.cluster.leader_for_partition = mock.MagicMock()
    client.cluster.leader_for_partition.return_value = 1
    client.send = mock.MagicMock()
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, 3, [])])]))
    state.await_reset(OffsetResetStrategy.LATEST)
    with self.assertRaises(UnknownTopicOrPartitionError):
        yield from fetcher.update_fetch_positions([partition])

    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, -1, [])])]))
    with self.assertRaises(UnknownError):
        yield from fetcher.update_fetch_positions([partition])
    yield from fetcher.close()
示例9: __init__
def __init__(self, *topics, **configs):
    """Initialize the consumer.

    Arguments:
        *topics: optional topics to subscribe to immediately.
        **configs: overrides for keys present in ``DEFAULT_CONFIG``;
            any leftover unknown key triggers an AssertionError.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs.pop(key)

    # Only check for extra config keys in top-level class
    assert not configs, 'Unrecognized configs: %s' % configs

    # Translate deprecated auto_offset_reset spellings, with a warning.
    deprecated = {'smallest': 'earliest', 'largest': 'latest'}
    if self.config['auto_offset_reset'] in deprecated:
        new_config = deprecated[self.config['auto_offset_reset']]
        log.warning('use auto_offset_reset=%s (%s is deprecated)',
                    new_config, self.config['auto_offset_reset'])
        self.config['auto_offset_reset'] = new_config

    # Metrics registry tagged with this client's id.
    metrics_tags = {'client-id': self.config['client_id']}
    metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                 time_window_ms=self.config['metrics_sample_window_ms'],
                                 tags=metrics_tags)
    reporters = [reporter() for reporter in self.config['metric_reporters']]
    self._metrics = Metrics(metric_config, reporters)
    metric_group_prefix = 'consumer'
    # TODO _metrics likely needs to be passed to KafkaClient, etc.

    # api_version was previously a str. accept old format for now
    if isinstance(self.config['api_version'], str):
        str_version = self.config['api_version']
        if str_version == 'auto':
            # 'auto' now spelled api_version=None (auto-detect below).
            self.config['api_version'] = None
        else:
            self.config['api_version'] = tuple(
                map(int, str_version.split('.')))
        log.warning('use api_version=%s (%s is deprecated)',
                    str(self.config['api_version']), str_version)

    self._client = KafkaClient(**self.config)

    # Get auto-discovered version from client if necessary
    if self.config['api_version'] is None:
        self.config['api_version'] = self._client.config['api_version']

    self._subscription = SubscriptionState(self.config['auto_offset_reset'])
    self._fetcher = Fetcher(
        self._client, self._subscription, self._metrics,
        metric_group_prefix, **self.config)
    self._coordinator = ConsumerCoordinator(
        self._client, self._subscription, self._metrics,
        metric_group_prefix,
        assignors=self.config['partition_assignment_strategy'],
        **self.config)
    self._closed = False
    self._iterator = None
    self._consumer_timeout = float('inf')

    if topics:
        self._subscription.subscribe(topics=topics)
        self._client.set_topics(topics)
示例10: test_get_offsets
def test_get_offsets(self):
    """Commit offsets, then read them back via a committed refresh."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    subscription = SubscriptionState("earliest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id="getoffsets-group")
    yield from self.wait_topic(client, "topic1")

    # Produce a few messages so the committed offsets point at real data.
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send("topic1", b"first msg", partition=0)
    yield from producer.send("topic1", b"second msg", partition=1)
    yield from producer.send("topic1", b"third msg", partition=1)
    yield from producer.stop()

    yield from coordinator.ensure_active_group()
    offsets = {
        TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
        TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
    }
    yield from coordinator.commit_offsets(offsets)

    # Nothing consumed locally yet, so no consumed offsets to report.
    self.assertEqual(subscription.all_consumed_offsets(), {})

    subscription.seek(("topic1", 0), 0)
    subscription.seek(("topic1", 1), 0)
    yield from coordinator.refresh_committed_offsets()
    # The values committed above must be read back per partition.
    self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
    self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)
    yield from coordinator.close()
    yield from client.close()
示例11: test_fetchoffsets_failed_scenarios
def test_fetchoffsets_failed_scenarios(self):
    """Error handling of ``fetch_committed_offsets``.

    ``kafka.common.for_code`` is patched (via ``MockedKafkaErrCode``)
    to emit a sequence of error codes, one per request.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='fetch-offsets-group')
    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    with mock.patch('kafka.common.for_code') as mocked:
        # Load-in-progress is retriable: the second attempt succeeds.
        mocked.side_effect = MockedKafkaErrCode(
            Errors.GroupLoadInProgressError, Errors.NoError)
        yield from coordinator.fetch_committed_offsets(offsets)

        # UnknownMemberId is fatal and forces partition reassignment.
        mocked.side_effect = MockedKafkaErrCode(
            Errors.UnknownMemberIdError, Errors.NoError)
        with self.assertRaises(Errors.UnknownMemberIdError):
            yield from coordinator.fetch_committed_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        # Unknown topic/partition yields an empty result, not an error.
        mocked.side_effect = None
        mocked.return_value = Errors.UnknownTopicOrPartitionError
        r = yield from coordinator.fetch_committed_offsets(offsets)
        self.assertEqual(r, {})

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.fetch_committed_offsets(offsets)

        # NotCoordinatorForGroup is retried after re-resolving the
        # coordinator; no exception expected.
        mocked.side_effect = MockedKafkaErrCode(
            Errors.NotCoordinatorForGroupError,
            Errors.NoError, Errors.NoError, Errors.NoError)
        yield from coordinator.fetch_committed_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
示例12: __init__
def __init__(self, *topics, **configs):
    """Initialize the consumer.

    Arguments:
        *topics: optional topics to subscribe to immediately.
        **configs: overrides for keys present in ``DEFAULT_CONFIG``;
            any leftover unknown key triggers an AssertionError. A
            pre-built client may be injected via the 'client' key.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs.pop(key)

    # Only check for extra config keys in top-level class
    assert not configs, 'Unrecognized configs: %s' % configs

    # Translate deprecated auto_offset_reset spellings, with a warning.
    deprecated = {'smallest': 'earliest', 'largest': 'latest' }
    if self.config['auto_offset_reset'] in deprecated:
        new_config = deprecated[self.config['auto_offset_reset']]
        log.warning('use auto_offset_reset=%s (%s is deprecated)',
                    new_config, self.config['auto_offset_reset'])
        self.config['auto_offset_reset'] = new_config

    # Metrics registry; always adds a DictReporter for introspection.
    metrics_tags = {'client-id': self.config['client_id']}
    metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                 time_window_ms=self.config['metrics_sample_window_ms'],
                                 tags=metrics_tags)
    reporters = [reporter() for reporter in self.config['metric_reporters']]
    reporters.append(DictReporter('kafka.consumer'))
    self._metrics = Metrics(metric_config, reporters)
    metric_group_prefix = 'consumer'
    # TODO _metrics likely needs to be passed to KafkaClient, etc.

    # Reuse an injected client if one was supplied in the configs.
    client = self.config.pop('client', None) or KafkaClient(**self.config)
    self._client = client

    # Check Broker Version if not set explicitly
    if self.config['api_version'] == 'auto':
        self.config['api_version'] = self._client.check_version()
    assert self.config['api_version'] in (
        '0.10', '0.9', '0.8.2', '0.8.1', '0.8.0'), 'Unrecognized api version'

    # Convert api_version config to tuple for easy comparisons
    self.config['api_version'] = tuple(
        map(int, self.config['api_version'].split('.')))

    self._subscription = SubscriptionState(self.config['auto_offset_reset'])
    self._fetcher = Fetcher(
        self._client, self._subscription, self._metrics,
        metric_group_prefix, **self.config)
    self._coordinator = ConsumerCoordinator(
        self._client, self._subscription, self._metrics,
        metric_group_prefix,
        assignors=self.config['partition_assignment_strategy'],
        **self.config)
    self._closed = False
    self._iterator = None
    self._consumer_timeout = float('inf')

    if topics:
        self._subscription.subscribe(topics=topics)
        self._client.set_topics(topics)
示例13: __init__
def __init__(self, *topics, loop,
             bootstrap_servers='localhost',
             client_id='aiokafka-'+__version__,
             group_id=None,
             key_deserializer=None, value_deserializer=None,
             fetch_max_wait_ms=500,
             fetch_min_bytes=1,
             max_partition_fetch_bytes=1 * 1024 * 1024,
             request_timeout_ms=40 * 1000,
             retry_backoff_ms=100,
             reconnect_backoff_ms=50,
             auto_offset_reset='latest',
             enable_auto_commit=True,
             auto_commit_interval_ms=5000,
             check_crcs=True,
             metadata_max_age_ms=5 * 60 * 1000,
             partition_assignment_strategy=(RoundRobinPartitionAssignor,),
             heartbeat_interval_ms=3000,
             session_timeout_ms=30000,
             consumer_timeout_ms=100,
             api_version='auto'):
    """Initialize the asyncio consumer.

    Stores the configuration, creates the client, and sets up the
    subscription for any *topics* passed positionally. The fetcher and
    coordinator are left as ``None`` here — presumably created later in
    a start method (not visible in this chunk).

    Raises:
        ValueError: if *api_version* is not 'auto' or '0.9'.
    """
    if api_version not in ('auto', '0.9'):
        raise ValueError("Unsupported Kafka API version")
    self._client = AIOKafkaClient(
        loop=loop, bootstrap_servers=bootstrap_servers,
        client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
        request_timeout_ms=request_timeout_ms)
    self._api_version = api_version
    self._group_id = group_id
    self._heartbeat_interval_ms = heartbeat_interval_ms
    self._retry_backoff_ms = retry_backoff_ms
    self._enable_auto_commit = enable_auto_commit
    self._auto_commit_interval_ms = auto_commit_interval_ms
    self._partition_assignment_strategy = partition_assignment_strategy
    self._key_deserializer = key_deserializer
    self._value_deserializer = value_deserializer
    self._fetch_min_bytes = fetch_min_bytes
    self._fetch_max_wait_ms = fetch_max_wait_ms
    self._max_partition_fetch_bytes = max_partition_fetch_bytes
    # consumer_timeout_ms is converted to seconds.
    self._consumer_timeout = consumer_timeout_ms / 1000
    self._check_crcs = check_crcs
    self._subscription = SubscriptionState(auto_offset_reset)
    self._fetcher = None
    self._coordinator = None
    self._closed = False
    self._loop = loop
    self._topics = topics

    if topics:
        self._client.set_topics(topics)
        self._subscription.subscribe(topics=topics)
示例14: __init__
def __init__(self, *topics, **configs):
    """Initialize the consumer.

    Arguments:
        *topics: optional topics to subscribe to immediately.
        **configs: overrides for keys present in ``DEFAULT_CONFIG``;
            any leftover unknown key triggers an AssertionError.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs.pop(key)

    # Only check for extra config keys in top-level class
    assert not configs, 'Unrecognized configs: %s' % configs

    # Translate deprecated auto_offset_reset spellings, with a warning.
    deprecated = {'smallest': 'earliest', 'largest': 'latest' }
    if self.config['auto_offset_reset'] in deprecated:
        new_config = deprecated[self.config['auto_offset_reset']]
        log.warning('use auto_offset_reset=%s (%s is deprecated)',
                    new_config, self.config['auto_offset_reset'])
        self.config['auto_offset_reset'] = new_config

    self._client = KafkaClient(**self.config)

    # Check Broker Version if not set explicitly
    if self.config['api_version'] == 'auto':
        self.config['api_version'] = self._client.check_version()
    assert self.config['api_version'] in (
        '0.9', '0.8.2', '0.8.1', '0.8.0'), 'Unrecognized api version'

    # Convert api_version config to tuple for easy comparisons
    self.config['api_version'] = tuple(
        map(int, self.config['api_version'].split('.')))

    self._subscription = SubscriptionState(self.config['auto_offset_reset'])
    self._fetcher = Fetcher(
        self._client, self._subscription, **self.config)
    self._coordinator = ConsumerCoordinator(
        self._client, self._subscription,
        assignors=self.config['partition_assignment_strategy'],
        **self.config)
    self._closed = False
    self._iterator = None
    self._consumer_timeout = float('inf')

    if topics:
        self._subscription.subscribe(topics=topics)
        self._client.set_topics(topics)
示例15: __init__
def __init__(self, *topics, **configs):
    """Build a consumer: merge configuration, probe the broker version,
    then wire up client, subscription, fetcher and coordinator.

    Any config key not present in ``DEFAULT_CONFIG`` is rejected.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    # Pull out every recognized override; leftovers are an error.
    overrides = {name: configs.pop(name)
                 for name in list(configs) if name in self.config}
    self.config.update(overrides)
    assert not configs, "Unrecognized configs: %s" % configs

    # Map the deprecated reset spellings onto their modern names.
    reset = self.config["auto_offset_reset"]
    replacement = {"smallest": "earliest", "largest": "latest"}.get(reset)
    if replacement is not None:
        log.warning("use auto_offset_reset=%s (%s is deprecated)",
                    replacement, reset)
        self.config["auto_offset_reset"] = replacement

    self._client = KafkaClient(**self.config)

    # Auto-detect the broker version unless it was given explicitly,
    # then normalize it to an int tuple for easy comparisons.
    if self.config["api_version"] == "auto":
        self.config["api_version"] = self._client.check_version()
    assert self.config["api_version"] in ("0.9", "0.8.2", "0.8.1", "0.8.0")
    self.config["api_version"] = tuple(
        int(piece) for piece in self.config["api_version"].split("."))

    self._subscription = SubscriptionState(self.config["auto_offset_reset"])
    self._fetcher = Fetcher(self._client, self._subscription, **self.config)
    self._coordinator = ConsumerCoordinator(
        self._client, self._subscription,
        assignors=self.config["partition_assignment_strategy"],
        **self.config)
    self._closed = False
    self._iterator = None
    self._consumer_timeout = float("inf")

    if topics:
        self._subscription.subscribe(topics=topics)
        self._client.set_topics(topics)