This article collects typical usage examples of the aiokafka.client.AIOKafkaClient.bootstrap method in Python. If you are wondering how to call AIOKafkaClient.bootstrap, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, aiokafka.client.AIOKafkaClient.
The following shows 15 code examples of the AIOKafkaClient.bootstrap method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
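Before the test-suite examples, here is a minimal sketch of the pattern they all share: create an AIOKafkaClient, call bootstrap() to establish the initial broker connection and load cluster metadata, then close the client. The broker address below is a placeholder, and the generator-based coroutine style (yield from with an explicit loop argument) mirrors the older asyncio idiom used throughout the examples; adjust both for your environment.

import asyncio
from aiokafka.client import AIOKafkaClient

@asyncio.coroutine
def bootstrap_example(loop):
    # '127.0.0.1:9092' is a placeholder broker address
    client = AIOKafkaClient(loop=loop, bootstrap_servers='127.0.0.1:9092')
    yield from client.bootstrap()  # connect and fetch initial cluster metadata
    try:
        metadata = yield from client.fetch_all_metadata()
        print(metadata.topics())
    finally:
        yield from client.close()

loop = asyncio.get_event_loop()
loop.run_until_complete(bootstrap_example(loop))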
Example 1: test_coordinator_workflow
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_coordinator_workflow(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    yield from self.wait_topic(client, 'topic2')

    subscription = SubscriptionState('latest')
    subscription.subscribe(topics=('topic1', 'topic2'))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    self.assertEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, True)

    yield from coordinator.ensure_coordinator_known()
    self.assertNotEqual(coordinator.coordinator_id, None)

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    tp_list = subscription.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    # start second coordinator
    client2 = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client2.bootstrap()
    subscription2 = SubscriptionState('latest')
    subscription2.subscribe(topics=('topic1', 'topic2'))
    coordinator2 = GroupCoordinator(
        client2, subscription2, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    yield from coordinator2.ensure_active_group()
    yield from coordinator.ensure_active_group()

    tp_list = subscription.assigned_partitions()
    self.assertEqual(len(tp_list), 2)
    tp_list2 = subscription2.assigned_partitions()
    self.assertEqual(len(tp_list2), 2)
    tp_list |= tp_list2
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))
    yield from coordinator.close()
    yield from client.close()

    yield from asyncio.sleep(0.6, loop=self.loop)  # wait heartbeat

    yield from coordinator2.ensure_active_group()
    tp_list = subscription2.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))
    yield from coordinator2.close()
    yield from client2.close()
Example 2: test_send_request
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_send_request(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    node_id = client.get_random_node()
    resp = yield from client.send(node_id, MetadataRequest([]))
    self.assertTrue(isinstance(resp, MetadataResponse))
    yield from client.close()
Example 3: test_subscribe_pattern
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_subscribe_pattern(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState('latest')
    subscription.subscribe(pattern='st-topic*', listener=test_listener)
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='subs-pattern-group')

    yield from self.wait_topic(client, 'st-topic1')
    yield from self.wait_topic(client, 'st-topic2')

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    tp_list = subscription.assigned_partitions()
    assigned = set([('st-topic1', 0), ('st-topic1', 1),
                    ('st-topic2', 0), ('st-topic2', 1)])
    self.assertEqual(tp_list, assigned)

    self.assertEqual(test_listener.revoked, [set([])])
    self.assertEqual(test_listener.assigned, [assigned])
    yield from coordinator.close()
    yield from client.close()
Example 4: test_failed_group_join
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_failed_group_join(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState("latest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop, retry_backoff_ms=10)
    yield from client.bootstrap()
    yield from self.wait_topic(client, "topic1")

    mocked = mock.MagicMock()
    coordinator._client = mocked

    # no exception expected, just wait
    mocked.send.side_effect = Errors.GroupLoadInProgressError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    mocked.send.side_effect = Errors.InvalidGroupIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    # no exception expected, member_id should be reset
    coordinator.member_id = "some_invalid_member_id"
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)

    # no exception expected, coordinator_id should be reset
    coordinator.coordinator_id = "some_id"
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.coordinator_id, None)

    yield from client.close()
Example 5: test_no_concurrent_send_on_connection
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_no_concurrent_send_on_connection(self):
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)
    yield from self.wait_topic(client, self.topic)

    node_id = client.get_random_node()
    wait_request = FetchRequest_v0(
        -1,  # replica_id
        500,  # max_wait_ms
        1024 * 1024,  # min_bytes
        [(self.topic, [(0, 0, 1024)])])
    vanila_request = MetadataRequest([])

    send_time = self.loop.time()
    long_task = self.loop.create_task(
        client.send(node_id, wait_request)
    )
    yield from asyncio.sleep(0.0001, loop=self.loop)
    self.assertFalse(long_task.done())

    yield from client.send(node_id, vanila_request)
    resp_time = self.loop.time()
    fetch_resp = yield from long_task
    # Check error code like resp->topics[0]->partitions[0]->error_code
    self.assertEqual(fetch_resp.topics[0][1][0][1], 0)
    # Check that vanila request actually executed after wait request
    self.assertGreaterEqual(resp_time - send_time, 0.5)
Example 6: test_get_offsets
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_get_offsets(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    subscription = SubscriptionState("earliest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop, group_id="getoffsets-group")
    yield from self.wait_topic(client, "topic1")

    producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send("topic1", b"first msg", partition=0)
    yield from producer.send("topic1", b"second msg", partition=1)
    yield from producer.send("topic1", b"third msg", partition=1)
    yield from producer.stop()

    yield from coordinator.ensure_active_group()

    offsets = {
        TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
        TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
    }
    yield from coordinator.commit_offsets(offsets)

    self.assertEqual(subscription.all_consumed_offsets(), {})
    subscription.seek(("topic1", 0), 0)
    subscription.seek(("topic1", 1), 0)
    yield from coordinator.refresh_committed_offsets()
    self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
    self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)

    yield from coordinator.close()
    yield from client.close()
Example 7: test_force_metadata_update_multiple_times
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_force_metadata_update_multiple_times(self):
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    orig = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        @asyncio.coroutine
        def new(*args, **kw):
            yield from asyncio.sleep(0.2, loop=self.loop)
            return (yield from orig(*args, **kw))
        mocked.side_effect = new

        client.force_metadata_update()
        yield from asyncio.sleep(0.01, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        client.force_metadata_update()
        yield from asyncio.sleep(0.01, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        client.force_metadata_update()
        yield from asyncio.sleep(0.5, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
Example 8: test_bootstrap
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_bootstrap(self):
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers='0.42.42.42:444')
    with self.assertRaises(ConnectionError):
        yield from client.bootstrap()

    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'test_topic')

    metadata = yield from client.fetch_all_metadata()
    self.assertTrue('test_topic' in metadata.topics())

    client.set_topics(['t2', 't3'])
    client.set_topics(['t2', 't3'])  # should be ignored
    client.add_topic('t2')  # should be ignored
    # bootstrap again -- no error expected
    yield from client.bootstrap()
    yield from client.close()
Example 9: test_offsets_failed_scenarios
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_offsets_failed_scenarios(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='test-offsets-group')

    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    yield from coordinator.commit_offsets(offsets)
    with mock.patch('kafka.common.for_code') as mocked:
        mocked.return_value = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.GroupLoadInProgressError
        with self.assertRaises(Errors.GroupLoadInProgressError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.RebalanceInProgressError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.NotCoordinatorForGroupError
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(coordinator.coordinator_id, None)

        with self.assertRaises(
                Errors.GroupCoordinatorNotAvailableError):
            yield from coordinator.commit_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
Example 10: test_metadata_update_fail
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_metadata_update_fail(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    with mock.patch.object(
            AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')

        updated = yield from client.force_metadata_update()
        self.assertEqual(updated, False)

        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
Example 11: test_set_topics_trigger_metadata_update
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_set_topics_trigger_metadata_update(self):
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)

    orig = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        @asyncio.coroutine
        def new(*args, **kw):
            yield from asyncio.sleep(0.01, loop=self.loop)
            return (yield from orig(*args, **kw))
        mocked.side_effect = new

        yield from client.set_topics(["topic1"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        # Same topics list should not trigger update
        yield from client.set_topics(["topic1"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)

        yield from client.set_topics(["topic1", "topic2"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 2)
        # Fewer topics should not trigger an update either
        yield from client.set_topics(["topic2"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 2)

        # Setting [] should force an update, as it means all topics
        yield from client.set_topics([])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 3)

        # Changing topics during refresh should trigger 2 refreshes
        client.set_topics(["topic3"])
        yield from asyncio.sleep(0.001, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 4)
        yield from client.set_topics(["topic3", "topics4"])
        self.assertEqual(
            len(client._metadata_update.mock_calls), 5)
Example 12: wait_kafka
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def wait_kafka(cls):
    cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)]

    # Reconnecting until Kafka in docker becomes available
    for i in range(500):
        client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts)
        try:
            cls.loop.run_until_complete(client.bootstrap())
            # Broker can still be loading cluster layout, so we can get 0
            # brokers. That counts as still not available
            if client.cluster.brokers():
                return
        except ConnectionError:
            pass
        finally:
            cls.loop.run_until_complete(client.close())
        time.sleep(0.1)
    assert False, "Kafka server never started"
Example 13: test_metadata_update_fail
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_metadata_update_fail(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    # Make sure the connection is initialized before the mock to avoid
    # crashing the api_version routine
    yield from client.force_metadata_update()

    with mock.patch.object(
            AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')

        updated = yield from client.force_metadata_update()
        self.assertEqual(updated, False)

        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
    yield from client.close()
Example 14: test_metadata_synchronizer
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_metadata_synchronizer(self):
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=100)

    with mock.patch.object(
            AIOKafkaClient, '_metadata_update') as mocked:
        @asyncio.coroutine
        def dummy(*d, **kw):
            client.cluster.failed_update(None)
        mocked.side_effect = dummy

        yield from client.bootstrap()
        yield from asyncio.sleep(0.15, loop=self.loop)
        yield from client.close()

        self.assertNotEqual(
            len(client._metadata_update.mock_calls), 0)
Example 15: test_concurrent_send_on_different_connection_groups
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import bootstrap [as alias]
def test_concurrent_send_on_different_connection_groups(self):
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)
    yield from self.wait_topic(client, self.topic)

    node_id = client.get_random_node()
    broker = client.cluster.broker_metadata(node_id)
    client.cluster.add_coordinator(
        node_id, broker.host, broker.port, rack=None,
        purpose=(CoordinationType.GROUP, ""))

    wait_request = FetchRequest_v0(
        -1,  # replica_id
        500,  # max_wait_ms
        1024 * 1024,  # min_bytes
        [(self.topic, [(0, 0, 1024)])])
    vanila_request = MetadataRequest([])

    send_time = self.loop.time()
    long_task = self.loop.create_task(
        client.send(node_id, wait_request)
    )
    yield from asyncio.sleep(0.0001, loop=self.loop)
    self.assertFalse(long_task.done())

    yield from client.send(
        node_id, vanila_request, group=ConnectionGroup.COORDINATION)
    resp_time = self.loop.time()
    self.assertFalse(long_task.done())

    fetch_resp = yield from long_task
    # Check error code like resp->topics[0]->partitions[0]->error_code
    self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

    # Check that the vanila request did not wait for the fetch request
    self.assertLess(resp_time - send_time, 0.5)