本文整理汇总了Python中aiokafka.client.AIOKafkaClient类的典型用法代码示例。如果您正苦于以下问题:Python AIOKafkaClient类的具体用法?Python AIOKafkaClient怎么用?Python AIOKafkaClient使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了AIOKafkaClient类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_send_request
def test_send_request(self):
    """A bootstrapped client can round-trip a request to a random node."""
    kafka_client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from kafka_client.bootstrap()
    node = kafka_client.get_random_node()
    # A bare MetadataRequest is the simplest request any broker answers.
    response = yield from kafka_client.send(node, MetadataRequest([]))
    self.assertTrue(isinstance(response, MetadataResponse))
    yield from kafka_client.close()
示例2: test_subscribe_pattern
def test_subscribe_pattern(self):
    """Subscribing with a topic *pattern* must assign every partition of
    every matching topic and invoke the rebalance listener callbacks.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState('latest')
    # Pattern should match both topics created below.
    subscription.subscribe(pattern='st-topic*', listener=test_listener)
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='subs-pattern-group')
    # Make sure both matching topics exist before joining the group.
    yield from self.wait_topic(client, 'st-topic1')
    yield from self.wait_topic(client, 'st-topic2')
    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)
    tp_list = subscription.assigned_partitions()
    # Both topics are expected to have 2 partitions each in this setup.
    assigned = set([('st-topic1', 0), ('st-topic1', 1),
                    ('st-topic2', 0), ('st-topic2', 1)])
    self.assertEqual(tp_list, assigned)
    # First rebalance: nothing to revoke yet, then the full assignment.
    self.assertEqual(test_listener.revoked, [set([])])
    self.assertEqual(test_listener.assigned, [assigned])
    yield from coordinator.close()
    yield from client.close()
示例3: test_get_leader_for_unassigned_partitions
def test_get_leader_for_unassigned_partitions(self, protocol):
    """Leader lookup must raise the right error when metadata reports a
    topic with no leader or an unknown topic.

    ``protocol`` is the patched kafka protocol module (injected by a
    mock.patch decorator outside this snippet -- presumably; verify
    against the full test class).
    """
    @asyncio.coroutine
    def recv(request_id):
        return b"response"
    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    # Bypass real networking: inject the mocked connection map directly.
    client._conns = mocked_conns
    brokers = [BrokerMetadata(0, "broker_1", 4567), BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata("topic_no_partitions", NO_LEADER, []),
        TopicMetadata("topic_unknown", UNKNOWN_TOPIC_OR_PARTITION, []),
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(brokers, topics)
    self.loop.run_until_complete(client.load_metadata_for_topics())
    # Neither topic is usable, so nothing should be mapped to a broker.
    self.assertDictEqual({}, client._topics_to_brokers)
    with self.assertRaises(LeaderNotAvailableError):
        self.loop.run_until_complete(client._get_leader_for_partition("topic_no_partitions", 0))
    with self.assertRaises(UnknownTopicOrPartitionError):
        self.loop.run_until_complete(client._get_leader_for_partition("topic_unknown", 0))
示例4: test_send_broker_unaware_request_fail
def test_send_broker_unaware_request_fail(self):
    "Tests that call fails when all hosts are unavailable"
    mocked_conns = {("kafka01", 9092): mock.MagicMock(), ("kafka02", 9092): mock.MagicMock()}
    # inject KafkaConnection side effects: every send fails with an exception.
    fut1 = asyncio.Future(loop=self.loop)
    fut1.set_exception(RuntimeError("kafka01 went away (unittest)"))
    mocked_conns[("kafka01", 9092)].send.return_value = fut1
    fut2 = asyncio.Future(loop=self.loop)
    fut2.set_exception(RuntimeError("kafka02 went away (unittest)"))
    mocked_conns[("kafka02", 9092)].send.return_value = fut2
    client = AIOKafkaClient(["kafka01:9092", "kafka02:9092"], loop=self.loop)
    client._conns = mocked_conns
    @asyncio.coroutine
    def go():
        # With every broker failing, the client must give up with
        # KafkaUnavailableError rather than hanging or retrying forever.
        with self.assertRaises(KafkaUnavailableError):
            yield from client._send_broker_unaware_request(
                payloads=["fake request"],
                encoder_fn=mock.MagicMock(return_value=b"fake encoded message"),
                decoder_fn=lambda x: x,
            )
        # Every broker must have been attempted with the encoded payload.
        for key, conn in mocked_conns.items():
            conn.send.assert_called_with(b"fake encoded message")
    self.loop.run_until_complete(go())
示例5: test_client_receive_zero_brokers
def test_client_receive_zero_brokers(self):
    """A metadata response listing zero brokers must NOT purge the
    previously-known broker list from the cluster metadata."""
    brokers = [
        (0, 'broker_1', 4567),
        (1, 'broker_2', 5678)
    ]
    correct_meta = MetadataResponse(brokers, [])
    # Degenerate response: no brokers at all.
    bad_response = MetadataResponse([], [])

    @asyncio.coroutine
    def send(*args, **kwargs):
        return bad_response
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=['broker_1:4567'],
                            api_version="0.10")
    conn = mock.Mock()
    client._conns = [mock.Mock()]
    client._get_conn = mock.Mock()
    # _get_conn is a coroutine in the real client; mirror that here.
    client._get_conn.side_effect = asyncio.coroutine(lambda x: conn)
    conn.send = mock.Mock()
    conn.send.side_effect = send
    # Seed the cluster with the good metadata first.
    client.cluster.update_metadata(correct_meta)
    brokers_before = client.cluster.brokers()
    yield from client._metadata_update(client.cluster, [])
    # There broker list should not be purged
    self.assertNotEqual(client.cluster.brokers(), set([]))
    self.assertEqual(client.cluster.brokers(), brokers_before)
示例6: test_send_produce_request_raises_when_noleader
def test_send_produce_request_raises_when_noleader(self, protocol):
    """Send producer request raises LeaderNotAvailableError
    if leader is not available"""
    @asyncio.coroutine
    def recv(request_id):
        return b"response"
    mocked_conns = {("broker_1", 4567): mock.MagicMock()}
    mocked_conns[("broker_1", 4567)].recv.side_effect = recv
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    # Bypass real networking: inject the mocked connection map directly.
    client._conns = mocked_conns
    brokers = [BrokerMetadata(0, "broker_1", 4567), BrokerMetadata(1, "broker_2", 5678)]
    topics = [
        TopicMetadata(
            "topic_noleader",
            NO_ERROR,
            [
                # leader == -1 means no leader elected for the partition.
                PartitionMetadata("topic_noleader", 0, -1, [], [], NO_LEADER),
                PartitionMetadata("topic_noleader", 1, -1, [], [], NO_LEADER),
            ],
        )
    ]
    protocol.decode_metadata_response.return_value = MetadataResponse(brokers, topics)
    self.loop.run_until_complete(client.load_metadata_for_topics())
    requests = [ProduceRequest("topic_noleader", 0, [create_message("a"), create_message("b")])]
    with self.assertRaises(LeaderNotAvailableError):
        self.loop.run_until_complete(client.send_produce_request(requests))
示例7: test_send_broker_unaware_request
def test_send_broker_unaware_request(self):
    "Tests that call works when at least one of the host is available"
    mocked_conns = {
        ("kafka01", 9092): mock.MagicMock(),
        ("kafka02", 9092): mock.MagicMock(),
        ("kafka03", 9092): mock.MagicMock(),
    }
    # inject KafkaConnection side effects:
    # kafka01 and kafka03 fail, only kafka02 answers successfully.
    fut = asyncio.Future(loop=self.loop)
    fut.set_exception(RuntimeError("kafka01 went away (unittest)"))
    mocked_conns[("kafka01", 9092)].send.return_value = fut
    fut2 = asyncio.Future(loop=self.loop)
    fut2.set_result(b"valid response")
    mocked_conns[("kafka02", 9092)].send.return_value = fut2
    fut3 = asyncio.Future(loop=self.loop)
    fut3.set_exception(RuntimeError("kafka03 went away (unittest)"))
    mocked_conns[("kafka03", 9092)].send.return_value = fut3
    client = AIOKafkaClient("kafka01:9092,kafka02:9092", loop=self.loop)
    client._conns = mocked_conns
    resp = self.loop.run_until_complete(
        client._send_broker_unaware_request(
            payloads=[b"fake request"], encoder_fn=mock.MagicMock(), decoder_fn=lambda x: x
        )
    )
    # The single healthy broker's response must be returned.
    self.assertEqual(b"valid response", resp)
示例8: test_compacted_topic_consumption
def test_compacted_topic_consumption(self):
    # Compacted topics can have offsets skipped: a fetch from offset 155
    # may legitimately return records starting at 160. The consumer must
    # skip the gap and track positions from the actual record offsets.
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    # Stub out all network interactions on the client.
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()
    subscriptions = SubscriptionState(loop=self.loop)
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    tp = TopicPartition('test', 0)
    req = FetchRequest(
        -1,  # replica_id
        100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])
    # Build a record batch whose offsets have gaps (160, 162, 167) as
    # compaction would produce.
    builder = LegacyRecordBatchBuilder(
        magic=1, compression_type=0, batch_size=99999999)
    builder.append(160, value=b"12345", key=b"1", timestamp=None)
    builder.append(162, value=b"23456", key=b"2", timestamp=None)
    builder.append(167, value=b"34567", key=b"3", timestamp=None)
    batch = bytes(builder.build())
    resp = FetchResponse(
        [('test', [(
            0, 0, 3000,  # partition, error_code, highwater_offset
            batch  # Batch raw bytes
        )])])
    subscriptions.assign_from_user({tp})
    assignment = subscriptions.subscription.assignment
    tp_state = assignment.state_value(tp)
    client.send.side_effect = asyncio.coroutine(lambda n, r: resp)
    # Consumer position 155 falls inside the compacted gap before 160.
    tp_state.seek(155)
    fetcher._in_flight.add(0)
    needs_wake_up = yield from fetcher._proc_fetch_request(
        assignment, 0, req)
    self.assertEqual(needs_wake_up, True)
    buf = fetcher._records[tp]
    # Test successful getone, the closest in batch offset=160
    first = buf.getone()
    self.assertEqual(tp_state.position, 161)
    self.assertEqual(
        (first.value, first.key, first.offset),
        (b"12345", b"1", 160))
    # Test successful getmany
    second, third = buf.getall()
    self.assertEqual(tp_state.position, 168)
    self.assertEqual(
        (second.value, second.key, second.offset),
        (b"23456", b"2", 162))
    self.assertEqual(
        (third.value, third.key, third.offset),
        (b"34567", b"3", 167))
示例9: test_failed_group_join
def test_failed_group_join(self):
    """Each retriable join-group error must leave the coordinator in a
    "rejoin needed" state, resetting member/coordinator ids as required
    by the error type, without raising to the caller.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState("latest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop, retry_backoff_ms=10)
    yield from client.bootstrap()
    yield from self.wait_topic(client, "topic1")
    # Swap the coordinator's client for a mock so we can force errors.
    mocked = mock.MagicMock()
    coordinator._client = mocked
    # no exception expected, just wait
    mocked.send.side_effect = Errors.GroupLoadInProgressError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    mocked.send.side_effect = Errors.InvalidGroupIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    # no exception expected, member_id should be reset
    coordinator.member_id = "some_invalid_member_id"
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)
    # no exception expected, coordinator_id should be reset
    coordinator.coordinator_id = "some_id"
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.coordinator_id, None)
    yield from client.close()
示例10: test_no_concurrent_send_on_connection
def test_no_concurrent_send_on_connection(self):
    """Requests to the same node must be serialized on the connection:
    a quick request issued while a long fetch is in flight completes
    only after the fetch does."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)
    yield from self.wait_topic(client, self.topic)
    node_id = client.get_random_node()
    # Fetch on an empty topic blocks on the broker for max_wait_ms.
    wait_request = FetchRequest_v0(
        -1,  # replica_id
        500,  # max_wait_ms
        1024 * 1024,  # min_bytes
        [(self.topic, [(0, 0, 1024)]
          )])
    vanila_request = MetadataRequest([])
    send_time = self.loop.time()
    long_task = self.loop.create_task(
        client.send(node_id, wait_request)
    )
    # Give the fetch a chance to actually hit the wire first.
    yield from asyncio.sleep(0.0001, loop=self.loop)
    self.assertFalse(long_task.done())
    yield from client.send(node_id, vanila_request)
    resp_time = self.loop.time()
    fetch_resp = yield from long_task
    # Check error code like resp->topics[0]->partitions[0]->error_code
    self.assertEqual(fetch_resp.topics[0][1][0][1], 0)
    # Check that vanila request actually executed after wait request
    self.assertGreaterEqual(resp_time - send_time, 0.5)
示例11: test_get_offsets
def test_get_offsets(self):
    """Offsets committed through the coordinator must be readable back
    via refresh_committed_offsets()."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    subscription = SubscriptionState("earliest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(client, subscription, loop=self.loop, group_id="getoffsets-group")
    yield from self.wait_topic(client, "topic1")
    # Produce a few messages so there are real offsets to commit.
    producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send("topic1", b"first msg", partition=0)
    yield from producer.send("topic1", b"second msg", partition=1)
    yield from producer.send("topic1", b"third msg", partition=1)
    yield from producer.stop()
    yield from coordinator.ensure_active_group()
    offsets = {
        TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
        TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
    }
    yield from coordinator.commit_offsets(offsets)
    # Committing does not advance consumed positions.
    self.assertEqual(subscription.all_consumed_offsets(), {})
    subscription.seek(("topic1", 0), 0)
    subscription.seek(("topic1", 1), 0)
    yield from coordinator.refresh_committed_offsets()
    # Refresh must surface the offsets committed above.
    self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
    self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)
    yield from coordinator.close()
    yield from client.close()
示例12: test_close
def test_close(self):
    """close() must empty the connection cache and close every cached
    broker connection."""
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    m1 = mock.Mock()
    m2 = mock.Mock()
    client._conns = {("host1", 4567): m1, ("host2", 5678): m2}
    client.close()
    self.assertEqual({}, client._conns)
    # Bug fix: the original used `assert_raises_with()`, which is not a
    # real mock assertion -- on a plain Mock it just creates a no-op
    # child mock, so the check verified nothing. `assert_called_with()`
    # actually verifies close() was invoked on each connection.
    m1.close.assert_called_with()
    m2.close.assert_called_with()
示例13: test_init_with_list
def test_init_with_list(self):
    """A list of bootstrap servers is parsed into (host, port) pairs."""
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=['kafka01:9092', 'kafka02:9092', 'kafka03:9092'])
    self.assertEqual(
        '<AIOKafkaClient client_id=aiokafka-0.0.1>', repr(client))
    expected_hosts = [('kafka01', 9092), ('kafka02', 9092), ('kafka03', 9092)]
    self.assertEqual(sorted(expected_hosts), sorted(client.hosts))
    # Without cluster metadata there is no node to pick.
    self.assertEqual(client.get_random_node(), None)
示例14: test_offsets_failed_scenarios
def test_offsets_failed_scenarios(self):
    """Every commit-offsets error code must surface as the matching
    exception, with the coordinator state updated per error type."""
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='test-offsets-group')
    yield from coordinator.ensure_active_group()
    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    # Baseline: a commit with no forced errors succeeds.
    yield from coordinator.commit_offsets(offsets)
    # Force each error code in turn by patching the error-code lookup.
    with mock.patch('kafka.common.for_code') as mocked:
        mocked.return_value = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)
        mocked.return_value = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)
        mocked.return_value = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            yield from coordinator.commit_offsets(offsets)
        mocked.return_value = Errors.GroupLoadInProgressError
        with self.assertRaises(Errors.GroupLoadInProgressError):
            yield from coordinator.commit_offsets(offsets)
        mocked.return_value = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.RebalanceInProgressError):
            yield from coordinator.commit_offsets(offsets)
        # A rebalance in progress must trigger partition reassignment.
        self.assertEqual(subscription.needs_partition_assignment, True)
        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.commit_offsets(offsets)
        mocked.return_value = Errors.NotCoordinatorForGroupError
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from coordinator.commit_offsets(offsets)
        # Losing the coordinator clears its id; the next commit then fails
        # with GroupCoordinatorNotAvailableError.
        self.assertEqual(coordinator.coordinator_id, None)
        with self.assertRaises(
                Errors.GroupCoordinatorNotAvailableError):
            yield from coordinator.commit_offsets(offsets)
    yield from coordinator.close()
    yield from client.close()
示例15: test_init_with_list
def test_init_with_list(self):
    """IP bootstrap servers are resolved into (host, port, family) tuples."""
    client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=[
            '127.0.0.1:9092', '127.0.0.2:9092', '127.0.0.3:9092'])
    self.assertEqual(
        '<AIOKafkaClient client_id=aiokafka-0.1.2>', repr(client))
    expected = [('127.0.0.%d' % i, 9092, socket.AF_INET)
                for i in (1, 2, 3)]
    self.assertEqual(sorted(expected), sorted(client.hosts))
    # Without cluster metadata there is no node to pick.
    self.assertEqual(client.get_random_node(), None)