本文整理汇总了Python中aiokafka.client.AIOKafkaClient.force_metadata_update方法的典型用法代码示例。如果您正苦于以下问题:Python AIOKafkaClient.force_metadata_update方法的具体用法?Python AIOKafkaClient.force_metadata_update怎么用?Python AIOKafkaClient.force_metadata_update使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类aiokafka.client.AIOKafkaClient的用法示例。
在下文中一共展示了AIOKafkaClient.force_metadata_update方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_compacted_topic_consumption
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_compacted_topic_consumption(self):
    """A fetch from a compacted topic may return a batch whose offsets
    have gaps; consumption must skip the compacted-away offsets and
    advance the consumer position past the last record actually read.
    """
    # Compacted topics can have offsets skipped
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    # Stub the network layer: node is always "ready" and a forced
    # metadata refresh reports "no update".
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()
    subscriptions = SubscriptionState(loop=self.loop)
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    tp = TopicPartition('test', 0)
    req = FetchRequest(
        -1,  # replica_id
        100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])
    # Batch with non-contiguous offsets 160, 162, 167 — exactly what
    # log compaction leaves behind.
    builder = LegacyRecordBatchBuilder(
        magic=1, compression_type=0, batch_size=99999999)
    builder.append(160, value=b"12345", key=b"1", timestamp=None)
    builder.append(162, value=b"23456", key=b"2", timestamp=None)
    builder.append(167, value=b"34567", key=b"3", timestamp=None)
    batch = bytes(builder.build())
    resp = FetchResponse(
        [('test', [(
            0, 0, 3000,  # partition, error_code, highwater_offset
            batch  # Batch raw bytes
        )])])
    subscriptions.assign_from_user({tp})
    assignment = subscriptions.subscription.assignment
    tp_state = assignment.state_value(tp)
    client.send.side_effect = asyncio.coroutine(lambda n, r: resp)
    # Position 155 precedes the first surviving offset (160).
    tp_state.seek(155)
    fetcher._in_flight.add(0)
    needs_wake_up = yield from fetcher._proc_fetch_request(
        assignment, 0, req)
    self.assertEqual(needs_wake_up, True)
    buf = fetcher._records[tp]
    # Test successful getone, the closest in batch offset=160
    first = buf.getone()
    self.assertEqual(tp_state.position, 161)
    self.assertEqual(
        (first.value, first.key, first.offset),
        (b"12345", b"1", 160))
    # Test successful getmany
    second, third = buf.getall()
    # Position ends one past the last consumed offset (167 -> 168).
    self.assertEqual(tp_state.position, 168)
    self.assertEqual(
        (second.value, second.key, second.offset),
        (b"23456", b"2", 162))
    self.assertEqual(
        (third.value, third.key, third.offset),
        (b"34567", b"3", 167))
示例2: test_metadata_update_fail
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_metadata_update_fail(self):
    """A failing broker `send` makes force_metadata_update() report
    False, and fetch_all_metadata() raise the underlying KafkaError.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    # Do one real metadata round-trip first, so the connection and the
    # api_version probe are fully initialized before `send` is patched.
    yield from client.force_metadata_update()
    patcher = mock.patch.object(AIOKafkaConnection, 'send')
    send_mock = patcher.start()
    try:
        send_mock.side_effect = KafkaError('mocked exception')
        updated = yield from client.force_metadata_update()
        self.assertEqual(updated, False)
        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
    finally:
        patcher.stop()
    yield from client.close()
示例3: test_force_metadata_update_multiple_times
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_force_metadata_update_multiple_times(self):
    """Concurrent force_metadata_update() calls must coalesce: while an
    update is in flight, further requests piggy-back on it instead of
    issuing additional metadata requests.
    """
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=10000)
    yield from client.bootstrap()
    self.add_cleanup(client.close)
    orig = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        # Wrap the real update with an artificial 0.2s delay so the
        # later force_metadata_update() calls land while it is running.
        @asyncio.coroutine
        def new(*args, **kw):
            yield from asyncio.sleep(0.2, loop=self.loop)
            return (yield from orig(*args, **kw))
        mocked.side_effect = new
        client.force_metadata_update()
        yield from asyncio.sleep(0.01, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        client.force_metadata_update()
        yield from asyncio.sleep(0.01, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
        client.force_metadata_update()
        # Even after the delayed update has finished (sleep > 0.2s),
        # the coalesced requests triggered no extra update calls.
        yield from asyncio.sleep(0.5, loop=self.loop)
        self.assertEqual(
            len(client._metadata_update.mock_calls), 1)
示例4: test_metadata_update_fail
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_metadata_update_fail(self):
    """Metadata refresh over a broken connection: force_metadata_update()
    returns False and fetch_all_metadata() propagates the KafkaError.
    """
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    with mock.patch.object(AIOKafkaConnection, 'send') as send_mock:
        send_mock.side_effect = KafkaError('mocked exception')
        # The forced refresh swallows the send error and reports failure.
        self.assertEqual(
            (yield from client.force_metadata_update()), False)
        # The explicit fetch, by contrast, surfaces the error.
        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
示例5: test_update_fetch_positions
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_update_fetch_positions(self):
    """update_fetch_positions(): unassigned partitions are ignored,
    positions awaiting reset are resolved via an OffsetRequest, and
    OffsetResponse error codes map to the matching exceptions.
    """
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    subscriptions = SubscriptionState('latest')
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    partition = TopicPartition('test', 0)
    # partition is not assigned, should be ignored
    yield from fetcher.update_fetch_positions([partition])
    state = TopicPartitionState()
    state.seek(0)
    subscriptions.assignment[partition] = state
    # partition is fetchable, no need to update position
    yield from fetcher.update_fetch_positions([partition])
    # Stub the network layer: node ready, no metadata change, fixed
    # OffsetResponse carrying offset 4 for the partition.
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, 0, [4])])]))
    state.await_reset(OffsetResetStrategy.LATEST)
    # Leader lookup fails twice (None, then -1) before succeeding (0);
    # the reset must retry until a leader is known, then land on 4.
    client.cluster.leader_for_partition = mock.MagicMock()
    client.cluster.leader_for_partition.side_effect = [None, -1, 0]
    yield from fetcher.update_fetch_positions([partition])
    self.assertEqual(state.position, 4)
    client.cluster.leader_for_partition = mock.MagicMock()
    client.cluster.leader_for_partition.return_value = 1
    client.send = mock.MagicMock()
    # Error code 3 in the OffsetResponse -> UnknownTopicOrPartitionError.
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, 3, [])])]))
    state.await_reset(OffsetResetStrategy.LATEST)
    with self.assertRaises(UnknownTopicOrPartitionError):
        yield from fetcher.update_fetch_positions([partition])
    # Unrecognized error code -1 -> UnknownError.
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: OffsetResponse([('test', [(0, -1, [])])]))
    with self.assertRaises(UnknownError):
        yield from fetcher.update_fetch_positions([partition])
    yield from fetcher.close()
示例6: test_proc_fetch_request
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_proc_fetch_request(self):
    """_proc_fetch_request(): verify the wake-up flag and the buffered
    records for the success path and for each FetchResponse error code
    (unknown partition, topic auth failure, unknown error, offset out
    of range with and without a reset strategy).
    """
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=[])
    subscriptions = SubscriptionState('latest')
    fetcher = Fetcher(client, subscriptions, loop=self.loop)
    tp = TopicPartition('test', 0)
    tp_info = (tp.topic, [(tp.partition, 155, 100000)])
    req = FetchRequest(
        -1,  # replica_id
        100, 100, [tp_info])
    # Stub the network layer: node always ready, metadata refresh is a
    # no-op, `send` returns canned FetchResponses set per scenario.
    client.ready = mock.MagicMock()
    client.ready.side_effect = asyncio.coroutine(lambda a: True)
    client.force_metadata_update = mock.MagicMock()
    client.force_metadata_update.side_effect = asyncio.coroutine(
        lambda: False)
    client.send = mock.MagicMock()
    msg = Message(b"test msg")
    msg._encode_self()
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: FetchResponse(
            [('test', [(0, 0, 9, [(4, 10, msg)])])]))
    # Partition not assigned yet: the response is dropped, no wake-up.
    fetcher._in_flight.add(0)
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, False)
    state = TopicPartitionState()
    state.seek(0)
    subscriptions.assignment[tp] = state
    subscriptions.needs_partition_assignment = False
    fetcher._in_flight.add(0)
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, True)
    buf = fetcher._records[tp]
    self.assertEqual(buf.getone(), None)  # invalid offset, msg is ignored
    # With the position matching the message offset (4) the record is
    # actually consumable.
    state.seek(4)
    fetcher._in_flight.add(0)
    fetcher._records.clear()
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, True)
    buf = fetcher._records[tp]
    self.assertEqual(buf.getone().value, b"test msg")
    # error -> no partition found
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: FetchResponse(
            [('test', [(0, 3, 9, [(4, 10, msg)])])]))
    fetcher._in_flight.add(0)
    fetcher._records.clear()
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, False)
    # error -> topic auth failed
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: FetchResponse(
            [('test', [(0, 29, 9, [(4, 10, msg)])])]))
    fetcher._in_flight.add(0)
    fetcher._records.clear()
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, True)
    # Auth failures are delivered to the consumer on the next read.
    with self.assertRaises(TopicAuthorizationFailedError):
        yield from fetcher.next_record([])
    # error -> unknown
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: FetchResponse(
            [('test', [(0, -1, 9, [(4, 10, msg)])])]))
    fetcher._in_flight.add(0)
    fetcher._records.clear()
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, False)
    # error -> offset out of range
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: FetchResponse(
            [('test', [(0, 1, 9, [(4, 10, msg)])])]))
    fetcher._in_flight.add(0)
    fetcher._records.clear()
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, False)
    # With the default reset strategy the partition awaits a reset and
    # is temporarily unfetchable.
    self.assertEqual(state.is_fetchable(), False)
    state.seek(4)
    # With reset strategy NONE an out-of-range offset raises instead.
    subscriptions._default_offset_reset_strategy = OffsetResetStrategy.NONE
    client.send.side_effect = asyncio.coroutine(
        lambda n, r: FetchResponse(
            [('test', [(0, 1, 9, [(4, 10, msg)])])]))
    fetcher._in_flight.add(0)
    fetcher._records.clear()
    needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
    self.assertEqual(needs_wake_up, True)
    with self.assertRaises(OffsetOutOfRangeError):
        yield from fetcher.next_record([])
    yield from fetcher.close()
示例7: AIOKafkaConsumer
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
#.........这里部分代码省略.........
self._api_version = tuple(
map(int, self._api_version.split('.')))
if self._api_version < (0, 9):
raise ValueError(
"Unsupported Kafka version: {}".format(self._api_version))
self._fetcher = Fetcher(
self._client, self._subscription, loop=self._loop,
key_deserializer=self._key_deserializer,
value_deserializer=self._value_deserializer,
fetch_min_bytes=self._fetch_min_bytes,
fetch_max_wait_ms=self._fetch_max_wait_ms,
max_partition_fetch_bytes=self._max_partition_fetch_bytes,
check_crcs=self._check_crcs,
fetcher_timeout=self._consumer_timeout)
if self._group_id is not None:
# using group coordinator for automatic partitions assignment
self._coordinator = GroupCoordinator(
self._client, self._subscription, loop=self._loop,
group_id=self._group_id,
heartbeat_interval_ms=self._heartbeat_interval_ms,
retry_backoff_ms=self._retry_backoff_ms,
enable_auto_commit=self._enable_auto_commit,
auto_commit_interval_ms=self._auto_commit_interval_ms,
assignors=self._partition_assignment_strategy)
self._coordinator.on_group_rebalanced(
self._on_change_subscription)
yield from self._coordinator.ensure_active_group()
elif self._subscription.needs_partition_assignment:
# using manual partitions assignment by topic(s)
yield from self._client.force_metadata_update()
partitions = []
for topic in self._topics:
p_ids = self.partitions_for_topic(topic)
for p_id in p_ids:
partitions.append(TopicPartition(topic, p_id))
self._subscription.unsubscribe()
self._subscription.assign_from_user(partitions)
yield from self._update_fetch_positions(
self._subscription.missing_fetch_positions())
def assign(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): assignment for this instance.
Raises:
IllegalStateError: if consumer has already called subscribe()
Warning:
It is not possible to use both manual partition assignment with
assign() and group assignment with subscribe().
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change.
示例8: test_load_metadata
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_load_metadata(self):
    """Force a metadata update against a mocked connection and check how
    brokers, topics and per-partition error codes are reflected in the
    cluster metadata, then verify node-ready handling once the
    connection reports disconnected.
    """
    brokers = [
        (0, 'broker_1', 4567),
        (1, 'broker_2', 5678)
    ]
    topics = [
        (NO_ERROR, 'topic_1', [
            (NO_ERROR, 0, 1, [1, 2], [1, 2])
        ]),
        (NO_ERROR, 'topic_2', [
            (NO_LEADER, 0, -1, [], []),
            (NO_LEADER, 1, 1, [], []),
        ]),
        (NO_LEADER, 'topic_no_partitions', []),
        (UNKNOWN_TOPIC_OR_PARTITION, 'topic_unknown', []),
        (NO_ERROR, 'topic_3', [
            (NO_ERROR, 0, 0, [0, 1], [0, 1]),
            (NO_ERROR, 1, 1, [1, 0], [1, 0]),
            (NO_ERROR, 2, 0, [0, 1], [0, 1])
        ]),
        (NO_ERROR, 'topic_4', [
            (NO_ERROR, 0, 0, [0, 1], [0, 1]),
            (REPLICA_NOT_AVAILABLE, 1, 1, [1, 0], [1, 0]),
        ])
    ]

    @asyncio.coroutine
    def send(request_id):
        # Every metadata request gets the same canned response.
        return MetadataResponse(brokers, topics)

    mocked_conns = {0: mock.MagicMock()}
    mocked_conns[0].send.side_effect = send
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers=['broker_1:4567'])
    # FIX: `asyncio.async(...)` is a SyntaxError on Python >= 3.7
    # ("async" became a keyword); ensure_future is the supported
    # equivalent and has existed since Python 3.4.4.
    task = asyncio.ensure_future(
        client._md_synchronizer(), loop=self.loop)
    client._conns = mocked_conns
    client.cluster.update_metadata(MetadataResponse(brokers[:1], []))
    self.loop.run_until_complete(client.force_metadata_update())
    task.cancel()
    md = client.cluster
    c_brokers = md.brokers()
    self.assertEqual(len(c_brokers), 2)
    self.assertEqual(sorted(brokers), sorted(list(c_brokers)))
    c_topics = md.topics()
    # topic_no_partitions / topic_unknown carried topic-level errors,
    # so only 4 topics survive in the metadata.
    self.assertEqual(len(c_topics), 4)
    self.assertEqual(md.partitions_for_topic('topic_1'), set([0]))
    self.assertEqual(md.partitions_for_topic('topic_2'), set([0, 1]))
    self.assertEqual(md.partitions_for_topic('topic_3'), set([0, 1, 2]))
    self.assertEqual(md.partitions_for_topic('topic_4'), set([0, 1]))
    # Partition 0 of topic_2 has no leader -> not "available".
    self.assertEqual(
        md.available_partitions_for_topic('topic_2'), set([1]))
    mocked_conns[0].connected.return_value = False
    is_ready = self.loop.run_until_complete(client.ready(0))
    self.assertEqual(is_ready, False)
    is_ready = self.loop.run_until_complete(client.ready(1))
    self.assertEqual(is_ready, False)
    # The disconnected connection was evicted from the pool.
    self.assertEqual(mocked_conns, {})
    with self.assertRaises(NodeNotReadyError):
        self.loop.run_until_complete(client.send(0, None))
示例9: AIOKafkaProducer
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
#.........这里部分代码省略.........
self._sender_task.cancel()
yield from self._sender_task
yield from self.client.close()
self._closed = True
log.debug("The Kafka producer has closed.")
@asyncio.coroutine
def partitions_for(self, topic):
    """Return the set of all known partition ids for *topic*.

    Delegates to ``_wait_on_metadata``, which refreshes cluster
    metadata first if the topic is not yet known.
    """
    partitions = yield from self._wait_on_metadata(topic)
    return partitions
@asyncio.coroutine
def _wait_on_metadata(self, topic):
    """Wait until cluster metadata (including partitions) for *topic*
    is available.

    Arguments:
        topic (str): topic we want metadata for

    Returns:
        set: partition ids for the topic

    Raises:
        UnknownTopicOrPartitionError: if the topic is still missing
            from cluster metadata after a forced refresh
    """
    if topic not in self.client.cluster.topics():
        # Unknown topic: register it with the client so the metadata
        # request includes it, then force a refresh.
        self.client.add_topic(topic)
        yield from self.client.force_metadata_update()
        if topic not in self.client.cluster.topics():
            raise UnknownTopicOrPartitionError()
    return self._metadata.partitions_for_topic(topic)
@asyncio.coroutine
def send(self, topic, value=None, key=None, partition=None):
"""Publish a message to a topic.
Arguments:
topic (str): topic where the message will be published
value (optional): message value. Must be type bytes, or be
serializable to bytes via configured value_serializer. If value
is None, key is required and message acts as a 'delete'.
See kafka compaction documentation for more details:
http://kafka.apache.org/documentation.html#compaction
(compaction requires kafka >= 0.8.1)
partition (int, optional): optionally specify a partition. If not
set, the partition will be selected using the configured
'partitioner'.
key (optional): a key to associate with the message. Can be used to
determine which partition to send the message to. If partition
is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same
partition (but if key is None, partition is chosen randomly).
Must be type bytes, or be serializable to bytes via configured
key_serializer.
Returns:
asyncio.Future: future object that will be set when message is
processed
示例10: test_proc_fetch_request
# 需要导入模块: from aiokafka.client import AIOKafkaClient [as 别名]
# 或者: from aiokafka.client.AIOKafkaClient import force_metadata_update [as 别名]
def test_proc_fetch_request(self):
client = AIOKafkaClient(
loop=self.loop,
bootstrap_servers=[])
subscriptions = SubscriptionState(loop=self.loop)
fetcher = Fetcher(
client, subscriptions, auto_offset_reset="latest", loop=self.loop)
tp = TopicPartition('test', 0)
tp_info = (tp.topic, [(tp.partition, 4, 100000)])
req = FetchRequest(
-1, # replica_id
100, 100, [tp_info])
client.ready = mock.MagicMock()
client.ready.side_effect = asyncio.coroutine(lambda a: True)
client.force_metadata_update = mock.MagicMock()
client.force_metadata_update.side_effect = asyncio.coroutine(
lambda: False)
client.send = mock.MagicMock()
builder = LegacyRecordBatchBuilder(
magic=1, compression_type=0, batch_size=99999999)
builder.append(offset=4, value=b"test msg", key=None, timestamp=None)
raw_batch = bytes(builder.build())
client.send.side_effect = asyncio.coroutine(
lambda n, r: FetchResponse(
[('test', [(0, 0, 9, raw_batch)])]))
subscriptions.assign_from_user({tp})
assignment = subscriptions.subscription.assignment
tp_state = assignment.state_value(tp)
# The partition has no active position, so will ignore result
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
self.assertEqual(needs_wake_up, False)
self.assertEqual(fetcher._records, {})
# The partition's position does not match request's fetch offset
subscriptions.seek(tp, 0)
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
self.assertEqual(needs_wake_up, False)
self.assertEqual(fetcher._records, {})
subscriptions.seek(tp, 4)
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
self.assertEqual(needs_wake_up, True)
buf = fetcher._records[tp]
self.assertEqual(buf.getone().value, b"test msg")
# If position changed after fetch request passed
subscriptions.seek(tp, 4)
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
subscriptions.seek(tp, 10)
self.assertIsNone(buf.getone())
# If assignment is lost after fetch request passed
subscriptions.seek(tp, 4)
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
subscriptions.unsubscribe()
self.assertIsNone(buf.getone())
subscriptions.assign_from_user({tp})
assignment = subscriptions.subscription.assignment
tp_state = assignment.state_value(tp)
# error -> no partition found (UnknownTopicOrPartitionError)
subscriptions.seek(tp, 4)
fetcher._records.clear()
client.send.side_effect = asyncio.coroutine(
lambda n, r: FetchResponse(
[('test', [(0, 3, 9, raw_batch)])]))
cc = client.force_metadata_update.call_count
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
self.assertEqual(needs_wake_up, False)
self.assertEqual(client.force_metadata_update.call_count, cc + 1)
# error -> topic auth failed (TopicAuthorizationFailedError)
client.send.side_effect = asyncio.coroutine(
lambda n, r: FetchResponse(
[('test', [(0, 29, 9, raw_batch)])]))
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
self.assertEqual(needs_wake_up, True)
with self.assertRaises(TopicAuthorizationFailedError):
yield from fetcher.next_record([])
# error -> unknown
client.send.side_effect = asyncio.coroutine(
lambda n, r: FetchResponse(
[('test', [(0, -1, 9, raw_batch)])]))
needs_wake_up = yield from fetcher._proc_fetch_request(
assignment, 0, req)
self.assertEqual(needs_wake_up, False)
#.........这里部分代码省略.........