This article collects typical usage examples of the Python method kafka.consumer.subscription_state.SubscriptionState.missing_fetch_positions. If you have been wondering what SubscriptionState.missing_fetch_positions does, how to call it, or what using it looks like in practice, the curated code samples below should help. You can also explore further usage of the class it belongs to, kafka.consumer.subscription_state.SubscriptionState.
The following presents 3 code examples of the SubscriptionState.missing_fetch_positions method, sorted by popularity by default.
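Before the examples, here is a minimal, self-contained sketch of what the method reports. It is not taken from the examples below; it assumes kafka-python's public SubscriptionState API, and the topic name 'demo' and the offset 42 are made up for illustration:

from kafka.consumer.subscription_state import SubscriptionState
from kafka.structs import TopicPartition  # lives in kafka.common in very old releases

tp = TopicPartition('demo', 0)
state = SubscriptionState()
state.assign_from_user([tp])

# A freshly assigned partition has no known fetch position yet,
# so it is reported as missing.
print(state.has_all_fetch_positions())  # expected: False
print(state.missing_fetch_positions())  # expected: {TopicPartition(topic='demo', partition=0)}

# Once a position is established (e.g. by an offset lookup or a seek),
# nothing is missing any more.
state.assignment[tp].seek(42)
print(state.missing_fetch_positions())  # expected: set()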
Example 1: KafkaConsumer
# Required import: from kafka.consumer.subscription_state import SubscriptionState [as alias]
# Alternatively: from kafka.consumer.subscription_state.SubscriptionState import missing_fetch_positions [as alias]
#......... part of the code is omitted here .........
                # fetched records.
                self._fetcher.init_fetches()
                return records

            elapsed_ms = (time.time() - start) * 1000
            remaining = timeout_ms - elapsed_ms

            if remaining <= 0:
                return {}

    def _poll_once(self, timeout_ms):
        """
        Do one round of polling. In addition to checking for new data, this does
        any needed heart-beating, auto-commits, and offset updates.

        Arguments:
            timeout_ms (int): The maximum time in milliseconds to block

        Returns:
            dict: map of topic to list of records (may be empty)
        """
        if self.config["api_version"] >= (0, 8, 2):
            # TODO: Sub-requests should take into account the poll timeout (KAFKA-1894)
            self._coordinator.ensure_coordinator_known()

        if self.config["api_version"] >= (0, 9):
            # ensure we have partitions assigned if we expect to
            if self._subscription.partitions_auto_assigned():
                self._coordinator.ensure_active_group()

        # fetch positions if we have partitions we're subscribed to that we
        # don't know the offset for
        if not self._subscription.has_all_fetch_positions():
            self._update_fetch_positions(self._subscription.missing_fetch_positions())

        # init any new fetches (won't resend pending fetches)
        records = self._fetcher.fetched_records()

        # if data is available already, e.g. from a previous network client
        # poll() call to commit, then just return it immediately
        if records:
            return records

        self._fetcher.init_fetches()
        self._client.poll(timeout_ms)
        return self._fetcher.fetched_records()

    def position(self, partition):
        """Get the offset of the next record that will be fetched

        Arguments:
            partition (TopicPartition): partition to check
        """
        assert self._subscription.is_assigned(partition)

        offset = self._subscription.assignment[partition].position
        if offset is None:
            # _update_fetch_positions() expects an iterable of partitions
            self._update_fetch_positions([partition])
            offset = self._subscription.assignment[partition].position
        return offset

    def pause(self, *partitions):
        """Suspend fetching from the requested partitions.

        Future calls to poll() will not return any records from these partitions
        until they have been resumed using resume(). Note that this method does
        not affect partition subscription; in particular, it does not cause a
        group rebalance when automatic assignment is used.
        """
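To round out this example, here is a hedged sketch of how the poll()/_poll_once() machinery above is typically driven from application code. It is not part of the original example; the broker address, topic name, and group id are placeholders:

from kafka import KafkaConsumer

consumer = KafkaConsumer('demo',
                         bootstrap_servers='localhost:9092',
                         group_id='demo-group')

# Each poll() call runs one or more _poll_once() rounds, which resolve
# any missing fetch positions before records are fetched.
records = consumer.poll(timeout_ms=1000)
for tp, messages in records.items():
    for message in messages:
        print(tp.partition, message.offset, message.value)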
Example 2: KafkaConsumer
# Required import: from kafka.consumer.subscription_state import SubscriptionState [as alias]
# Alternatively: from kafka.consumer.subscription_state.SubscriptionState import missing_fetch_positions [as alias]
#......... part of the code is omitted here .........
        while True:
            records = self._poll_once(remaining, max_records)
            if records:
                return records

            elapsed_ms = (time.time() - start) * 1000
            remaining = timeout_ms - elapsed_ms

            if remaining <= 0:
                return {}

    def _poll_once(self, timeout_ms, max_records):
        """
        Do one round of polling. In addition to checking for new data, this does
        any needed heart-beating, auto-commits, and offset updates.

        Arguments:
            timeout_ms (int): The maximum time in milliseconds to block

        Returns:
            dict: map of topic to list of records (may be empty)
        """
        if self._use_consumer_group():
            self._coordinator.ensure_coordinator_known()
            self._coordinator.ensure_active_group()

        # 0.8.2 brokers support kafka-backed offset storage via group coordinator
        elif self.config['group_id'] is not None and self.config['api_version'] >= (0, 8, 2):
            self._coordinator.ensure_coordinator_known()

        # fetch positions if we have partitions we're subscribed to that we
        # don't know the offset for
        if not self._subscription.has_all_fetch_positions():
            self._update_fetch_positions(self._subscription.missing_fetch_positions())

        # if data is available already, e.g. from a previous network client
        # poll() call to commit, then just return it immediately
        records, partial = self._fetcher.fetched_records(max_records)
        if records:
            # before returning the fetched records, we can send off the
            # next round of fetches and avoid block waiting for their
            # responses to enable pipelining while the user is handling the
            # fetched records.
            if not partial:
                self._fetcher.send_fetches()
            return records

        # send any new fetches (won't resend pending fetches)
        self._fetcher.send_fetches()

        self._client.poll(timeout_ms=timeout_ms, sleep=True)
        records, _ = self._fetcher.fetched_records(max_records)
        return records

    def position(self, partition):
        """Get the offset of the next record that will be fetched

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int: offset
        """
        if not isinstance(partition, TopicPartition):
            raise TypeError('partition must be a TopicPartition namedtuple')
        assert self._subscription.is_assigned(partition), 'Partition is not assigned'
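As a companion to the position() method shown above, the following hedged sketch exercises it with manual assignment; the broker address and topic are placeholders, and a reachable broker is assumed. With manual assignment, the first position() call triggers the lazy offset lookup via _update_fetch_positions():

from kafka import KafkaConsumer
from kafka.structs import TopicPartition

tp = TopicPartition('demo', 0)
consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
consumer.assign([tp])

# The position is resolved lazily on first use if it is still unknown.
print(consumer.position(tp))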
Example 3: AIOKafkaConsumer
# Required import: from kafka.consumer.subscription_state import SubscriptionState [as alias]
# Alternatively: from kafka.consumer.subscription_state.SubscriptionState import missing_fetch_positions [as alias]
#......... part of the code is omitted here .........
            key_deserializer=self._key_deserializer,
            value_deserializer=self._value_deserializer,
            fetch_min_bytes=self._fetch_min_bytes,
            fetch_max_wait_ms=self._fetch_max_wait_ms,
            max_partition_fetch_bytes=self._max_partition_fetch_bytes,
            check_crcs=self._check_crcs,
            fetcher_timeout=self._consumer_timeout)

        if self._group_id is not None:
            # using group coordinator for automatic partitions assignment
            self._coordinator = GroupCoordinator(
                self._client, self._subscription, loop=self._loop,
                group_id=self._group_id,
                heartbeat_interval_ms=self._heartbeat_interval_ms,
                retry_backoff_ms=self._retry_backoff_ms,
                enable_auto_commit=self._enable_auto_commit,
                auto_commit_interval_ms=self._auto_commit_interval_ms,
                assignors=self._partition_assignment_strategy)
            self._coordinator.on_group_rebalanced(
                self._on_change_subscription)
            yield from self._coordinator.ensure_active_group()
        elif self._subscription.needs_partition_assignment:
            # using manual partitions assignment by topic(s)
            yield from self._client.force_metadata_update()
            partitions = []
            for topic in self._topics:
                p_ids = self.partitions_for_topic(topic)
                for p_id in p_ids:
                    partitions.append(TopicPartition(topic, p_id))
            self._subscription.unsubscribe()
            self._subscription.assign_from_user(partitions)
            yield from self._update_fetch_positions(
                self._subscription.missing_fetch_positions())
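            # after this update, every partition assigned above should have a
            # resolved fetch position, i.e. missing_fetch_positions() -> empty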

    def assign(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            This interface does not support incremental assignment and will
            replace the previous assignment (if there was one).

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            no rebalance operation triggered when group membership or cluster
            and topic metadata change.
        """
        self._subscription.assign_from_user(partitions)
        self._on_change_subscription()
        self._client.set_topics([tp.topic for tp in partitions])

    def assignment(self):
        """Get the TopicPartitions currently assigned to this consumer.

        If partitions were directly assigned using assign(), then this will