本文整理汇总了Python中kafka.consumer.subscription_state.SubscriptionState.has_all_fetch_positions方法的典型用法代码示例。如果您正苦于以下问题：Python SubscriptionState.has_all_fetch_positions方法的具体用法？Python SubscriptionState.has_all_fetch_positions怎么用？Python SubscriptionState.has_all_fetch_positions使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类kafka.consumer.subscription_state.SubscriptionState的用法示例。
在下文中一共展示了SubscriptionState.has_all_fetch_positions方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: KafkaConsumer
# 需要导入模块: from kafka.consumer.subscription_state import SubscriptionState [as 别名]
# 或者: from kafka.consumer.subscription_state.SubscriptionState import has_all_fetch_positions [as 别名]
#.........这里部分代码省略.........
# responses to enable pipelining while the user is handling the
# fetched records.
self._fetcher.init_fetches()
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {}
def _poll_once(self, timeout_ms):
"""
Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block
Returns:
dict: map of topic to list of records (may be empty)
"""
if self.config["api_version"] >= (0, 8, 2):
# TODO: Sub-requests should take into account the poll timeout (KAFKA-1894)
self._coordinator.ensure_coordinator_known()
if self.config["api_version"] >= (0, 9):
# ensure we have partitions assigned if we expect to
if self._subscription.partitions_auto_assigned():
self._coordinator.ensure_active_group()
# fetch positions if we have partitions we're subscribed to that we
# don't know the offset for
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
# init any new fetches (won't resend pending fetches)
records = self._fetcher.fetched_records()
# if data is available already, e.g. from a previous network client
# poll() call to commit, then just return it immediately
if records:
return records
self._fetcher.init_fetches()
self._client.poll(timeout_ms)
return self._fetcher.fetched_records()
def position(self, partition):
    """Return the offset of the next record that will be fetched.

    If no position is currently known for *partition*, the fetch
    position is first resolved (committed-offset lookup / reset policy)
    and then re-read.

    Arguments:
        partition (TopicPartition): partition to check; must be one of
            the currently assigned partitions

    Returns:
        int: offset of the next record (None if the position could not
        be resolved)
    """
    assert self._subscription.is_assigned(partition)
    offset = self._subscription.assignment[partition].position
    if offset is None:
        # BUG FIX: _update_fetch_positions expects an iterable of
        # partitions (cf. the missing_fetch_positions() call sites);
        # passing the bare partition would iterate its fields instead.
        self._update_fetch_positions([partition])
        offset = self._subscription.assignment[partition].position
    return offset
def pause(self, *partitions):
"""Suspend fetching from the requested partitions.
Future calls to poll() will not return any records from these partitions
示例2: KafkaConsumer
# 需要导入模块: from kafka.consumer.subscription_state import SubscriptionState [as 别名]
# 或者: from kafka.consumer.subscription_state.SubscriptionState import has_all_fetch_positions [as 别名]
#.........这里部分代码省略.........
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {}
def _poll_once(self, timeout_ms, max_records):
"""
Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block
Returns:
dict: map of topic to list of records (may be empty)
"""
if self._use_consumer_group():
self._coordinator.ensure_coordinator_known()
self._coordinator.ensure_active_group()
# 0.8.2 brokers support kafka-backed offset storage via group coordinator
elif self.config['group_id'] is not None and self.config['api_version'] >= (0, 8, 2):
self._coordinator.ensure_coordinator_known()
# fetch positions if we have partitions we're subscribed to that we
# don't know the offset for
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
# if data is available already, e.g. from a previous network client
# poll() call to commit, then just return it immediately
records, partial = self._fetcher.fetched_records(max_records)
if records:
# before returning the fetched records, we can send off the
# next round of fetches and avoid block waiting for their
# responses to enable pipelining while the user is handling the
# fetched records.
if not partial:
self._fetcher.send_fetches()
return records
# send any new fetches (won't resend pending fetches)
self._fetcher.send_fetches()
self._client.poll(timeout_ms=timeout_ms, sleep=True)
records, _ = self._fetcher.fetched_records(max_records)
return records
def position(self, partition):
"""Get the offset of the next record that will be fetched
Arguments:
partition (TopicPartition): partition to check
Returns:
int: offset
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
示例3: AIOKafkaConsumer
# 需要导入模块: from kafka.consumer.subscription_state import SubscriptionState [as 别名]
# 或者: from kafka.consumer.subscription_state.SubscriptionState import has_all_fetch_positions [as 别名]
#.........这里部分代码省略.........
def unsubscribe(self):
    """Remove every topic subscription and clear the assigned partitions."""
    # Drop local subscription state first, then tell the cluster client
    # we no longer need metadata for any topic.
    self._subscription.unsubscribe()
    self._client.set_topics([])
    log.debug("Unsubscribed all topics or patterns and assigned partitions")
# NOTE(review): generator-based coroutine — @asyncio.coroutine was removed
# in Python 3.11; this code targets older aiokafka/asyncio. Confirm the
# supported Python versions before modernizing to `async def`.
@asyncio.coroutine
def _update_fetch_positions(self, partitions):
    """
    Set the fetch position to the committed position (if there is one)
    or reset it using the offset reset policy the user has configured.

    Arguments:
        partitions (List[TopicPartition]): The partitions that need
            updating fetch positions

    Raises:
        NoOffsetForPartitionError: If no offset is stored for a given
            partition and no offset reset policy is defined
    """
    # Only consult the group coordinator when offsets are kafka-backed
    # (i.e. the consumer participates in a group).
    if self._group_id is not None:
        # refresh commits for all assigned partitions
        yield from self._coordinator.refresh_committed_offsets()
    # then do any offset lookups in case some positions are not known
    yield from self._fetcher.update_fetch_positions(partitions)
def _on_change_subscription(self):
"""This is `group rebalanced` signal handler for update fetch positions
of assigned partitions"""
# fetch positions if we have partitions we're subscribed
# to that we don't know the offset for
if not self._subscription.has_all_fetch_positions():
ensure_future(self._update_fetch_positions(
self._subscription.missing_fetch_positions()),
loop=self._loop)
@asyncio.coroutine
def getone(self, *partitions):
"""
Get one message from Kafka
If no new messages prefetched, this method will wait for it
Arguments:
partitions (List[TopicPartition]): Optional list of partitions to
return from. If no partitions specified then returned message
will be from any partition, which consumer is subscribed to.
Returns:
ConsumerRecord
Will return instance of
.. code:: python
collections.namedtuple(
"ConsumerRecord",
["topic", "partition", "offset", "key", "value"])
Example usage:
.. code:: python
while True: