This article collects typical usage examples of the Python method kafka.consumer.SimpleConsumer.stop. If you have been wondering what SimpleConsumer.stop does, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples for the containing class, kafka.consumer.SimpleConsumer.
Three code examples of the SimpleConsumer.stop method are shown below, sorted by popularity by default.
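Before the full examples, here is a minimal sketch of the typical SimpleConsumer lifecycle, ending with the stop() call this page documents. The broker address, group, and topic below are placeholders, and the snippet assumes the classic kafka-python client that provides SimpleConsumer:

from kafka import KafkaClient
from kafka.consumer import SimpleConsumer

client = KafkaClient('localhost:9092')  # placeholder broker address
consumer = SimpleConsumer(client, b'my-group', b'my-topic')  # placeholder names
try:
    for message in consumer.get_messages(count=10, block=False):
        print(message.message.value)
finally:
    # stop() shuts the consumer down, committing offsets first when
    # auto-commit is enabled.
    consumer.stop()
    client.close()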
Example 1: check
# Required import: from kafka.consumer import SimpleConsumer [as alias]
# Or: from kafka.consumer.SimpleConsumer import stop [as alias]
def check(self, instance):
    consumer_groups = self.read_config(instance, 'consumer_groups',
                                       cast=self._validate_consumer_groups)
    kafka_host_ports = self.read_config(instance, 'kafka_connect_str')
    full_output = self.read_config(instance, 'full_output', cast=bool)
    dimensions = self.read_config(instance, 'dimensions', cast=dict, optional=True)

    new_dimensions = {'component': 'kafka', 'service': 'kafka'}
    if dimensions is not None:
        new_dimensions.update(dimensions.copy())

    try:
        # Connect to Kafka
        kafka_conn = KafkaClient(kafka_host_ports)

        # Query Kafka for consumer offsets
        consumer_offsets = {}
        topics = defaultdict(set)
        for consumer_group, topic_partitions in consumer_groups.iteritems():
            for topic, partitions in topic_partitions.iteritems():
                consumer = SimpleConsumer(kafka_conn, consumer_group, topic)
                # Remember the topic partitions that we've seen so that we can
                # look up their broker offsets later
                topics[topic].update(set(partitions))
                for partition in partitions:
                    consumer_offsets[(consumer_group, topic, partition)] = consumer.offsets[partition]
                consumer.stop()

        # Query Kafka for the broker offsets, done in a separate loop so only
        # one query is done per topic even if multiple consumer groups watch
        # the same topic
        broker_offsets = {}
        for topic, partitions in topics.items():
            offset_responses = kafka_conn.send_offset_request([
                OffsetRequest(topic, p, -1, 1) for p in partitions])

            for resp in offset_responses:
                broker_offsets[(resp.topic, resp.partition)] = resp.offsets[0]
    finally:
        try:
            kafka_conn.close()
        except Exception:
            self.log.exception('Error cleaning up Kafka connection')

    # Report the broker data
    if full_output:
        for (topic, partition), broker_offset in broker_offsets.items():
            broker_dimensions = new_dimensions.copy()
            broker_dimensions.update({'topic': topic, 'partition': partition})
            self.gauge('kafka.broker_offset', broker_offset,
                       dimensions=broker_dimensions)

    # Report the consumer data
    for (consumer_group, topic, partition), consumer_offset in consumer_offsets.items():
        # Get the broker offset
        broker_offset = broker_offsets.get((topic, partition))

        # Report the consumer offset and lag
        consumer_dimensions = new_dimensions.copy()
        consumer_dimensions['topic'] = topic
        consumer_dimensions['partition'] = partition
        consumer_dimensions['consumer_group'] = consumer_group
        if full_output:
            self.gauge('kafka.consumer_offset', consumer_offset,
                       dimensions=consumer_dimensions)
        self.gauge('kafka.consumer_lag', broker_offset - consumer_offset,
                   dimensions=consumer_dimensions)
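A note on the metric arithmetic above: kafka.consumer_lag is the broker's latest offset minus the consumer's committed offset, per partition. As a standalone sketch reusing objects shaped like the example's kafka_conn and consumer (the topic name and partition number are placeholders), the latest broker offset can be queried like this; time=-1 requests the newest offset and max_offsets=1 limits the reply to a single value:

from kafka.common import OffsetRequest

# Ask the broker for the latest offset of partition 0 of a topic.
(resp,) = kafka_conn.send_offset_request([OffsetRequest('my-topic', 0, -1, 1)])
latest = resp.offsets[0]
lag = latest - consumer.offsets[0]  # messages not yet consumed on partition 0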
Example 2: ZKConsumer
# Required import: from kafka.consumer import SimpleConsumer [as alias]
# Or: from kafka.consumer.SimpleConsumer import stop [as alias]
class ZKConsumer(object):

    zk_timeout = 30
    jitter_seconds = 30
    broker_prefix = '/brokers/ids'

    def __init__(
            self,
            zk_hosts,
            group,
            topic,
            nodes,
            zk_handler=None,
            logger=None,
            identifier=None,
            **consumer_kwargs):
        """Creates a Consumer that tracks state in ZooKeeper,
        rebalancing partition ownership as registered consumers change.

        NOTE: this class is intended for version 0.8.1 of Kafka, where offsets
        are managed by Kafka but there is no rebalancing in the protocol.
        """
        if logger is None:
            logger = logging.getLogger('kafka.consumer.ZKConsumer')
        self.logger = logger
        self.identifier = identifier
        if KafkaClient is None:
            raise RuntimeError(
                "Kafka support requires cs.eyrie to be installed with the "
                "Kafka extra: install_requires=['cs.eyrie[Kafka]']")
        self.zk_handler = zk_handler
        self.zk_hosts = zk_hosts
        self.broker_hosts = []
        self.group = group
        self.topic = topic
        self.zk = None
        self.nodes = nodes
        self.client = None
        self.consumer = None
        self.consumer_kwargs = consumer_kwargs
        # This will kick off a cascading sequence to initialize ourselves:
        #   1. Connect to ZK and pull list of Kafka brokers
        #   2. Register ourselves as a consumer in ZK
        #   3. Rebalance partitions across all connected consumers
        self.init_zk()

    def zk_session_watch(self, state):
        self.logger.debug('ZK transitioned to: %s', state)
        if state == KazooState.SUSPENDED:
            if self.consumer is not None:
                self.logger.info('Stopping Kafka consumer')
                self.consumer.stop()
                self.consumer = None
            # Lost connection to ZK; we can't call any methods that would
            # try to contact it (i.e., we can't do self.zkp.finish())
            self.zkp = None
        elif state == KazooState.CONNECTED:
            self.logger.info('Restarting ZK partitioner')
            self.zk.handler.spawn(self.init_zkp)

    def _zkp_wait(self):
        handler = self.zk.handler
        while 1:
            if self.zkp.failed:
                self.logger.warning("Lost or unable to acquire partition")
                self.stop()
            elif self.zkp.release:
                self.zkp.release_set()
            elif self.zkp.acquired:
                def group_change_proxy(event):
                    self.logger.warn('Connected consumers changed')
                    if self.zkp is None:
                        self.logger.info('Restarting ZK partitioner')
                        handler.spawn(self.init_zkp)
                    elif self.zkp is not None and self.zkp.failed:
                        self.logger.warning("Lost or unable to acquire partition")
                        self.stop()
                    else:
                        self.logger.info('Scheduling ZK partitioner set release')
                        rel_greenlet = handler.spawn(self.zkp.release_set)
                        self.logger.info('Scheduling group re-join')
                        rel_greenlet.link_value(
                            lambda greenlet: self.zkp.join_group())
                if not self.nodes:
                    self.logger.info('Partitioner acquired; setting child watch')
                    result = self.zk.get_children_async(self.zkp._group_path)
                    result.rawlink(group_change_proxy)
                # Break out of while loop to begin consuming events
                break
            elif self.zkp.allocating:
                self.zkp.wait_for_acquire()

    def init_zkp(self):
        if not hasattr(self, 'zkp') or self.zkp is None:
            if self.nodes:
                self.zkp = StaticZKPartitioner(
                    self.zk, self.group, self.topic, self.nodes,
                    partitions_changed_cb=self.init_consumer,
                    logger=self.logger, identifier=self.identifier)
            else:
#......... rest of the code omitted .........
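The SUSPENDED/CONNECTED handling in zk_session_watch above follows the standard Kazoo session-listener pattern: on SUSPENDED, stop work and avoid any ZooKeeper calls; on CONNECTED, re-establish state. A minimal standalone sketch of that pattern (the ZooKeeper address is a placeholder):

from kazoo.client import KazooClient, KazooState

zk = KazooClient(hosts='127.0.0.1:2181')  # placeholder address

def session_listener(state):
    if state == KazooState.SUSPENDED:
        # Connection lost: stop consumers; do not call ZK methods here.
        pass
    elif state == KazooState.CONNECTED:
        # Session restored: safe to restart the partitioner.
        pass

zk.add_listener(session_listener)
zk.start()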
Example 3: KafkaConsumer
# Required import: from kafka.consumer import SimpleConsumer [as alias]
# Or: from kafka.consumer.SimpleConsumer import stop [as alias]
class KafkaConsumer(KafkaBase, GeneratorBlock):

    """ A block for consuming Kafka messages
    """
    version = VersionProperty("1.0.0")
    group = StringProperty(title='Group', default="", allow_none=False)
    # use Kafka 'reasonable' value for our own message gathering and
    # signal delivery
    max_msg_count = IntProperty(title='Max message count',
                                default=AUTO_COMMIT_MSG_COUNT,
                                allow_none=False)

    def __init__(self):
        super().__init__()
        self._consumer = None
        self._encoded_group = None
        # message loop maintenance
        self._stop_message_loop_event = None
        self._message_loop_thread = None

    def configure(self, context):
        super().configure(context)
        if not len(self.group()):
            raise ValueError("Group cannot be empty")
        self._encoded_group = self.group().encode()
        self._connect()

    def start(self):
        super().start()
        # start gathering messages
        self._stop_message_loop_event = Event()
        self._message_loop_thread = spawn(self._receive_messages)

    def stop(self):
        # stop gathering messages
        self._stop_message_loop_event.set()
        self._message_loop_thread.join()
        self._message_loop_thread = None
        # disconnect
        self._disconnect()
        super().stop()

    def _parse_message(self, message):
        attrs = dict()
        attrs["magic"] = message.message.magic
        attrs["attributes"] = message.message.attributes
        attrs["key"] = message.message.key
        attrs["value"] = message.message.value
        return Signal(attrs)

    def _receive_messages(self):
        while not self._stop_message_loop_event.is_set():
            try:
                # get kafka messages
                messages = self._consumer.get_messages(
                    count=self.max_msg_count(), block=False)
            except Exception:
                self.logger.exception("Failure getting kafka messages")
                continue
            # if no timeout occurred, parse messages and convert to signals
            if messages:
                signals = []
                for message in messages:
                    # parse and save every signal
                    try:
                        signal = self._parse_message(message)
                    except Exception:
                        self.logger.exception("Failed to parse kafka message:"
                                              " '{0}'".format(message))
                        continue
                    signals.append(signal)
                self.notify_signals(signals)
        self.logger.debug("Exiting message loop")

    def _connect(self):
        super()._connect()
        self._consumer = SimpleConsumer(self._kafka,
                                        self._encoded_group,
                                        self._encoded_topic)

    def _disconnect(self):
        if self._consumer:
            self._consumer.stop()
            self._consumer = None
        super()._disconnect()

    @property
    def connected(self):
        return super().connected and self._consumer
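The stop() sequence in this block (set an Event, join the polling thread, then call SimpleConsumer.stop() inside _disconnect) is the usual way to shut down a consumer loop without racing an in-flight get_messages call. A minimal standalone sketch of the same pattern using only the standard library, with threading.Thread standing in for nio's spawn helper:

from threading import Event, Thread

stop_event = Event()

def poll_loop():
    while not stop_event.is_set():
        pass  # consumer.get_messages(..., block=False) would go here

worker = Thread(target=poll_loop)
worker.start()
# ... later, on shutdown:
stop_event.set()
worker.join()  # let the loop exit before calling consumer.stop()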