This page collects typical usage examples of the Python attribute confluent_kafka.KafkaError._PARTITION_EOF. If you have been wondering what KafkaError._PARTITION_EOF does, how to use it, or what real-world usage looks like, the curated examples below should help. You can also read further into its containing class, confluent_kafka.KafkaError.
Below are 13 code examples of the KafkaError._PARTITION_EOF attribute, sorted by popularity by default.
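Before the examples, here is a minimal sketch of the idiom they all share; the broker address, group id, and topic name are placeholders. _PARTITION_EOF marks the end of a partition rather than a failure, so a consumer loop normally skips it and keeps polling. Note that since librdkafka/confluent-kafka 1.0 these events are only delivered when enable.partition.eof is set in the consumer configuration.

from confluent_kafka import Consumer, KafkaError, KafkaException

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder broker
    'group.id': 'example-group',            # placeholder group id
    'enable.partition.eof': True,           # needed on librdkafka >= 1.0 to receive _PARTITION_EOF events
})
consumer.subscribe(['example-topic'])       # placeholder topic
try:
    while True:
        msg = consumer.poll(timeout=1.0)
        if msg is None:
            continue                        # poll timed out, nothing to do
        if msg.error():
            if msg.error().code() == KafkaError._PARTITION_EOF:
                continue                    # reached end of a partition; not a real error
            raise KafkaException(msg.error())
        print(msg.value())
finally:
    consumer.close()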
Example 1: start
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def start(self):
    self._logger.info("Listening topic:{0}".format(self.kafka_consumer.Topic))
    consumer = self.kafka_consumer.start()
    try:
        while True:
            message = consumer.poll(timeout=1.0)
            if message is None:
                continue
            if not message.error():
                self._new_file(message.value().decode('utf-8'))
            elif message.error().code() == KafkaError._PARTITION_EOF:
                # end-of-partition event: not a real error, keep polling
                continue
            else:
                raise KafkaException(message.error())
    except KeyboardInterrupt:
        sys.stderr.write('%% Aborted by user\n')
    consumer.close()
Example 2: poll_requests
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def poll_requests(self, poll_timeout):
    """ Get the next batch of records """
    # The current python kafka client gives us messages one by one,
    # but for better throughput we want to process many records at once.
    # Keep polling until we get no more records out.
    records = []
    record = self.consumer.poll(poll_timeout)
    while record is not None:
        if not record.error():
            self.log.debug('Received message at offset: %d', record.offset())
            records.append(record)
            record = self.consumer.poll(0.)
        elif record.error().code() == KafkaError._PARTITION_EOF:
            record = self.consumer.poll(0.)
        else:
            # a genuine error: log it and stop polling, rather than
            # looping forever on the same failed record
            self.log.error('Record error received: %s', record.error())
            break
    return records
Example 3: start_consuming
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def start_consuming(self):
    self.subscribe()
    try:
        while not self.terminate:
            msg = self.consumer.poll(2.0)
            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    console_out(msg.error(), self.actor)
                    break
            self.msg_monitor.append(msg.value(), self.consumer_id, self.actor)
        console_out("Consumption terminated", self.actor)
        self.consumer.close()
    except Exception as e:
        console_out("Consumption terminated due to error", self.actor)
        template = "An exception of type {0} occurred. Arguments:{1!r}"
        message = template.format(type(e).__name__, e.args)
        console_out(message, self.actor)
Example 4: main
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def main():
    batch = set()
    try:
        while True:
            msg = consumer.poll(timeout=0.1)
            # check the no-message and error cases first so those branches
            # are actually reachable (a Message object is always truthy)
            if msg is None:
                print('No message')
            elif msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    print('End of partition reached {}, {}'.format(msg.topic(), msg.partition()))
                else:
                    print('Error occurred: {}'.format(msg.error().str()))
            else:
                print('Received message: {}'.format(msg.value()))
                batch = process_message(msg, config['CONSUMER_BATCH_SIZE'])
    except KeyboardInterrupt:
        pass
    except SerializerError as e:
        print('Message deserialization failed for {msg}: {e}'.format(msg=msg, e=e))
    finally:
        persist_messages(batch)
        consumer.close()
Example 5: __init__
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def __init__(self, servers, group, topics, json=False, wrap=False, interval=1):
    c = Consumer({
        'bootstrap.servers': servers,
        'group.id': group,
        'default.topic.config': {
            'auto.offset.reset': 'smallest'
        }
    })
    if not isinstance(topics, list):
        topics = [topics]
    c.subscribe(topics)

    async def _listen(consumer=c, json=json, wrap=wrap, interval=interval):
        while True:
            msg = consumer.poll(interval)
            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    print(msg.error())
                    break
            msg = msg.value().decode('utf-8')
            if not msg:
                break
            if json:
                msg = JSON.loads(msg)  # assumes the module was imported as: import json as JSON
            if wrap:
                msg = [msg]
            yield msg

    super().__init__(foo=_listen)
    self._name = 'Kafka'
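A side note on the configuration above: newer confluent-kafka releases deprecate the nested 'default.topic.config' dict, and topic-level settings such as auto.offset.reset are passed at the top level ('smallest' is an older librdkafka alias for 'earliest'). A minimal equivalent sketch, assuming confluent-kafka >= 1.0 and placeholder broker/group values:

from confluent_kafka import Consumer

# same consumer configuration without the deprecated nested dict
c = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder
    'group.id': 'example-group',            # placeholder
    'auto.offset.reset': 'earliest',        # same effect as 'smallest' above
})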
Example 6: handle_kafka_error
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def handle_kafka_error(self, msg):  # pragma: no cover
    """Handle an error in kafka."""
    if msg.error().code() == KafkaError._PARTITION_EOF:
        # End of partition event
        self.logger.info('%% %s [%d] reached end at offset %d\n',
                         msg.topic(), msg.partition(), msg.offset())
    else:
        # Error
        raise KafkaException(msg.error())
Example 7: test_listen_for_msg_loop
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def test_listen_for_msg_loop(self, mock_consumer, mock_listen):
    """Test that the message loop only calls listen for messages on valid messages."""
    msg_list = [
        None,
        MockMessage(offset=1),
        MockMessage(offset=2, error=MockError(KafkaError._PARTITION_EOF)),
        MockMessage(offset=3, error=MockError(KafkaError._MSG_TIMED_OUT)),
    ]
    mock_consumer.return_value = MockKafkaConsumer(msg_list)
    with patch("itertools.count", side_effect=[[0, 1, 2, 3]]):  # mocking the infinite loop
        with self.assertLogs(logger="masu.external.kafka_msg_handler", level=logging.WARNING):
            msg_handler.listen_for_messages_loop()
    mock_listen.assert_called_once()
Example 8: on_next
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def on_next(self, value):
    if not value.error():
        print(value)
        self.ws.emit('price_event', value)  # , namespace='live')
    elif value.error().code() != KafkaError._PARTITION_EOF:
        # note: this only binds a local name; a shared flag such as
        # self.running is presumably what was intended
        running = False
Example 9: consume
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def consume(self):
    # type: () -> Any
    """
    Consume messages from a given list of topics
    :return:
    """
    records = []
    start = datetime.now()
    try:
        while True:
            msg = self.consumer.poll(timeout=self.consumer_poll_timeout)
            end = datetime.now()
            # Stop once the total consume timeout is exceeded
            if (end - start) > timedelta(seconds=self.consumer_total_timeout):
                break
            if msg is None:
                continue
            if msg.error():
                # Hit the EOF of a partition
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    raise KafkaException(msg.error())
            else:
                records.append(msg.value())
    except Exception as e:
        LOGGER.exception(e)
    finally:
        return records
Example 10: read_all_msgs
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def read_all_msgs(consumer):
    """
    Consumes all messages in the consumer assignment.

    This method assumes the consumer has not already read all of the
    messages available in a partition.

    :param consumer:
    :returns: total messages read
    :rtype: int
    """
    msg_cnt = 0
    eof = {}
    print("=== Draining {} ===".format(consumer.assignment()))
    while True:
        msg = consumer.poll(timeout=1.0)
        if msg is None:
            continue
        topic, partition = msg.topic(), msg.partition()
        if msg.error():
            if msg.error().code() == KafkaError._PARTITION_EOF:
                eof[(topic, partition)] = True
                # stop once every assigned partition has reported EOF
                if len(eof) == len(consumer.assignment()):
                    break
            continue
        # a real message arrived, so this partition is no longer at EOF
        eof.pop((topic, partition), None)
        msg_cnt += 1
    return msg_cnt
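The draining pattern above terminates only once a _PARTITION_EOF event has arrived for every assigned partition, so on newer clients enable.partition.eof must be switched on, and the consumer needs an assignment before the call so that consumer.assignment() is populated. A usage sketch under those assumptions (broker, group id, and topic are placeholders):

from confluent_kafka import Consumer, TopicPartition

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder
    'group.id': 'drain-example',            # placeholder
    'enable.partition.eof': True,           # without this, librdkafka >= 1.0 never reports EOF
})
# assign explicitly so consumer.assignment() is populated before draining
consumer.assign([TopicPartition('example-topic', 0)])
total = read_all_msgs(consumer)
print('drained {} messages'.format(total))
consumer.close()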
Example 11: _handle_error
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def _handle_error(msg):
    if not msg.error():
        return
    # Error or event
    if msg.error().code() == KafkaError._PARTITION_EOF:
        # End of partition event
        logging.info('%s [%d] reached end at offset %d with key %s\n',
                     msg.topic(), msg.partition(), msg.offset(), str(msg.key()))
    else:
        raise EncoderError(msg.error())
Example 12: kafka
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def kafka(callback, servers, group, topics, json=False, wrap=False, interval=1):
    '''Connect to kafka server and pipe results through the callback

    Args:
        callback (callable): function to call on websocket data
        servers (list): kafka bootstrap servers
        group (str): kafka group id
        topics (list): list of kafka topics to connect to
        json (bool): load websocket data as json
        wrap (bool): wrap result in a list
        interval (int): socketio wait interval
    '''
    c = Consumer({
        'bootstrap.servers': servers,
        'group.id': group,
        'default.topic.config': {
            'auto.offset.reset': 'smallest'
        }
    })
    if not isinstance(topics, list):
        topics = [topics]
    c.subscribe(topics)

    def _listen(consumer, json, wrap, interval):
        while True:
            msg = consumer.poll(interval)
            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    break
            msg = msg.value().decode('utf-8')
            if not msg:
                break
            if json:
                msg = load_json(msg)
            if wrap:
                msg = [msg]
            callback(msg)
Example 13: __pollForMessages
# Required imports: from confluent_kafka import KafkaError [as alias]
# Or: from confluent_kafka.KafkaError import _PARTITION_EOF [as alias]
def __pollForMessages(self):
    messages = []
    totalPayloadSize = 0
    batchMessages = True

    if self.__shouldRun():
        while batchMessages and (self.secondsSinceLastPoll() < 2):
            if self.queuedMessage is not None:
                logging.debug('[{}] Handling message left over from last batch.'.format(self.trigger))
                message = self.queuedMessage
                self.queuedMessage = None
            else:
                message = self.consumer.poll(1.0)
                if self.secondsSinceLastPoll() < 0:
                    logging.info('[{}] Completed first poll'.format(self.trigger))

            if message is not None:
                if not message.error():
                    logging.debug("Consumed message: {}".format(str(message)))
                    messageSize = self.__sizeMessage(message)
                    if totalPayloadSize + messageSize > payload_limit:
                        if len(messages) == 0:
                            logging.error('[{}] Single message at offset {} exceeds payload size limit. Skipping this message!'.format(self.trigger, message.offset()))
                            # 'async' became a reserved word in Python 3.7; recent
                            # confluent-kafka releases name this parameter 'asynchronous'
                            self.consumer.commit(message=message, asynchronous=False)
                        else:
                            logging.debug('[{}] Message at offset {} would cause payload to exceed the size limit. Queueing up for the next round...'.format(self.trigger, message.offset()))
                            self.queuedMessage = message
                        # in any case, we need to stop batching now
                        batchMessages = False
                    else:
                        totalPayloadSize += messageSize
                        messages.append(message)
                elif message.error().code() != KafkaError._PARTITION_EOF:
                    logging.error('[{}] Error polling: {}'.format(self.trigger, message.error()))
                    batchMessages = False
                else:
                    logging.debug('[{}] No more messages. Stopping batch op.'.format(self.trigger))
                    batchMessages = False
            else:
                logging.debug('[{}] message was None. Stopping batch op.'.format(self.trigger))
                batchMessages = False

    logging.debug('[{}] Completed poll'.format(self.trigger))

    if len(messages) > 0:
        logging.info("[{}] Found {} messages with a total size of {} bytes".format(self.trigger, len(messages), totalPayloadSize))

    self.updateLastPoll()
    return messages
# decide whether or not to disable a trigger based on the status code returned
# from firing the trigger. Specifically, disable on all 4xx status codes
# except 408 (gateway timeout), 409 (document update conflict), and 429 (throttle)