This article collects typical usage examples of kafka.errors.KafkaError in Python. If you are unsure how errors.KafkaError is used, or want to see it in real code, the curated examples below may help. You can also explore the enclosing module, kafka.errors, for further usage examples.
The following section shows 15 code examples of errors.KafkaError, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Python code examples.
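Before the examples, here is a minimal, self-contained sketch of the basic pattern they all share: call into kafka-python and catch KafkaError. The broker address and topic name are placeholders, not taken from any of the examples below.

from kafka import KafkaProducer
from kafka.errors import KafkaError

producer = KafkaProducer(bootstrap_servers="localhost:9092")  # assumed broker address
try:
    # send() returns a future; get() blocks until the broker acknowledges or an error is raised
    metadata = producer.send("demo-topic", b"hello").get(timeout=10)
    print(metadata.topic, metadata.partition, metadata.offset)
except KafkaError as exc:
    print("Kafka request failed: {}".format(exc))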
Example 1: send
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def send(self, logentry):
try:
# send() has a (max_block_ms) timeout and get() has a (max_block_ms) timeout
# for an upper bound of 2x(max_block_ms) before guaranteed delivery
future = self._producer.send(
self.topic, logentry.to_dict(), timestamp_ms=epoch_ms(logentry.datetime)
)
record_metadata = future.get(timeout=self.max_block_ms)
assert future.succeeded()  # succeeded() must be called; the bare method attribute is always truthy
except KafkaTimeoutError as kte:
logger.exception("KafkaLogsProducer timeout sending log to Kafka: %s", kte)
raise LogSendException("KafkaLogsProducer timeout sending log to Kafka: %s" % kte)
except KafkaError as ke:
logger.exception("KafkaLogsProducer error sending log to Kafka: %s", ke)
raise LogSendException("KafkaLogsProducer error sending log to Kafka: %s" % ke)
except Exception as e:
logger.exception("KafkaLogsProducer exception sending log to Kafka: %s", e)
raise LogSendException("KafkaLogsProducer exception sending log to Kafka: %s" % e)
Example 2: subscribe_to_topics
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def subscribe_to_topics(self, topics, **kwargs):
"""
Subscribe to list of specified topics.
Arguments:
topics(list): list of topic names to subscribe to
pattern(str): regex pattern of topic names to subscribe to
listener(func): callback function
Returns:
result(bool) : False if an exception occurs, True otherwise
"""
pattern = kwargs.get("pattern", None)
listener = kwargs.get("listener", None)
print_info("subscribe to topics {}".format(topics))
try:
self.kafka_consumer.subscribe(topics=topics,
pattern=pattern,
listener=listener)
result = True
except KafkaError as exc:
print_error("Exception during subscribing to topics - {}".format(exc))
result = False
return result
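A minimal usage sketch of the same subscribe call outside the wrapper; the broker address and topic names are assumptions. Note that kafka-python treats an explicit topic list and a regex pattern as mutually exclusive.

from kafka import KafkaConsumer

consumer = KafkaConsumer(bootstrap_servers="localhost:9092", group_id="demo-group")
consumer.subscribe(topics=["topic1", "topic2"])   # subscribe by explicit topic list
# or, instead of a topic list, subscribe by regex pattern:
# consumer.subscribe(pattern="^metrics-.*")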
Example 3: assign_partitions
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def assign_partitions(self, partitions):
"""
Assign partitions to consumer.
Arguments:
partitions(list) : list of [topic, partition] lists
example : [[topic1,1], [topic2,1]]
Returns:
result(bool) : False if an exception occurs, True otherwise
"""
print_info("assigning partitions to consumer {}".format(partitions))
topic_partitions = [TopicPartition(topic=tup[0], partition=tup[1]) for tup in partitions]
try:
self.kafka_consumer.assign(topic_partitions)
result = True
except KafkaError as exc:
print_error("Exception during assiging partitions - {}".format(exc))
result = False
return result
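For reference, the underlying kafka-python call with assumed topic names; manual assignment bypasses the consumer group coordinator and must not be mixed with subscribe().

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")  # assumed broker address
consumer.assign([TopicPartition("topic1", 1), TopicPartition("topic2", 1)])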
Example 4: seek_to_position
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def seek_to_position(self, topic, partition, offset):
"""
Seek to the given offset.
Arguments:
topic(str): topic name
partition(int): partition number
offset(int): offset number
Returns:
result(bool) : False if an exception occurs, True otherwise
"""
print_info("seeking to position {}:{}:{}".format(topic, partition, offset))
topic_partition = TopicPartition(topic=topic, partition=partition)
try:
self.kafka_consumer.seek(partition=topic_partition, offset=offset)
result = True
except KafkaError as exc:
print_error("Exception during seek - {}".format(exc))
result = False
return result
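A standalone sketch of the seek call with assumed names; seek() only works on a partition that is currently assigned to the consumer.

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")  # assumed broker address
tp = TopicPartition("topic1", 0)
consumer.assign([tp])
consumer.seek(tp, 42)   # the next poll() for this partition starts at offset 42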
Example 5: get_topics
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def get_topics(self):
"""
Get the topic partitions currently assigned to the consumer.
Arguments:
None.
Returns:
topic_list(list of lists): list of [topic, partition] lists
example : [[topic1,1], [topic2,2]]
"""
print_info("get all the topics consumer is subscribed to")
try:
topic_partitions = self.kafka_consumer.assignment()
topic_list = [[topic_partition.topic, topic_partition.partition] \
for topic_partition in topic_partitions]
except KafkaError as exc:
print_error("Exception during getting assigned partitions - {}".format(exc))
topic_list = None
return topic_list
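A short sketch of what assignment() returns, with assumed names; it is a set of TopicPartition namedtuples, which is why the wrapper above unpacks .topic and .partition.

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")  # assumed broker address
consumer.assign([TopicPartition("topic1", 0), TopicPartition("topic2", 2)])
print([[tp.topic, tp.partition] for tp in consumer.assignment()])  # e.g. [['topic1', 0], ['topic2', 2]] (order not guaranteed)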
Example 6: create_partitions_in_topic
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def create_partitions_in_topic(self, partitions, **kwargs):
"""
Create partitions in existing topics.
Arguments:
partitions(list) : list of ['topic_name', 'num_partitions'] lists
example : [['topic1',4], ['topic2',5]]
timeout(int): timeout in milliseconds
validate(bool): if True, only validate the request without creating partitions
Returns:
result(bool) : False if an exception occurs, True otherwise
"""
timeout = kwargs.get("timeout", None)
validate = kwargs.get("validate", False)
topic_partitions = {tup[0]:NewPartitions(total_count=tup[1]) for tup in partitions}
print_info("creating partitions in topic")
try:
self.kafka_client.create_partitions(topic_partitions=topic_partitions,
timeout_ms=timeout,
validate_only=validate)
result = True
except KafkaError as exc:
print_error("Exception during creating partitions - {}".format(exc))
result = False
return result
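The self.kafka_client used above is presumably a KafkaAdminClient; a minimal sketch of the direct call with an assumed broker address. Partition counts can only be increased, never decreased.

from kafka.admin import KafkaAdminClient, NewPartitions

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")  # assumed broker address
admin.create_partitions({"topic1": NewPartitions(total_count=4)},
                        timeout_ms=10000, validate_only=False)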
Example 7: commit_offsets
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def commit_offsets(
self, internal_name: Tuple[str, str], content_type: str, request_data: dict, cluster_metadata: dict
):
self.log.info("Committing offsets for %s", internal_name)
self._assert_consumer_exists(internal_name, content_type)
if request_data:
self._assert_has_key(request_data, "offsets", content_type)
payload = {}
for el in request_data.get("offsets", []):
for k in ["partition", "offset"]:
convert_to_int(el, k, content_type)
# If we commit for a partition that does not belong to this consumer, then the internal error raised
# is marked as retriable, and thus the commit method will remain blocked in what looks like an infinite loop
self._topic_and_partition_valid(cluster_metadata, el, content_type)
payload[TopicPartition(el["topic"], el["partition"])] = OffsetAndMetadata(el["offset"] + 1, None)
async with self.consumer_locks[internal_name]:
consumer = self.consumers[internal_name].consumer
payload = payload or None
try:
consumer.commit(offsets=payload)
except KafkaError as e:
KarapaceBase.internal_error(message=f"error sending commit request: {e}", content_type=content_type)
empty_response()
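Outside the Karapace proxy machinery, the same commit boils down to a plain kafka-python call. A sketch with assumed names; note the offset + 1 convention above, because a committed offset marks the next message to read.

from kafka import KafkaConsumer, TopicPartition
from kafka.structs import OffsetAndMetadata

consumer = KafkaConsumer(bootstrap_servers="localhost:9092", group_id="demo-group",
                         enable_auto_commit=False)   # assumed settings
tp = TopicPartition("topic1", 0)
consumer.assign([tp])
consumer.commit({tp: OffsetAndMetadata(11, None)})   # resume from offset 11 after a restart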
Example 8: listen_for_messages
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def listen_for_messages(msg, consumer, application_source_id): # noqa: C901
"""
Listen for Platform-Sources kafka messages.
Args:
msg: the Kafka message to process
consumer (Consumer): Kafka consumer object
application_source_id (Integer): Cost Management's current Application Source ID. Used for
kafka message filtering.
Returns:
None
"""
try:
try:
msg = get_sources_msg_data(msg, application_source_id)
offset = msg.get("offset")
partition = msg.get("partition")
except SourcesMessageError:
return
if msg:
LOG.info(f"Processing message offset: {offset} partition: {partition}")
topic_partition = TopicPartition(topic=Config.SOURCES_TOPIC, partition=partition, offset=offset)
LOG.info(f"Cost Management Message to process: {str(msg)}")
try:
with transaction.atomic():
process_message(application_source_id, msg)
consumer.commit()
except (IntegrityError, InterfaceError, OperationalError) as err:
connection.close()
LOG.error(f"{type(err).__name__}: {err}")
rewind_consumer_to_retry(consumer, topic_partition)
except SourcesHTTPClientError as err:
LOG.error(err)
rewind_consumer_to_retry(consumer, topic_partition)
except SourceNotFoundError:
LOG.warning(f"Source not found in platform sources. Skipping msg: {msg}")
consumer.commit()
except KafkaError as error:
LOG.error(f"[listen_for_messages] Kafka error encountered: {type(error).__name__}: {error}", exc_info=True)
except Exception as error:
LOG.error(f"[listen_for_messages] UNKNOWN error encountered: {type(error).__name__}: {error}", exc_info=True)
Example 9: seek
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def seek(self, topic_partition):
# This isn't realistic... But it's one way to stop the consumer for our needs.
raise KafkaError("Seek to commited. Closing...")
Example 10: getone
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def getone(self):
for msg in self.preloaded_messages:
return msg
raise KafkaError("Closing Mock Consumer")
Example 11: test_kafka_connection_metrics_listen_for_messages
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def test_kafka_connection_metrics_listen_for_messages(self, mock_start, mock_sleep):
"""Test check_kafka_connection increments kafka connection errors on KafkaError."""
connection_errors_before = WORKER_REGISTRY.get_sample_value("kafka_connection_errors_total")
source_integration.is_kafka_connected()
connection_errors_after = WORKER_REGISTRY.get_sample_value("kafka_connection_errors_total")
self.assertEqual(connection_errors_after - connection_errors_before, 1)
Example 12: send_json_data
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def send_json_data(self, params):
try:
params_message = json.dumps(params)
producer = self.producer
producer.send(self.kafkatopic, params_message.encode('utf-8'))
producer.flush()
except KafkaError as e:
print(e)
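A sketch of how the producer this wrapper depends on might be created, with an assumed broker address and topic; the original class presumably sets self.producer and self.kafkatopic in its constructor.

import json
from kafka import KafkaProducer
from kafka.errors import KafkaError

producer = KafkaProducer(bootstrap_servers="localhost:9092")  # assumed broker address
try:
    producer.send("demo-topic", json.dumps({"id": 1}).encode("utf-8"))
    producer.flush()   # block until buffered records are actually sent
except KafkaError as e:
    print(e)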
Example 13: take_prediction_for_worker
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def take_prediction_for_worker(self, worker_id: str, query_id: str) -> Union[Prediction, None]:
name = f'workers_{worker_id}_{query_id}_prediction'
prediction_consumer = KafkaConsumer(name, bootstrap_servers=self.connection_url, auto_offset_reset='earliest', group_id=PREDICTIONS_QUEUE)
prediction = None
try:
prediction = next(prediction_consumer).value
prediction_consumer.commit()
prediction = pickle.loads(prediction)
except KafkaError:
pass
prediction_consumer.close()
logger.info(f'Took prediction for query "{query_id}" from worker "{worker_id}"')
return prediction
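For illustration only, a hypothetical producer-side counterpart that publishes a pickled prediction to the per-worker topic read above; the broker address and payload are assumptions.

import pickle
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="localhost:9092")   # assumed broker address
topic = "workers_{}_{}_prediction".format("worker-1", "query-42")
producer.send(topic, pickle.dumps({"label": "cat", "score": 0.97}))
producer.flush()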
Example 14: pop_queries_for_worker
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def pop_queries_for_worker(self, worker_id: str, batch_size: int) -> List[Query]:
name = f'workers_{worker_id}_queries'
query_consumer = KafkaConsumer(name, bootstrap_servers=self.connection_url, auto_offset_reset='earliest', group_id=QUERIES_QUEUE)
partition = TopicPartition(name, 0)
partitiondic = query_consumer.end_offsets([partition])
offsetend = partitiondic.get(partition, None)
if offsetend == 0:
query_consumer.close()
return []
try:
queries = []
while True:
record = next(query_consumer)
queries.append(record.value)
query_consumer.commit()
if record.offset >= offsetend-1 or len(queries) == batch_size:
break
queries = [pickle.loads(x) for x in queries]
query_consumer.close()
return queries
except KafkaError:
query_consumer.close()
return []
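The emptiness check above relies on end_offsets(), which maps each TopicPartition to the offset one past the last available record. A minimal sketch with assumed names:

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")   # assumed broker address
tp = TopicPartition("workers_worker-1_queries", 0)
end_offset = consumer.end_offsets([tp])[tp]
print("queue is empty" if end_offset == 0 else "end offset is {}".format(end_offset))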
Example 15: __init__
# Required import: from kafka import errors [as alias]
# Or: from kafka.errors import KafkaError [as alias]
def __init__(self, *topics, **configs):
"""
Create Kafka Consumer object
"""
print_info("creating kafka consumer")
try:
self.kafka_consumer = KafkaConsumer(*topics, **configs)
except KafkaError as exc:
print_error("Kafka consumer - Exception during connecting to broker - {}".format(exc))