This article collects typical usage examples of the kafka.errors.KafkaTimeoutError exception in Python. If you are wondering what errors.KafkaTimeoutError is for or how to use it in practice, the hand-picked code examples below may help. You can also explore further usage examples from the module it belongs to, kafka.errors.
A total of 9 code examples of errors.KafkaTimeoutError are shown below, sorted by popularity by default.
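Before the individual examples, here is a minimal self-contained sketch of the pattern most of them share: publishing a record with kafka-python's KafkaProducer and handling KafkaTimeoutError, which the client raises when a send cannot complete within its configured time limits. The broker address, topic, and payload below are placeholders, not values taken from any of the examples.

from kafka import KafkaProducer
from kafka.errors import KafkaTimeoutError

producer = KafkaProducer(bootstrap_servers="localhost:9092")  # placeholder broker address
try:
    future = producer.send("example-topic", b"payload")  # placeholder topic and payload
    future.get(timeout=10)  # wait up to 10 seconds for the broker's acknowledgement
except KafkaTimeoutError:
    # raised if metadata fetching, buffering, or the get() above times out
    pass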
Example 1: write
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def write(self, name, **data):
    """
    Write the metric to kafka

    Args:
        name (str): The name of the metric to write
        data (dict): Additional data to store with the metric
    """
    data["name"] = name
    if "timestamp" not in data:
        data["timestamp"] = datetime.utcnow()
    try:
        self.producer.send(topic=self.topic, value=data)
        self.producer.flush()
    except (KafkaTimeoutError, NoBrokersAvailable) as exc:
        logger.warning('writing metric %r failure %r', data, exc)
Example 2: send
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def send(self, logentry):
    try:
        # send() has a (max_block_ms) timeout and get() has a (max_block_ms) timeout
        # for an upper bound of 2x(max_block_ms) before guaranteed delivery
        future = self._producer.send(
            self.topic, logentry.to_dict(), timestamp_ms=epoch_ms(logentry.datetime)
        )
        record_metadata = future.get(timeout=self.max_block_ms)
        assert future.succeeded
    except KafkaTimeoutError as kte:
        logger.exception("KafkaLogsProducer timeout sending log to Kafka: %s", kte)
        raise LogSendException("KafkaLogsProducer timeout sending log to Kafka: %s" % kte)
    except KafkaError as ke:
        logger.exception("KafkaLogsProducer error sending log to Kafka: %s", ke)
        raise LogSendException("KafkaLogsProducer error sending log to Kafka: %s" % ke)
    except Exception as e:
        logger.exception("KafkaLogsProducer exception sending log to Kafka: %s", e)
        raise LogSendException("KafkaLogsProducer exception sending log to Kafka: %s" % e)
Example 3: process_item
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def process_item(self, item, spider):
    try:
        self.logger.debug("Processing item in KafkaPipeline")
        datum = dict(item)
        datum["timestamp"] = self._get_time()
        prefix = self.topic_prefix
        try:
            if self.use_base64:
                datum['body'] = base64.b64encode(bytes(datum['body'], 'utf-8'))
            message = ujson.dumps(datum, sort_keys=True)
        except:
            message = 'json failed to parse'

        firehose_topic = "{prefix}.crawled_firehose".format(prefix=prefix)
        future = self.producer.send(firehose_topic, message)
        future.add_callback(self._kafka_success, datum, spider)
        future.add_errback(self._kafka_failure, datum, spider)

        if self.appid_topics:
            appid_topic = "{prefix}.crawled_{appid}".format(
                prefix=prefix, appid=datum["appid"])
            future2 = self.producer.send(appid_topic, message)
            future2.add_callback(self._kafka_success, datum, spider)
            future2.add_errback(self._kafka_failure, datum, spider)
    except KafkaTimeoutError:
        self.logger.warning("Caught KafkaTimeoutError exception")

    return item
Example 4: test_process_item
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def test_process_item(self, e):
    item = self._get_item()
    spider = MagicMock()
    spider.name = "link"

    # test normal send, no appid topics
    self.pipe.process_item(item, spider)
    expected = '{"appid":"app","attrs":{},"body":"text","crawlid":"crawlid","links":[],"request_headers":{},"response_headers":{},"response_url":"http:\\/\\/dumb.com","status_code":200,"status_msg":"OK","timestamp":"the time","url":"http:\\/\\/dumb.com"}'
    self.pipe.producer.send.assert_called_once_with('prefix.crawled_firehose',
                                                    expected)
    self.pipe.producer.send.reset_mock()

    # test normal send, with appids
    item = self._get_item()
    self.pipe.appid_topics = True
    self.pipe.process_item(item, spider)
    self.pipe.producer.send.assert_called_with('prefix.crawled_app',
                                               expected)
    self.pipe.producer.send.reset_mock()

    # test base64 encode
    item = self._get_item()
    self.pipe.appid_topics = False
    self.pipe.use_base64 = True
    self.pipe.process_item(item, spider)
    expected = '{"appid":"app","attrs":{},"body":"dGV4dA==","crawlid":"crawlid","links":[],"request_headers":{},"response_headers":{},"response_url":"http:\\/\\/dumb.com","status_code":200,"status_msg":"OK","timestamp":"the time","url":"http:\\/\\/dumb.com"}'
    self.pipe.producer.send.assert_called_once_with('prefix.crawled_firehose',
                                                    expected)

    # test kafka exception
    item = self._get_item()
    copy = deepcopy(item)
    copy['success'] = False
    copy['exception'] = 'traceback'
    # send should not crash the pipeline
    self.pipe.producer.send = MagicMock(side_effect=KafkaTimeoutError('bad kafka'))
    ret_val = self.pipe.process_item(item, spider)
Example 5: bulk_write
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def bulk_write(self, metrics):
    """
    Write multiple metrics to kafka in one request

    Args:
        metrics (list):
    """
    try:
        for metric in metrics:
            self.producer.send(self.topic, metric)
        self.producer.flush()
    except (KafkaTimeoutError, NoBrokersAvailable) as exc:
        logger.warning('bulk_write metrics %r failure %r', metrics, exc)
Example 6: test_write_error_warning
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def test_write_error_warning(self, mocked_logger):
    transport_error = KafkaTimeoutError('mocked error')
    es_index_error_ctx = mock.patch(
        'time_execution.backends.kafka.KafkaProducer.send', side_effect=transport_error
    )
    frozen_time_ctx = freeze_time('2016-07-13')

    with es_index_error_ctx, frozen_time_ctx:
        self.backend.write(name='test:metric', value=None)
        mocked_logger.warning.assert_called_once_with(
            'writing metric %r failure %r',
            {'timestamp': datetime(2016, 7, 13), 'value': None, 'name': 'test:metric'},
            transport_error,
        )
Example 7: test_bulk_write_error
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def test_bulk_write_error(self, mocked_logger):
    transport_error = KafkaTimeoutError('mocked error')
    es_index_error_ctx = mock.patch(
        'time_execution.backends.kafka.KafkaProducer.send', side_effect=transport_error
    )
    metrics = [1, 2, 3]
    with es_index_error_ctx:
        self.backend.bulk_write(metrics)
    mocked_logger.warning.assert_called_once_with('bulk_write metrics %r failure %r', metrics, transport_error)
Example 8: getResultForException
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
def getResultForException(e):
    if isinstance(e, KafkaTimeoutError):
        return {'error': 'Timed out communicating with Message Hub'}
    elif isinstance(e, AuthenticationFailedError):
        return {'error': 'Authentication failed'}
    elif isinstance(e, NoBrokersAvailable):
        return {'error': 'No brokers available. Check that your supplied brokers are correct and available.'}
    else:
        return {'error': '{}'.format(e)}
Example 9: produce_message
# Required import: from kafka import errors [as alias]
# or: from kafka.errors import KafkaTimeoutError [as alias]
async def produce_message(self, *, topic: str, key: bytes, value: bytes, partition: int = None) -> dict:
    prod = None
    try:
        prod = await self.get_producer()
        result = await asyncio.wait_for(
            fut=prod.send_and_wait(topic, key=key, value=value, partition=partition),
            loop=self.loop,
            timeout=self.kafka_timeout
        )
        return {
            "offset": result.offset if result else -1,
            "partition": result.topic_partition.partition if result else 0
        }
    except AssertionError as e:
        self.log.exception("Invalid data")
        return {"error_code": 1, "error": str(e)}
    except (KafkaTimeoutError, asyncio.TimeoutError):
        self.log.exception("Timed out waiting for publisher")
        # timeouts are retriable
        return {"error_code": 1, "error": "timed out waiting to publish message"}
    except BrokerResponseError as e:
        self.log.exception(e)
        resp = {"error_code": 1, "error": e.description}
        if hasattr(e, "retriable") and e.retriable:
            resp["error_code"] = 2
        return resp
    finally:
        if prod:
            await self.producer_queue.put(prod)
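Example 9 assumes a pool of producers behind self.get_producer() and self.producer_queue: a producer is borrowed at the top of the method and returned to the queue in the finally block. A hedged sketch (hypothetical helper, not part of the example's codebase) of how such a pool could be built with aiokafka and an asyncio.Queue:

import asyncio
from aiokafka import AIOKafkaProducer

async def build_producer_pool(bootstrap_servers: str, size: int = 5) -> asyncio.Queue:
    # hypothetical helper: start a fixed number of producers and park them in a queue
    pool: asyncio.Queue = asyncio.Queue()
    for _ in range(size):
        producer = AIOKafkaProducer(bootstrap_servers=bootstrap_servers)
        await producer.start()  # connect to the cluster before handing the producer out
        pool.put_nowait(producer)
    return pool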