This page collects typical code examples of the Python method aiokafka.producer.AIOKafkaProducer.start. If you have been wondering exactly what AIOKafkaProducer.start does, how to call it, or want concrete examples of it in use, the curated samples below may help. You can also read further about the enclosing class, aiokafka.producer.AIOKafkaProducer.
Fifteen code examples of AIOKafkaProducer.start are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
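Before the test-suite examples, here is a minimal sketch of the usual start/send/stop flow. It uses modern async/await syntax (recent aiokafka releases no longer take a loop argument); the broker address "localhost:9092" and the topic name "my_topic" are placeholders, not values from the examples below.

import asyncio
from aiokafka import AIOKafkaProducer

async def produce_one():
    # Placeholder broker address; adjust for your cluster.
    producer = AIOKafkaProducer(bootstrap_servers="localhost:9092")
    # start() connects to the cluster and fetches initial metadata;
    # it must complete before any send() call.
    await producer.start()
    try:
        # send_and_wait() enqueues the message and waits for the broker ack.
        await producer.send_and_wait("my_topic", b"hello, Kafka!")
    finally:
        # stop() flushes pending messages and closes the connections.
        await producer.stop()

asyncio.run(produce_one())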
Example 1: test_producer_send_with_serializer
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_with_serializer(self):
    def key_serializer(val):
        return val.upper().encode()

    def serializer(val):
        return json.dumps(val).encode()

    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts,
        value_serializer=serializer,
        key_serializer=key_serializer, acks='all',
        max_request_size=1000)
    yield from producer.start()
    key = 'some key'
    value = {'strKey': 23523.443, 23: 'STRval'}
    future = yield from producer.send(self.topic, value, key=key)
    resp = yield from future
    partition = resp.partition
    offset = resp.offset
    self.assertTrue(partition in (0, 1))  # partition

    future = yield from producer.send(self.topic, 'some str', key=key)
    resp = yield from future
    # expect the same partition because of the same key
    self.assertEqual(resp.partition, partition)
    # expect offset +1
    self.assertEqual(resp.offset, offset + 1)

    value[23] = '*VALUE' * 800
    with self.assertRaises(MessageSizeTooLargeError):
        yield from producer.send(self.topic, value, key=key)

    yield from producer.stop()
    yield from producer.stop()  # should be OK
Example 2: test_producer_send_error
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_error(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts,
        retry_backoff_ms=100,
        linger_ms=5, request_timeout_ms=400)
    yield from producer.start()

    @asyncio.coroutine
    def mocked_send(nodeid, req):
        # RequestTimedOutCode error for partition=0
        return ProduceResponse[0]([(self.topic, [(0, 7, 0), (1, 0, 111)])])

    with mock.patch.object(producer.client, 'send') as mocked:
        mocked.side_effect = mocked_send

        fut1 = yield from producer.send(self.topic, b'text1', partition=0)
        fut2 = yield from producer.send(self.topic, b'text2', partition=1)
        with self.assertRaises(RequestTimedOutError):
            yield from fut1
        resp = yield from fut2
        self.assertEqual(resp.offset, 111)

    @asyncio.coroutine
    def mocked_send_with_sleep(nodeid, req):
        # RequestTimedOutCode error for partition=0
        yield from asyncio.sleep(0.1, loop=self.loop)
        return ProduceResponse[0]([(self.topic, [(0, 7, 0)])])

    with mock.patch.object(producer.client, 'send') as mocked:
        mocked.side_effect = mocked_send_with_sleep
        with self.assertRaises(RequestTimedOutError):
            future = yield from producer.send(
                self.topic, b'text1', partition=0)
            yield from future
    yield from producer.stop()
Example 3: test_get_offsets
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_get_offsets(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    subscription = SubscriptionState("earliest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop, group_id="getoffsets-group")

    yield from self.wait_topic(client, "topic1")
    producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send("topic1", b"first msg", partition=0)
    yield from producer.send("topic1", b"second msg", partition=1)
    yield from producer.send("topic1", b"third msg", partition=1)
    yield from producer.stop()

    yield from coordinator.ensure_active_group()

    offsets = {
        TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
        TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
    }
    yield from coordinator.commit_offsets(offsets)

    self.assertEqual(subscription.all_consumed_offsets(), {})
    subscription.seek(("topic1", 0), 0)
    subscription.seek(("topic1", 1), 0)
    yield from coordinator.refresh_committed_offsets()
    self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
    self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)

    yield from coordinator.close()
    yield from client.close()
Example 4: test_producer_send
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    self.add_cleanup(producer.stop)

    with self.assertRaises(TypeError):
        yield from producer.send(self.topic, 'hello, Kafka!', partition=0)
    future = yield from producer.send(
        self.topic, b'hello, Kafka!', partition=0)
    resp = yield from future
    self.assertEqual(resp.topic, self.topic)
    self.assertTrue(resp.partition in (0, 1))
    self.assertEqual(resp.offset, 0)

    fut = yield from producer.send(self.topic, b'second msg', partition=1)
    resp = yield from fut
    self.assertEqual(resp.partition, 1)

    future = yield from producer.send(self.topic, b'value', key=b'KEY')
    resp = yield from future
    self.assertTrue(resp.partition in (0, 1))

    resp = yield from producer.send_and_wait(self.topic, b'value')
    self.assertTrue(resp.partition in (0, 1))

    yield from producer.stop()
    with self.assertRaises(ProducerClosed):
        yield from producer.send(self.topic, b'value', key=b'KEY')
Example 5: test_producer_send
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from self.wait_topic(producer.client, self.topic)

    with self.assertRaisesRegexp(AssertionError, 'value must be bytes'):
        yield from producer.send(self.topic, 'hello, Kafka!')
    future = yield from producer.send(self.topic, b'hello, Kafka!')
    resp = yield from future
    self.assertEqual(resp.topic, self.topic)
    self.assertTrue(resp.partition in (0, 1))
    self.assertEqual(resp.offset, 0)

    fut = yield from producer.send(self.topic, b'second msg', partition=1)
    resp = yield from fut
    self.assertEqual(resp.partition, 1)

    future = yield from producer.send(self.topic, b'value', key=b'KEY')
    resp = yield from future
    self.assertTrue(resp.partition in (0, 1))

    resp = yield from producer.send_and_wait(self.topic, b'value')
    self.assertTrue(resp.partition in (0, 1))

    yield from producer.stop()
    with self.assertRaises(ProducerClosed):
        yield from producer.send(self.topic, b'value', key=b'KEY')
Example 6: test_producer_correct_time_returned
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_correct_time_returned(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    self.add_cleanup(producer.stop)

    send_time = (time.time() * 1000)
    res = yield from producer.send_and_wait(
        "XXXX", b'text1', partition=0)
    self.assertLess(res.timestamp - send_time, 1000)  # 1s

    res = yield from producer.send_and_wait(
        "XXXX", b'text1', partition=0, timestamp_ms=123123123)
    self.assertEqual(res.timestamp, 123123123)

    expected_timestamp = 999999999

    @asyncio.coroutine
    def mocked_send(*args, **kw):
        # There's no easy way to set LOG_APPEND_TIME on the server,
        # so use this hack for now.
        return ProduceResponse[2](
            topics=[
                ('XXXX', [(0, 0, 0, expected_timestamp)])],
            throttle_time_ms=0)

    with mock.patch.object(producer.client, 'send') as mocked:
        mocked.side_effect = mocked_send

        res = yield from producer.send_and_wait(
            "XXXX", b'text1', partition=0)
        self.assertEqual(res.timestamp_type, LOG_APPEND_TIME)
        self.assertEqual(res.timestamp, expected_timestamp)
Example 7: test_producer_send_with_compression
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_with_compression(self):
    with self.assertRaises(ValueError):
        producer = AIOKafkaProducer(
            loop=self.loop, compression_type='my_custom')

    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts,
        compression_type='gzip')
    yield from producer.start()
    yield from self.wait_topic(producer.client, self.topic)

    # short message will not be compressed
    future = yield from producer.send(
        self.topic, b'this msg is too short for compress')
    resp = yield from future
    self.assertEqual(resp.topic, self.topic)
    self.assertTrue(resp.partition in (0, 1))

    # now message will be compressed
    resp = yield from producer.send_and_wait(
        self.topic, b'large_message-'*100)
    self.assertEqual(resp.topic, self.topic)
    self.assertTrue(resp.partition in (0, 1))
    yield from producer.stop()
Example 8: send_messages
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def send_messages(self, partition, messages, *, topic=None,
                  timestamp_ms=None, return_inst=False, headers=None):
    topic = topic or self.topic
    ret = []
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    try:
        yield from self.wait_topic(producer.client, topic)

        for msg in messages:
            if isinstance(msg, str):
                msg = msg.encode()
            elif isinstance(msg, int):
                msg = str(msg).encode()
            future = yield from producer.send(
                topic, msg, partition=partition,
                timestamp_ms=timestamp_ms, headers=headers)
            resp = yield from future
            self.assertEqual(resp.topic, topic)
            self.assertEqual(resp.partition, partition)
            if return_inst:
                ret.append(resp)
            else:
                ret.append(msg)
    finally:
        yield from producer.stop()
    return ret
Example 9: test_producer_indempotence_not_supported
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_indempotence_not_supported(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts,
        enable_idempotence=True)
    producer
    with self.assertRaises(UnsupportedVersionError):
        yield from producer.start()
    yield from producer.stop()
Example 10: test_producer_send_noack
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_noack(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts, acks=0)
    yield from producer.start()
    yield from self.wait_topic(producer.client, self.topic)
    fut1 = yield from producer.send(
        self.topic, b"hello, Kafka!", partition=0)
    fut2 = yield from producer.send(
        self.topic, b"hello, Kafka!", partition=1)
    done, _ = yield from asyncio.wait([fut1, fut2], loop=self.loop)
    for item in done:
        self.assertEqual(item.result(), None)
Example 11: test_producer_send_with_headers
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_with_headers(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    self.add_cleanup(producer.stop)

    fut = yield from producer.send(
        self.topic, b'msg', partition=0, headers=[("type", b"Normal")])
    resp = yield from fut
    self.assertEqual(resp.partition, 0)
Example 12: test_producer_send_with_headers_raise_error
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_with_headers_raise_error(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    self.add_cleanup(producer.stop)

    with self.assertRaises(UnsupportedVersionError):
        yield from producer.send(
            self.topic, b'msg', partition=0,
            headers=[("type", b"Normal")])
Example 13: test_producer_send_batch
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_batch(self):
    key = b'test key'
    value = b'test value'
    max_batch_size = 10000

    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts,
        max_batch_size=max_batch_size)
    yield from producer.start()
    partitions = yield from producer.partitions_for(self.topic)
    partition = partitions.pop()

    # silly method to find current offset for this partition
    resp = yield from producer.send_and_wait(
        self.topic, value=b'discovering offset', partition=partition)
    offset = resp.offset

    # only fills up to its limits, then returns None
    batch = producer.create_batch()
    self.assertEqual(batch.record_count(), 0)
    num = 0
    while True:
        metadata = batch.append(key=key, value=value, timestamp=None)
        if metadata is None:
            break
        num += 1
    self.assertTrue(num > 0)
    self.assertEqual(batch.record_count(), num)

    # batch gets properly sent
    future = yield from producer.send_batch(
        batch, self.topic, partition=partition)
    resp = yield from future
    self.assertEqual(resp.topic, self.topic)
    self.assertEqual(resp.partition, partition)
    self.assertEqual(resp.offset, offset + 1)

    # batch accepts a too-large message if it's the first
    too_large = b'm' * (max_batch_size + 1)
    batch = producer.create_batch()
    metadata = batch.append(key=None, value=too_large, timestamp=None)
    self.assertIsNotNone(metadata)

    # batch rejects a too-large message if it's not the first
    batch = producer.create_batch()
    batch.append(key=None, value=b"short", timestamp=None)
    metadata = batch.append(key=None, value=too_large, timestamp=None)
    self.assertIsNone(metadata)

    yield from producer.stop()

    # batch can't be sent after closing time
    with self.assertRaises(ProducerClosed):
        yield from producer.send_batch(
            batch, self.topic, partition=partition)
Example 14: test_producer_warn_unclosed
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_warn_unclosed(self):
    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts)
    producer_ref = weakref.ref(producer)
    yield from producer.start()

    with self.silence_loop_exception_handler():
        with self.assertWarnsRegex(
                ResourceWarning, "Unclosed AIOKafkaProducer"):
            del producer
            gc.collect()
    # Assure that the reference was properly collected
    self.assertIsNone(producer_ref())
Example 15: test_producer_send_with_compression
# Required import: from aiokafka.producer import AIOKafkaProducer [as alias]
# Or: from aiokafka.producer.AIOKafkaProducer import start [as alias]
def test_producer_send_with_compression(self):
    with self.assertRaises(ValueError):
        producer = AIOKafkaProducer(
            loop=self.loop, compression_type="my_custom")

    producer = AIOKafkaProducer(
        loop=self.loop, bootstrap_servers=self.hosts,
        compression_type="gzip")
    yield from producer.start()
    yield from self.wait_topic(producer.client, self.topic)

    future = yield from producer.send(
        self.topic, b"this msg is compressed by client")
    resp = yield from future
    self.assertEqual(resp.topic, self.topic)
    self.assertTrue(resp.partition in (0, 1))
    yield from producer.stop()