This article collects typical usage examples of the Python class kafkatest.services.verifiable_producer.VerifiableProducer. If you have been wondering what exactly VerifiableProducer does, how to use it, or where to find working examples, the curated class examples below may help.
The following presents 15 code examples of the VerifiableProducer class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
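Before diving into the examples, here is a minimal sketch of the lifecycle most of them share: construct the service, start it, wait for acknowledgements, then stop it. The topic name and message count are illustrative placeholders, and test_context/kafka are assumed to come from a surrounding ducktape Test, as in the examples below.

from ducktape.utils.util import wait_until
from kafkatest.services.verifiable_producer import VerifiableProducer

# Minimal sketch: produce 100 messages to an existing topic, wait for acks,
# then shut the service down. "sketch-topic" is a hypothetical topic name.
producer = VerifiableProducer(test_context, num_nodes=1, kafka=kafka,
                              topic="sketch-topic", max_messages=100,
                              throughput=1000)
producer.start()
wait_until(lambda: producer.num_acked >= 100, timeout_sec=30,
           err_msg="Timed out waiting for 100 acked messages")
producer.stop()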
Example 1: move_start_offset
def move_start_offset(self):
"""We move the start offset of the topic by writing really old messages
and waiting for them to be cleaned up.
"""
producer = VerifiableProducer(self.test_context, 1, self.kafka, self.topic,
throughput=-1, enable_idempotence=True,
create_time=1000)
producer.start()
wait_until(lambda: producer.num_acked > 0,
timeout_sec=30,
err_msg="Failed to get an acknowledgement for %ds" % 30)
# Wait 8 seconds to let the topic be seeded with messages that will
# be deleted. The 8 seconds is important, since we should get 2 deleted
# segments in this period based on the configured log roll time and the
# retention check interval.
time.sleep(8)
producer.stop()
self.logger.info("Seeded topic with %d messages which will be deleted" %\
producer.num_acked)
# Since the configured check interval is 5 seconds, we wait another
# 6 seconds to ensure that at least one more cleaning pass runs, so
# that the last segment is deleted. An alternative to using timeouts
# is to poll each partition until the log start offset matches the end
# offset. The latter is more robust.
time.sleep(6)
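The closing comment describes a more robust alternative: instead of sleeping, poll each partition until the log start offset matches the end offset. A hedged sketch of that idea follows; get_log_start_offset and get_log_end_offset are hypothetical helpers (e.g. thin wrappers around GetOffsetShell run on a broker node), not part of the kafkatest API shown here.

def start_offset_caught_up(topic, num_partitions):
    # Hypothetical helpers: return the current log start/end offset of a
    # partition, e.g. by shelling out to GetOffsetShell on a broker node.
    return all(get_log_start_offset(topic, p) == get_log_end_offset(topic, p)
               for p in range(num_partitions))

wait_until(lambda: start_offset_caught_up(self.topic, 1),
           timeout_sec=30, backoff_sec=1,
           err_msg="Log start offset never caught up to the end offset")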
Example 2: TestVerifiableProducer
class TestVerifiableProducer(Test):
"""Sanity checks on verifiable producer service class."""
def __init__(self, test_context):
super(TestVerifiableProducer, self).__init__(test_context)
self.topic = "topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
topics={self.topic: {"partitions": 1, "replication-factor": 1}})
self.num_messages = 1000
# This will produce to the source kafka cluster
self.producer = VerifiableProducer(test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
max_messages=self.num_messages, throughput=self.num_messages/5)
def setUp(self):
self.zk.start()
self.kafka.start()
@cluster(num_nodes=3)
@parametrize(producer_version=str(LATEST_0_8_2))
@parametrize(producer_version=str(LATEST_0_9))
@parametrize(producer_version=str(LATEST_0_10_0))
@parametrize(producer_version=str(LATEST_0_10_1))
@parametrize(producer_version=str(DEV_BRANCH))
def test_simple_run(self, producer_version=DEV_BRANCH):
"""
Test that we can start VerifiableProducer on the current branch snapshot version or against the 0.8.2 jar, and
verify that we can produce a small number of messages.
"""
node = self.producer.nodes[0]
node.version = KafkaVersion(producer_version)
self.producer.start()
wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5,
err_msg="Producer failed to start in a reasonable amount of time.")
# Using version.vstring (distutils.version.LooseVersion) is a tricky way of
# ensuring that this check works with DEV_BRANCH.
# When running VerifiableProducer 0.8.X, both the current branch version and
# 0.8.X should show up because of the way the verifiable producer pulls some
# development directories into its classpath.
#
# If the test fails here because 'ps .. | grep' couldn't find the process,
# it means the login and grep that is_version() performs are slower than
# the time it takes the producer to produce its messages.
# The easy fix is to decrease throughput= above; the proper fix is to make
# the producer not terminate until explicitly killed in this case.
if node.version <= LATEST_0_8_2:
assert is_version(node, [node.version.vstring, DEV_BRANCH.vstring], logger=self.logger)
else:
assert is_version(node, [node.version.vstring], logger=self.logger)
self.producer.wait()
num_produced = self.producer.num_acked
assert num_produced == self.num_messages, "num_produced: %d, num_messages: %d" % (num_produced, self.num_messages)
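The comment in test_simple_run warns that the ps-based is_version() check can race with a producer that finishes quickly. One way to sidestep the race, sketched under the assumption that leaving max_messages at its default keeps VerifiableProducer producing until explicitly stopped (the way Example 1 uses it):

# Sketch: keep the producer alive until the version check has run, then
# stop it explicitly. Throughput and timeout values are illustrative.
producer = VerifiableProducer(test_context, num_nodes=1, kafka=kafka,
                              topic="topic", throughput=1000)
producer.start()
wait_until(lambda: producer.num_acked > 5, timeout_sec=5,
           err_msg="Producer failed to start in a reasonable amount of time.")
node = producer.nodes[0]
assert is_version(node, [node.version.vstring], logger=logger)
producer.stop()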
Example 3: SimpleConsumerShellTest
class SimpleConsumerShellTest(Test):
"""
Tests SimpleConsumerShell tool
"""
def __init__(self, test_context):
super(SimpleConsumerShellTest, self).__init__(test_context)
self.num_zk = 1
self.num_brokers = 1
self.messages_received_count = 0
self.topics = {TOPIC: {"partitions": NUM_PARTITIONS, "replication-factor": REPLICATION_FACTOR}}
self.zk = ZookeeperService(test_context, self.num_zk)
def setUp(self):
self.zk.start()
def start_kafka(self):
self.kafka = KafkaService(self.test_context, self.num_brokers, self.zk, topics=self.topics)
self.kafka.start()
def run_producer(self):
# This will produce to the kafka cluster
self.producer = VerifiableProducer(
self.test_context, num_nodes=1, kafka=self.kafka, topic=TOPIC, throughput=1000, max_messages=MAX_MESSAGES
)
self.producer.start()
wait_until(
lambda: self.producer.num_acked == MAX_MESSAGES,
timeout_sec=10,
err_msg="Timeout awaiting messages to be produced and acked",
)
def start_simple_consumer_shell(self):
self.simple_consumer_shell = SimpleConsumerShell(self.test_context, 1, self.kafka, TOPIC)
self.simple_consumer_shell.start()
def test_simple_consumer_shell(self):
"""
Tests if SimpleConsumerShell is fetching expected records
:return: None
"""
self.start_kafka()
self.run_producer()
self.start_simple_consumer_shell()
# Assert that SimpleConsumerShell is fetching expected number of messages
wait_until(
lambda: self.simple_consumer_shell.get_output().count("\n") == (MAX_MESSAGES + 1),
timeout_sec=10,
err_msg="Timed out waiting to receive expected number of messages.",
)
Example 4: seed_messages
def seed_messages(self, topic, num_seed_messages):
seed_timeout_sec = 10000
seed_producer = VerifiableProducer(context=self.test_context,
num_nodes=1,
kafka=self.kafka,
topic=topic,
message_validator=is_int,
max_messages=num_seed_messages,
enable_idempotence=True)
seed_producer.start()
wait_until(lambda: seed_producer.num_acked >= num_seed_messages,
timeout_sec=seed_timeout_sec,
err_msg="Producer failed to produce messages %d in %ds." %\
(self.num_seed_messages, seed_timeout_sec))
return seed_producer.acked
Example 5: start_producer
def start_producer(self, max_messages, acks, timeout):
# This will produce to the kafka cluster
current_acked = 0
self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=TOPIC, throughput=1000, acks=acks, max_messages=max_messages)
self.producer.start()
wait_until(lambda: acks == 0 or self.producer.num_acked >= current_acked + max_messages, timeout_sec=timeout,
err_msg="Timeout awaiting messages to be produced and acked")
Example 6: test_compatibility
def test_compatibility(self, producer_version, consumer_version, compression_types, new_consumer=True, timestamp_type=None):
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
for node in self.kafka.nodes:
if timestamp_type is not None:
node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
self.kafka.start()
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int,
compression_types=compression_types,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
Example 7: test_compressed_topic
def test_compressed_topic(self, compression_types):
"""Test produce => consume => validate for compressed topics
Setup: 1 zk, 1 kafka node, 1 topic with partitions=10, replication-factor=1
The compression_types parameter gives a list of compression types (or no compression if
"none"). Each producer in a VerifiableProducer group (num_producers = 4) will use a
compression type from the list based on the producer's index in the group.
- Produce messages in the background
- Consume messages in the background
- Stop producing, and finish consuming
- Validate that every acked message was consumed
"""
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix,
compression_types=compression_types)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000, message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
Example 8: test_consumer_back_compatibility
def test_consumer_back_compatibility(self):
"""Run the scala 0.8.X consumer against an 0.9.X cluster.
Expect 0.8.X scala consumer to fail with buffer underflow. This error is the same as when an 0.9.X producer
is run against an 0.8.X broker: the broker responds to a V1 fetch request with a V0 fetch response; the
client then tries to parse this V0 fetch response as a V1 fetch response, resulting in a BufferUnderflowException
"""
num_messages = 10
self.producer = VerifiableProducer(
self.test_context, self.num_producers, self.kafka, self.topic, max_messages=num_messages,
throughput=self.producer_throughput, version=LATEST_0_8_2)
self.consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-09X",
consumer_timeout_ms=10000, message_validator=is_int, version=TRUNK)
self.old_consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-08X",
consumer_timeout_ms=10000, message_validator=is_int, version=LATEST_0_8_2)
self.producer.run()
self.consumer.run()
self.old_consumer.run()
consumed = len(self.consumer.messages_consumed[1])
old_consumed = len(self.old_consumer.messages_consumed[1])
assert old_consumed == num_messages, "Expected 0.8.X scala consumer to consume %d, but only got %d" % (num_messages, old_consumed)
assert consumed == 0, "Expected 0.9.X scala consumer to fail to consume any messages, but got %d" % consumed
self.logger.info("Grepping consumer log for expected error type")
node = self.consumer.nodes[0]
node.account.ssh("egrep -m 1 %s %s" % ("\"java\.nio\.BufferUnderflowException\"", self.consumer.LOG_FILE), allow_fail=False)
Example 9: CompressionTest
class CompressionTest(ProduceConsumeValidateTest):
"""
These tests validate produce / consume for compressed topics.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(CompressionTest, self).__init__(test_context=test_context)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, topics={self.topic: {
"partitions": 10,
"replication-factor": 1}})
self.num_partitions = 10
self.timeout_sec = 60
self.producer_throughput = 1000
self.num_producers = 4
self.messages_per_producer = 1000
self.num_consumers = 1
def setUp(self):
self.zk.start()
def min_cluster_size(self):
# Override this since we're adding services outside of the constructor
return super(CompressionTest, self).min_cluster_size() + self.num_producers + self.num_consumers
@parametrize(compression_types=["snappy","gzip","lz4","none"], new_consumer=True)
@parametrize(compression_types=["snappy","gzip","lz4","none"], new_consumer=False)
def test_compressed_topic(self, compression_types, new_consumer):
"""Test produce => consume => validate for compressed topics
Setup: 1 zk, 1 kafka node, 1 topic with partitions=10, replication-factor=1
The compression_types parameter gives a list of compression types (or no compression if
"none"). Each producer in a VerifiableProducer group (num_producers = 4) will use a
compression type from the list based on the producer's index in the group.
- Produce messages in the background
- Consume messages in the background
- Stop producing, and finish consuming
- Validate that every acked message was consumed
"""
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix,
compression_types=compression_types)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
new_consumer=new_consumer, consumer_timeout_ms=60000,
message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
Example 10: TestVerifiableProducer
class TestVerifiableProducer(Test):
"""Sanity checks on verifiable producer service class."""
def __init__(self, test_context):
super(TestVerifiableProducer, self).__init__(test_context)
self.topic = "topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
topics={self.topic: {"partitions": 1, "replication-factor": 1}})
self.num_messages = 1000
# This will produce to the source kafka cluster
self.producer = VerifiableProducer(test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
max_messages=self.num_messages, throughput=1000)
def setUp(self):
self.zk.start()
self.kafka.start()
@parametrize(producer_version=str(LATEST_0_8_2))
@parametrize(producer_version=str(LATEST_0_9))
@parametrize(producer_version=str(TRUNK))
def test_simple_run(self, producer_version=TRUNK):
"""
Test that we can start VerifiableProducer on trunk or against the 0.8.2 jar, and
verify that we can produce a small number of messages.
"""
node = self.producer.nodes[0]
node.version = KafkaVersion(producer_version)
self.producer.start()
wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5,
err_msg="Producer failed to start in a reasonable amount of time.")
# Using version.vstring (distutils.version.LooseVersion) is a tricky way of
# ensuring that this check works with TRUNK.
# When running VerifiableProducer 0.8.X, both the trunk version and 0.8.X should show up because of the
# way the verifiable producer pulls some trunk directories into its classpath.
if node.version <= LATEST_0_8_2:
assert is_version(node, [node.version.vstring, TRUNK.vstring])
else:
assert is_version(node, [node.version.vstring])
self.producer.wait()
num_produced = self.producer.num_acked
assert num_produced == self.num_messages, "num_produced: %d, num_messages: %d" % (num_produced, self.num_messages)
Example 11: ClientCompatibilityTestNewBroker
class ClientCompatibilityTestNewBroker(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(ClientCompatibilityTestNewBroker, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
# Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.messages_per_producer = 1000
@cluster(num_nodes=6)
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=str("LogAppendTime"))
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["none"], new_consumer=False, timestamp_type=None)
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("CreateTime"))
@parametrize(producer_version=str(LATEST_1_1), consumer_version=str(LATEST_1_1), compression_types=["lz4"], timestamp_type=str("CreateTime"))
@parametrize(producer_version=str(LATEST_1_0), consumer_version=str(LATEST_1_0), compression_types=["none"], timestamp_type=str("CreateTime"))
@parametrize(producer_version=str(LATEST_0_11_0), consumer_version=str(LATEST_0_11_0), compression_types=["gzip"], timestamp_type=str("CreateTime"))
@parametrize(producer_version=str(LATEST_0_10_2), consumer_version=str(LATEST_0_10_2), compression_types=["lz4"], timestamp_type=str("CreateTime"))
@parametrize(producer_version=str(LATEST_0_10_1), consumer_version=str(LATEST_0_10_1), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
@parametrize(producer_version=str(LATEST_0_10_0), consumer_version=str(LATEST_0_10_0), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=None)
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=None)
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
@parametrize(producer_version=str(LATEST_0_8_2), consumer_version=str(LATEST_0_8_2), compression_types=["none"], new_consumer=False, timestamp_type=None)
def test_compatibility(self, producer_version, consumer_version, compression_types, new_consumer=True, timestamp_type=None):
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
for node in self.kafka.nodes:
if timestamp_type is not None:
node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
self.kafka.start()
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int,
compression_types=compression_types,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
Example 12: __init__
def __init__(self, test_context):
super(TestVerifiableProducer, self).__init__(test_context)
self.topic = "topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
topics={self.topic: {"partitions": 1, "replication-factor": 1}})
self.num_messages = 1000
# This will produce to the source kafka cluster
self.producer = VerifiableProducer(test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
max_messages=self.num_messages, throughput=1000)
Example 13: ClientCompatibilityProduceConsumeTest
class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest):
"""
These tests validate that we can use a new client to produce and consume from older brokers.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=3)
self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
"partitions": 10,
"replication-factor": 2}})
self.num_partitions = 10
self.timeout_sec = 60
self.producer_throughput = 1000
self.num_producers = 2
self.messages_per_producer = 1000
self.num_consumers = 1
def setUp(self):
self.zk.start()
def min_cluster_size(self):
# Override this since we're adding services outside of the constructor
return super(ClientCompatibilityProduceConsumeTest, self).min_cluster_size() + self.num_producers + self.num_consumers
@parametrize(broker_version=str(DEV_BRANCH))
@parametrize(broker_version=str(LATEST_0_10_0))
@parametrize(broker_version=str(LATEST_0_10_1))
@parametrize(broker_version=str(LATEST_0_10_2))
@parametrize(broker_version=str(LATEST_0_11_0))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_1_1))
def test_produce_consume(self, broker_version):
print("running producer_consumer_compat with broker_version = %s" % broker_version)
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000,
message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
Example 14: produce_and_consume
def produce_and_consume(self, producer_version, consumer_version, group):
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic,
throughput=self.producer_throughput,
message_validator=is_int,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.consumer.group_id = group
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
Example 15: test_produce_consume
def test_produce_consume(self, broker_version):
print("running producer_consumer_compat with broker_version = %s" % broker_version)
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000,
message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))