

Python VerifiableProducer.each_produced_at_least Method Code Examples

This article collects typical usage examples of the Python method kafkatest.services.verifiable_producer.VerifiableProducer.each_produced_at_least. If you are wondering what this method does or how to use it, the curated examples below should help. You can also explore further usage examples of the containing class, kafkatest.services.verifiable_producer.VerifiableProducer.


Five code examples of the VerifiableProducer.each_produced_at_least method are shown below, sorted by popularity by default.
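All five examples share the same pattern: start a VerifiableProducer group, then block until every producer in the group has acked at least messages_per_producer messages. Here is a minimal sketch of that pattern; the bookkeeping in illustrative_each_produced_at_least is an assumption about the method's semantics, not kafkatest's actual implementation.

from ducktape.utils.util import wait_until

# Illustrative semantics only: True once every producer node has acked at least
# `count` messages. (An assumption; see kafkatest/services/verifiable_producer.py
# for the real implementation.)
def illustrative_each_produced_at_least(produced_count_by_node, count):
    return all(acked >= count for acked in produced_count_by_node.values())

# The usage pattern common to all examples below: poll until the producer group
# reaches the target, failing with err_msg after timeout_sec.
def wait_for_all_produced(producer, messages_per_producer):
    wait_until(lambda: producer.each_produced_at_least(messages_per_producer),
               timeout_sec=120, backoff_sec=1,
               err_msg="Producer did not produce all messages in a reasonable amount of time")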

Example 1: CompressionTest

# Required import: from kafkatest.services.verifiable_producer import VerifiableProducer [as alias]
# Or: from kafkatest.services.verifiable_producer.VerifiableProducer import each_produced_at_least [as alias]
class CompressionTest(ProduceConsumeValidateTest):
    """
    These tests validate produce / consume for compressed topics.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(CompressionTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, topics={self.topic: {
                                                                    "partitions": 10,
                                                                    "replication-factor": 1}})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 4
        self.messages_per_producer = 1000
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(CompressionTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    @parametrize(compression_types=["snappy", "gzip", "lz4", "none"], new_consumer=True)
    @parametrize(compression_types=["snappy", "gzip", "lz4", "none"], new_consumer=False)
    def test_compressed_topic(self, compression_types, new_consumer):
        """Test produce => consume => validate for compressed topics
        Setup: 1 zk, 1 kafka node, 1 topic with partitions=10, replication-factor=1

        The compression_types parameter gives a list of compression types (or no compression if
        "none"). Each producer in a VerifiableProducer group (num_producers = 4) uses the
        compression type from the list that corresponds to the producer's index in the group.

            - Produce messages in the background
            - Consume messages in the background
            - Stop producing, and finish consuming
            - Validate that every acked message was consumed
        """

        self.kafka.security_protocol = "PLAINTEXT"
        self.kafka.interbroker_security_protocol = self.kafka.security_protocol
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int_with_prefix,
                                           compression_types=compression_types)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
                                        new_consumer=new_consumer, consumer_timeout_ms=60000,
                                        message_validator=is_int_with_prefix)
        self.kafka.start()

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
Developer: Paulf-999, Project: kafka, Lines: 61, Source: compression_test.py
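The index-based compression assignment described in example 1's docstring can be pictured with a small sketch. The compression_for helper is hypothetical (VerifiableProducer applies this mapping internally when given compression_types); it is shown only to make the mapping concrete.

def compression_for(compression_types, producer_index):
    # Producer i in the group uses compression_types[i]; "none" means uncompressed.
    ctype = compression_types[producer_index]
    return None if ctype == "none" else ctype

# With compression_types=["snappy", "gzip", "lz4", "none"] and num_producers=4,
# producers 0-3 use snappy, gzip, lz4, and no compression respectively.
assert compression_for(["snappy", "gzip", "lz4", "none"], 3) is None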

Example 2: ClientCompatibilityTestNewBroker

# Required import: from kafkatest.services.verifiable_producer import VerifiableProducer [as alias]
# Or: from kafkatest.services.verifiable_producer.VerifiableProducer import each_produced_at_least [as alias]
class ClientCompatibilityTestNewBroker(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(ClientCompatibilityTestNewBroker, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
            
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 1000

    @cluster(num_nodes=6)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["none"], new_consumer=False, timestamp_type=None)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_1_1), consumer_version=str(LATEST_1_1), compression_types=["lz4"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_1_0), consumer_version=str(LATEST_1_0), compression_types=["none"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_11_0), consumer_version=str(LATEST_0_11_0), compression_types=["gzip"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_10_2), consumer_version=str(LATEST_0_10_2), compression_types=["lz4"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_10_1), consumer_version=str(LATEST_0_10_1), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_10_0), consumer_version=str(LATEST_0_10_0), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=None)
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=None)
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_8_2), consumer_version=str(LATEST_0_8_2), compression_types=["none"], new_consumer=False, timestamp_type=None)
    def test_compatibility(self, producer_version, consumer_version, compression_types, new_consumer=True, timestamp_type=None):

        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
        for node in self.kafka.nodes:
            if timestamp_type is not None:
                node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
        self.kafka.start()
         
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           compression_types=compression_types,
                                           version=KafkaVersion(producer_version))

        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
Developer: harshach, Project: kafka, Lines: 59, Source: compatibility_test_new_broker_test.py
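In example 2, the per-node loop overrides each broker's message timestamp type before startup. The following hedged sketch shows what that override amounts to; the render_override helper is illustrative (kafkatest renders node.config into server.properties via its own templates), and the value of config_property.MESSAGE_TIMESTAMP_TYPE below is an assumption.

MESSAGE_TIMESTAMP_TYPE = "message.timestamp.type"  # assumed value of config_property.MESSAGE_TIMESTAMP_TYPE

def render_override(timestamp_type):
    # No override when the parametrized timestamp_type is None; otherwise every
    # broker gets an explicit message.timestamp.type entry in its config.
    return {} if timestamp_type is None else {MESSAGE_TIMESTAMP_TYPE: timestamp_type}

assert render_override("LogAppendTime") == {"message.timestamp.type": "LogAppendTime"}
assert render_override(None) == {}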

Example 3: ClientCompatibilityProduceConsumeTest

# Required import: from kafkatest.services.verifiable_producer import VerifiableProducer [as alias]
# Or: from kafkatest.services.verifiable_producer.VerifiableProducer import each_produced_at_least [as alias]
class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest):
    """
    These tests validate that we can use a new client to produce and consume from older brokers.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=3)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
                                                                    "partitions": 10,
                                                                    "replication-factor": 2}})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 2
        self.messages_per_producer = 1000
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(ClientCompatibilityProduceConsumeTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    @parametrize(broker_version=str(DEV_BRANCH))
    @parametrize(broker_version=str(LATEST_0_10_0))
    @parametrize(broker_version=str(LATEST_0_10_1))
    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_11_0))
    @parametrize(broker_version=str(LATEST_1_0))
    @parametrize(broker_version=str(LATEST_1_1))
    def test_produce_consume(self, broker_version):
        print("running producer_consumer_compat with broker_version = %s" % broker_version)
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.security_protocol = "PLAINTEXT"
        self.kafka.interbroker_security_protocol = self.kafka.security_protocol
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int_with_prefix)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
                                        consumer_timeout_ms=60000,
                                        message_validator=is_int_with_prefix)
        self.kafka.start()

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
Developer: harshach, Project: kafka, Lines: 54, Source: client_compatibility_produce_consume_test.py

Example 4: MessageFormatChangeTest

# Required import: from kafkatest.services.verifiable_producer import VerifiableProducer [as alias]
# Or: from kafkatest.services.verifiable_producer.VerifiableProducer import each_produced_at_least [as alias]
class MessageFormatChangeTest(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(MessageFormatChangeTest, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
            
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 100

    def produce_and_consume(self, producer_version, consumer_version, group):
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic,
                                           throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           version=KafkaVersion(producer_version))
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))
        self.consumer.group_id = group
        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
        
    @parametrize(producer_version=str(TRUNK), consumer_version=str(TRUNK))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x and 0.10.x producers and consumers 
        that produce to and consume from a 0.10.x cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic back to 0.9.0 on the fly.
        - The producers and consumers should not have any issue.
        - Note that for 0.9.x consumers/producers we only do steps 1 and 2
        """
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
       
        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
            self.logger.info("Third format change back to 0.9.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
            self.produce_and_consume(producer_version, consumer_version, "group3")
Developer: hk-Lei, Project: kafka-0.10.0.0-annotated, Lines: 64, Source: message_format_change_test.py

Example 5: MessageFormatChangeTest

# Required import: from kafkatest.services.verifiable_producer import VerifiableProducer [as alias]
# Or: from kafkatest.services.verifiable_producer.VerifiableProducer import each_produced_at_least [as alias]
class MessageFormatChangeTest(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(MessageFormatChangeTest, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
            
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 100

    def produce_and_consume(self, producer_version, consumer_version, group):
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic,
                                           throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           version=KafkaVersion(producer_version))
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))
        self.consumer.group_id = group
        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))

    @cluster(num_nodes=12)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH))
    @parametrize(producer_version=str(LATEST_0_10), consumer_version=str(LATEST_0_10))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers
        that produce to and consume from a DEV_BRANCH cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic to 0.11.0 on the fly.
        4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)
        - The producers and consumers should not have any issue.

        Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks
        older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with
        version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the
        handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4
        is similar to step 2 and it didn't seem worth it to increase the cluster size to in order to add a step 5 that
        would change the message format version for the topic back to 0.9.0.0.
        """
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
       
        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        self.logger.info("Third format change to 0.11.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
        self.produce_and_consume(producer_version, consumer_version, "group3")

        if producer_version == str(DEV_BRANCH) and consumer_version == str(DEV_BRANCH):
            self.logger.info("Fourth format change back to 0.10.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
            self.produce_and_consume(producer_version, consumer_version, "group4")
Developer: harshach, Project: kafka, Lines: 77, Source: message_format_change_test.py
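The note on step 4 in example 5's docstring can be restated with a purely schematic sketch: after a downgrade, fetch responses may still carry batches in the on-disk 0.11.0 format (no down-conversion), which pre-0.11 consumers cannot parse. None of this is broker code; it only makes the reasoning above concrete.

def fetch_response_format(on_disk_format, topic_format_config):
    # The optimisation described above: batches already written in 0.11.0 format
    # are returned as-is, even though the topic config now says 0.10.0.
    return on_disk_format

# A 0.10.x consumer would receive 0.11.0 batches it cannot decode:
assert fetch_response_format("0.11.0", "0.10.0") == "0.11.0"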


Note: The kafkatest.services.verifiable_producer.VerifiableProducer.each_produced_at_least method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.