本文整理汇总了Python中kafkatest.services.verifiable_producer.VerifiableProducer.start方法的典型用法代码示例。如果您正苦于以下问题:Python VerifiableProducer.start方法的具体用法?Python VerifiableProducer.start怎么用?Python VerifiableProducer.start使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类kafkatest.services.verifiable_producer.VerifiableProducer的用法示例。
在下文中一共展示了VerifiableProducer.start方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: move_start_offset
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
def move_start_offset(self):
    """We move the start offset of the topic by writing really old messages
    and waiting for them to be cleaned up.
    """
    seed_producer = VerifiableProducer(self.test_context, 1, self.kafka, self.topic,
                                       throughput=-1, enable_idempotence=True,
                                       create_time=1000)
    seed_producer.start()
    wait_until(lambda: seed_producer.num_acked > 0,
               timeout_sec=30,
               err_msg="Failed to get an acknowledgement for %ds" % 30)

    # Let the topic fill up with messages destined for deletion. The 8-second
    # window matters: given the configured log roll time and retention check
    # interval, two segments should be rolled and deleted within this period.
    time.sleep(8)
    seed_producer.stop()
    self.logger.info("Seeded topic with %d messages which will be deleted" %\
                     seed_producer.num_acked)

    # The retention check interval is 5 seconds, so waiting another 6 seconds
    # ensures at least one more cleaning pass runs and the last seeded segment
    # is deleted. A more robust alternative to these timeouts would be to poll
    # each partition until its log start offset matches its end offset.
    time.sleep(6)
示例2: TestVerifiableProducer
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
class TestVerifiableProducer(Test):
    """Sanity checks on verifiable producer service class."""

    def __init__(self, test_context):
        super(TestVerifiableProducer, self).__init__(test_context)
        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context, num_nodes=1, zk=self.zk,
            topics={self.topic: {"partitions": 1, "replication-factor": 1}})

        self.num_messages = 1000
        # This will produce to source kafka cluster
        self.producer = VerifiableProducer(
            test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
            max_messages=self.num_messages, throughput=self.num_messages/5)

    def setUp(self):
        # Brokers require a running ZooKeeper ensemble, so start it first.
        self.zk.start()
        self.kafka.start()

    @cluster(num_nodes=3)
    @parametrize(producer_version=str(LATEST_0_8_2))
    @parametrize(producer_version=str(LATEST_0_9))
    @parametrize(producer_version=str(LATEST_0_10_0))
    @parametrize(producer_version=str(LATEST_0_10_1))
    @parametrize(producer_version=str(DEV_BRANCH))
    def test_simple_run(self, producer_version=DEV_BRANCH):
        """
        Test that we can start VerifiableProducer on the current branch snapshot version or against the 0.8.2 jar, and
        verify that we can produce a small number of messages.
        """
        producer_node = self.producer.nodes[0]
        producer_node.version = KafkaVersion(producer_version)
        self.producer.start()
        wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5,
                   err_msg="Producer failed to start in a reasonable amount of time.")

        # version.vstring (distutils.version.LooseVersion) is a tricky way of
        # making this check work with DEV_BRANCH.
        # With VerifiableProducer 0.8.X, both the current branch version and
        # 0.8.X show up, because the verifiable producer pulls some development
        # directories into its classpath.
        #
        # If this fails because 'ps .. | grep' couldn't find the process, the
        # login+grep performed by is_version() was slower than the time the
        # producer took to produce its messages. The easy fix is a lower
        # throughput= above; the good fix is a producer that does not terminate
        # until explicitly killed in this case.
        expected_versions = [producer_node.version.vstring]
        if producer_node.version <= LATEST_0_8_2:
            expected_versions.append(DEV_BRANCH.vstring)
        assert is_version(producer_node, expected_versions, logger=self.logger)

        self.producer.wait()
        acked = self.producer.num_acked
        assert acked == self.num_messages, "num_produced: %d, num_messages: %d" % (acked, self.num_messages)
示例3: SimpleConsumerShellTest
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
class SimpleConsumerShellTest(Test):
    """
    Tests SimpleConsumerShell tool
    """
    def __init__(self, test_context):
        super(SimpleConsumerShellTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {TOPIC: {"partitions": NUM_PARTITIONS, "replication-factor": REPLICATION_FACTOR}}

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self):
        # Kafka is started lazily, per test, rather than in setUp().
        self.kafka = KafkaService(self.test_context, self.num_brokers, self.zk, topics=self.topics)
        self.kafka.start()

    def run_producer(self):
        # This will produce to kafka cluster
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka,
                                           topic=TOPIC, throughput=1000, max_messages=MAX_MESSAGES)
        self.producer.start()
        wait_until(lambda: self.producer.num_acked == MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timeout awaiting messages to be produced and acked")

    def start_simple_consumer_shell(self):
        self.simple_consumer_shell = SimpleConsumerShell(self.test_context, 1, self.kafka, TOPIC)
        self.simple_consumer_shell.start()

    def test_simple_consumer_shell(self):
        """
        Tests if SimpleConsumerShell is fetching expected records
        :return: None
        """
        self.start_kafka()
        self.run_producer()
        self.start_simple_consumer_shell()

        # Assert that SimpleConsumerShell is fetching expected number of messages.
        # Expects MAX_MESSAGES + 1 newlines in the tool's output — presumably
        # one per record plus a final line; confirm against the tool's format.
        def received_all_messages():
            return self.simple_consumer_shell.get_output().count("\n") == (MAX_MESSAGES + 1)

        wait_until(received_all_messages, timeout_sec=10,
                   err_msg="Timed out waiting to receive expected number of messages.")
示例4: seed_messages
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
def seed_messages(self, topic, num_seed_messages):
    """Produce `num_seed_messages` integer-valued messages to `topic` and block
    until every one of them is acknowledged.

    Uses an idempotent producer so the seed data is written exactly once.

    :param topic: name of the topic to seed
    :param num_seed_messages: number of messages to produce
    :return: the producer's list of acknowledged messages (`acked`)
    """
    seed_timeout_sec = 10000
    seed_producer = VerifiableProducer(context=self.test_context,
                                       num_nodes=1,
                                       kafka=self.kafka,
                                       topic=topic,
                                       message_validator=is_int,
                                       max_messages=num_seed_messages,
                                       enable_idempotence=True)
    seed_producer.start()
    # Fix: the error message previously read self.num_seed_messages; the count
    # this method actually produces is the local parameter num_seed_messages,
    # and err_msg is evaluated eagerly, so a missing attribute would raise
    # AttributeError on every call.
    wait_until(lambda: seed_producer.num_acked >= num_seed_messages,
               timeout_sec=seed_timeout_sec,
               err_msg="Producer failed to produce messages %d in %ds." %\
                       (num_seed_messages, seed_timeout_sec))
    return seed_producer.acked
示例5: TestVerifiableProducer
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
class TestVerifiableProducer(Test):
    """Sanity checks on verifiable producer service class."""

    def __init__(self, test_context):
        super(TestVerifiableProducer, self).__init__(test_context)
        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context, num_nodes=1, zk=self.zk,
            topics={self.topic: {"partitions": 1, "replication-factor": 1}})

        self.num_messages = 1000
        # This will produce to source kafka cluster
        self.producer = VerifiableProducer(
            test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
            max_messages=self.num_messages, throughput=1000)

    def setUp(self):
        # Brokers require a running ZooKeeper ensemble, so start it first.
        self.zk.start()
        self.kafka.start()

    @parametrize(producer_version=str(LATEST_0_8_2))
    @parametrize(producer_version=str(LATEST_0_9))
    @parametrize(producer_version=str(TRUNK))
    def test_simple_run(self, producer_version=TRUNK):
        """
        Test that we can start VerifiableProducer on trunk or against the 0.8.2 jar, and
        verify that we can produce a small number of messages.
        """
        producer_node = self.producer.nodes[0]
        producer_node.version = KafkaVersion(producer_version)
        self.producer.start()
        wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5,
                   err_msg="Producer failed to start in a reasonable amount of time.")

        # version.vstring (distutils.version.LooseVersion) is a tricky way of
        # making this check work with TRUNK. With VerifiableProducer 0.8.X both
        # the trunk version and 0.8.X show up, because the verifiable producer
        # pulls some trunk directories into its classpath.
        expected_versions = [producer_node.version.vstring]
        if producer_node.version <= LATEST_0_8_2:
            expected_versions.append(TRUNK.vstring)
        assert is_version(producer_node, expected_versions)

        self.producer.wait()
        acked = self.producer.num_acked
        assert acked == self.num_messages, "num_produced: %d, num_messages: %d" % (acked, self.num_messages)
示例6: ReplicaVerificationToolTest
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
class ReplicaVerificationToolTest(Test):
    """
    Tests ReplicaVerificationTool
    """
    def __init__(self, test_context):
        super(ReplicaVerificationToolTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 2
        self.messages_received_count = 0
        self.topics = {TOPIC: {'partitions': 1, 'replication-factor': 2}}

        self.zk = ZookeeperService(test_context, self.num_zk)
        self.kafka = None
        self.producer = None
        self.replica_verifier = None

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        # Brokers are started lazily so each test can pick its security settings.
        self.kafka = KafkaService(
            self.test_context, self.num_brokers, self.zk,
            security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol,
            topics=self.topics)
        self.kafka.start()

    def start_replica_verification_tool(self, security_protocol):
        self.replica_verifier = ReplicaVerificationTool(
            self.test_context, 1, self.kafka, TOPIC,
            report_interval_ms=REPORT_INTERVAL_MS,
            security_protocol=security_protocol)
        self.replica_verifier.start()

    def start_producer(self, max_messages, acks, timeout):
        # This will produce to kafka cluster
        self.producer = VerifiableProducer(
            self.test_context, num_nodes=1, kafka=self.kafka, topic=TOPIC,
            throughput=1000, acks=acks, max_messages=max_messages)
        acked_before_start = self.producer.num_acked
        self.logger.info("current_acked = %s" % acked_before_start)
        self.producer.start()
        # With acks=0 nothing is ever acknowledged, so only wait when acks != 0.
        wait_until(lambda: acks == 0 or self.producer.num_acked >= acked_before_start + max_messages,
                   timeout_sec=timeout,
                   err_msg="Timeout awaiting messages to be produced and acked")

    def stop_producer(self):
        self.producer.stop()

    def test_replica_lags(self, security_protocol='PLAINTEXT'):
        """
        Tests ReplicaVerificationTool
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_replica_verification_tool(security_protocol)

        # Verify that there is no lag in replicas and is correctly reported by ReplicaVerificationTool
        self.start_producer(max_messages=10, acks=-1, timeout=15)
        wait_until(lambda: self.replica_verifier.get_lag_for_partition(TOPIC, 0) == 0,
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach zero replica lags.")
        self.stop_producer()

        # Verify that there is lag in replicas and is correctly reported by ReplicaVerificationTool
        self.start_producer(max_messages=1000, acks=0, timeout=5)
        wait_until(lambda: self.replica_verifier.get_lag_for_partition(TOPIC, 0) > 0,
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach non-zero number of replica lags.")
示例7: ClientCompatibilityTest
# 需要导入模块: from kafkatest.services.verifiable_producer import VerifiableProducer [as 别名]
# 或者: from kafkatest.services.verifiable_producer.VerifiableProducer import start [as 别名]
class ClientCompatibilityTest(Test):
    """Checks compatibility of new (0.9.X+) clients against old (0.8.X) brokers
    and of old clients against new brokers."""

    def __init__(self, test_context):
        super(ClientCompatibilityTest, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=LATEST_0_8_2, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
        self.zk.start()
        self.kafka.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1

    def test_producer_back_compatibility(self):
        """Run 0.9.X java producer against 0.8.X brokers.
        This test documents the fact that java producer v0.9.0.0 and later won't run against 0.8.X brokers
        the broker responds to a V1 produce request with a V0 fetch response; the client then tries to parse this V0
        produce response as a V1 produce response, resulting in a BufferUnderflowException
        """
        self.producer = VerifiableProducer(
            self.test_context, self.num_producers, self.kafka, self.topic, max_messages=100,
            throughput=self.producer_throughput, version=TRUNK)

        node = self.producer.nodes[0]
        # Fix: the sentinel raise used to live inside this try, whose bare
        # `except:` swallowed it — the test could never fail even when the
        # producer unexpectedly succeeded. Track success with a flag and raise
        # after the except block instead; also narrow to `except Exception` so
        # KeyboardInterrupt/SystemExit are not silently eaten.
        producer_succeeded = False
        try:
            self.producer.start()
            self.producer.wait()
            producer_succeeded = True
        except Exception:
            # Expected: the new producer cannot run against the 0.8.X broker.
            pass
        finally:
            self.producer.kill_node(node, clean_shutdown=False)

        if producer_succeeded:
            raise Exception("0.9.X java producer should not run successfully against 0.8.X broker")

        self.logger.info("Grepping producer log for expected error type")
        node.account.ssh("egrep -m 1 %s %s" % ("\"org\.apache\.kafka\.common\.protocol\.types\.SchemaException.*throttle_time_ms.*: java\.nio\.BufferUnderflowException\"", self.producer.LOG_FILE), allow_fail=False)

    def test_consumer_back_compatibility(self):
        """Run the scala 0.8.X consumer against an 0.9.X cluster.
        Expect 0.8.X scala consumer to fail with buffer underflow. This error is the same as when an 0.9.X producer
        is run against an 0.8.X broker: the broker responds to a V1 fetch request with a V0 fetch response; the
        client then tries to parse this V0 fetch response as a V1 fetch response, resulting in a BufferUnderflowException
        """
        num_messages = 10
        self.producer = VerifiableProducer(
            self.test_context, self.num_producers, self.kafka, self.topic, max_messages=num_messages,
            throughput=self.producer_throughput, version=LATEST_0_8_2)

        self.consumer = ConsoleConsumer(
            self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-09X",
            consumer_timeout_ms=10000, message_validator=is_int, version=TRUNK)

        self.old_consumer = ConsoleConsumer(
            self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-08X",
            consumer_timeout_ms=10000, message_validator=is_int, version=LATEST_0_8_2)

        self.producer.run()
        self.consumer.run()
        self.old_consumer.run()

        consumed = len(self.consumer.messages_consumed[1])
        old_consumed = len(self.old_consumer.messages_consumed[1])
        assert old_consumed == num_messages, "Expected 0.8.X scala consumer to consume %d, but only got %d" % (num_messages, old_consumed)
        assert consumed == 0, "Expected 0.9.X scala consumer to fail to consume any messages, but got %d" % consumed

        self.logger.info("Grepping consumer log for expected error type")
        node = self.consumer.nodes[0]
        node.account.ssh("egrep -m 1 %s %s" % ("\"java\.nio\.BufferUnderflowException\"", self.consumer.LOG_FILE), allow_fail=False)