This article collects typical usage examples of the random_string function from Python's test.testutil module, as used in kafka-python's test suite. If you have been wondering what random_string does, how it is used, or want to see it in real code, the curated examples below should help.
The following shows 15 code examples of random_string, ordered by popularity.
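The helper itself is not shown on this page. For reference, here is a minimal sketch of what such a random_string helper typically looks like; this is an assumption for illustration, and the actual test.testutil implementation may differ:

import random
import string

def random_string(length):
    # Purely illustrative sketch -- not the actual test.testutil code.
    # Returns `length` random ASCII letters as a str; some versions of the
    # real helper return bytes instead, which is why a few examples below
    # call .encode('utf-8') on the result.
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))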
Example 1: test_end_to_end
def test_end_to_end(kafka_broker):
    connect_str = 'localhost:' + str(kafka_broker.port)
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             max_block_ms=10000,
                             value_serializer=str.encode)
    consumer = KafkaConsumer(bootstrap_servers=connect_str,
                             group_id=None,
                             consumer_timeout_ms=10000,
                             auto_offset_reset='earliest',
                             value_deserializer=bytes.decode)

    topic = random_string(5)

    for i in range(1000):
        producer.send(topic, 'msg %d' % i)
    producer.flush()
    producer.close()

    consumer.subscribe([topic])
    msgs = set()
    for i in range(1000):
        try:
            msgs.add(next(consumer).value)
        except StopIteration:
            break

    assert msgs == set(['msg %d' % i for i in range(1000)])
Example 2: _send_random_messages
def _send_random_messages(self, producer, topic, partition, n):
    for j in range(n):
        logging.debug('_send_random_message to %s:%d -- try %d', topic, partition, j)
        resp = producer.send_messages(topic, partition, random_string(10))
        if len(resp) > 0:
            self.assertEqual(resp[0].error, 0)
        logging.debug('_send_random_message to %s:%d -- try %d success', topic, partition, j)
Example 3: test_heartbeat_thread
def test_heartbeat_thread(kafka_broker, topic):
    group_id = 'test-group-' + random_string(6)
    consumer = KafkaConsumer(topic,
                             bootstrap_servers=get_connect_str(kafka_broker),
                             group_id=group_id,
                             heartbeat_interval_ms=500)

    # poll until we have joined group / have assignment
    while not consumer.assignment():
        consumer.poll(timeout_ms=100)

    assert consumer._coordinator.state is MemberState.STABLE
    last_poll = consumer._coordinator.heartbeat.last_poll
    last_beat = consumer._coordinator.heartbeat.last_send

    timeout = time.time() + 30
    while True:
        if time.time() > timeout:
            raise RuntimeError('timeout waiting for heartbeat')

        if consumer._coordinator.heartbeat.last_send > last_beat:
            break

        time.sleep(0.5)

    assert consumer._coordinator.heartbeat.last_poll == last_poll
    consumer.poll(timeout_ms=100)
    assert consumer._coordinator.heartbeat.last_poll > last_poll
    consumer.close()
Example 4: test_huge_messages
def test_huge_messages(self):
    huge_message, = self.send_messages(0, [
        create_message(random_string(MAX_FETCH_BUFFER_SIZE_BYTES + 10)),
    ])

    # Create a consumer with the default buffer size
    consumer = self.consumer()

    # This consumer fails to get the message
    with self.assertRaises(ConsumerFetchSizeTooSmall):
        consumer.get_message(False, 0.1)

    consumer.stop()

    # Create a consumer with no fetch size limit
    big_consumer = self.consumer(
        max_buffer_size=None,
        partitions=[0],
    )

    # Seek to the last message
    big_consumer.seek(-1, 2)

    # Consume giant message successfully
    message = big_consumer.get_message(block=False, timeout=10)
    self.assertIsNotNone(message)
    self.assertEqual(message.message.value, huge_message)

    big_consumer.stop()
Example 5: test_lz4_incremental
def test_lz4_incremental():
    for i in xrange(1000):
        # The lz4 max single-block size is 4MB; 100 bytes * 50000 is ~5MB of
        # payload, so this makes sure we test with multiple blocks.
        b1 = random_string(100).encode('utf-8') * 50000
        b2 = lz4_decode(lz4_encode(b1))
        assert len(b1) == len(b2)
        assert b1 == b2
Example 6: setUpClass
def setUpClass(cls):
    if not os.environ.get('KAFKA_VERSION'):
        return

    cls.zk = ZookeeperFixture.instance()
    chroot = random_string(10)
    cls.server1 = KafkaFixture.instance(0, cls.zk.host, cls.zk.port, chroot)
    cls.server2 = KafkaFixture.instance(1, cls.zk.host, cls.zk.port, chroot)

    cls.server = cls.server1  # Bootstrapping server
Example 7: _send_random_messages
def _send_random_messages(self, producer, topic, partition, n):
    for j in range(n):
        msg = 'msg {0}: {1}'.format(j, random_string(10))
        log.debug('_send_random_message %s to %s:%d', msg, topic, partition)
        while True:
            try:
                producer.send_messages(topic, partition, msg.encode('utf-8'))
            except Exception:
                log.exception('failure in _send_random_messages - retrying')
                continue
            else:
                break
Example 8: test_large_messages
def test_large_messages(self):
    # Produce 10 "normal" size messages
    small_messages = self.send_messages(0, [str(x) for x in range(10)])

    # Produce 10 messages that are large (bigger than default fetch size)
    large_messages = self.send_messages(0, [random_string(5000) for x in range(10)])

    # Consumer should still get all of them
    consumer = self.consumer()

    expected_messages = set(small_messages + large_messages)
    actual_messages = set([x.message.value for x in consumer])
    self.assertEqual(expected_messages, actual_messages)

    consumer.stop()
Example 9: setUpClass
def setUpClass(cls):  # noqa
    if not os.environ.get('KAFKA_VERSION'):
        return

    zk_chroot = random_string(10)
    replicas = 2
    partitions = 2

    # mini zookeeper, 2 kafka brokers
    cls.zk = ZookeeperFixture.instance()
    kk_args = [cls.zk.host, cls.zk.port, zk_chroot, replicas, partitions]
    cls.brokers = [KafkaFixture.instance(i, *kk_args) for i in range(replicas)]

    hosts = ['%s:%d' % (b.host, b.port) for b in cls.brokers]
    cls.client = KafkaClient(hosts)
Example 10: setUp
def setUp(self):
    if not os.environ.get('KAFKA_VERSION'):
        self.skipTest('integration test requires KAFKA_VERSION')

    zk_chroot = random_string(10)
    replicas = 3
    partitions = 3

    # mini zookeeper, 3 kafka brokers
    self.zk = ZookeeperFixture.instance()
    kk_args = [self.zk.host, self.zk.port, zk_chroot, replicas, partitions]
    self.brokers = [KafkaFixture.instance(i, *kk_args) for i in range(replicas)]

    hosts = ['%s:%d' % (b.host, b.port) for b in self.brokers]
    self.client = SimpleClient(hosts, timeout=2)
    super(TestFailover, self).setUp()
Example 11: setUp
def setUp(self):
    if not os.environ.get('KAFKA_VERSION'):
        return

    zk_chroot = random_string(10)
    replicas = 2
    partitions = 2

    # mini zookeeper, 2 kafka brokers
    self.zk = ZookeeperFixture.instance()
    kk_args = [self.zk.host, self.zk.port, zk_chroot, replicas, partitions]
    self.brokers = [KafkaFixture.instance(i, *kk_args) for i in range(replicas)]

    hosts = ['%s:%d' % (b.host, b.port) for b in self.brokers]
    self.client = KafkaClient(hosts)
    super(TestFailover, self).setUp()
Example 12: test_kafka_producer_proper_record_metadata
def test_kafka_producer_proper_record_metadata(kafka_broker, compression):
    connect_str = ':'.join([kafka_broker.host, str(kafka_broker.port)])
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             retries=5,
                             max_block_ms=30000,
                             compression_type=compression)
    magic = producer._max_usable_produce_magic()

    topic = random_string(5)
    future = producer.send(
        topic,
        value=b"Simple value", key=b"Simple key", timestamp_ms=9999999,
        partition=0)
    record = future.get(timeout=5)
    assert record is not None
    assert record.topic == topic
    assert record.partition == 0
    assert record.topic_partition == TopicPartition(topic, 0)
    assert record.offset == 0

    if magic >= 1:
        assert record.timestamp == 9999999
    else:
        assert record.timestamp == -1  # NO_TIMESTAMP

    if magic >= 2:
        assert record.checksum is None
    elif magic == 1:
        assert record.checksum == 1370034956
    else:
        assert record.checksum == 3296137851

    assert record.serialized_key_size == 10
    assert record.serialized_value_size == 12

    # generated timestamp case is skipped for broker 0.9 and below
    if magic == 0:
        return

    send_time = time.time() * 1000
    future = producer.send(
        topic,
        value=b"Simple value", key=b"Simple key", timestamp_ms=None,
        partition=0)
    record = future.get(timeout=5)
    assert abs(record.timestamp - send_time) <= 1000  # Allow 1s deviation
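The branching on magic above tracks the Kafka record-format ("magic") versions: v0 has no timestamps and a per-message CRC, v1 adds timestamps (which is also why its CRC differs from v0 for the same payload), and v2 keeps a batch-level CRC only, so the per-record checksum is reported as None. A minimal sketch of that mapping, with a made-up helper name, purely for illustration:

def expected_timestamp_and_checksum(magic, timestamp_ms, crc_v0, crc_v1):
    # Timestamps only exist from magic 1 (broker 0.10) onward; -1 means NO_TIMESTAMP.
    timestamp = timestamp_ms if magic >= 1 else -1
    # Magic 2 (broker 0.11+) batches carry a batch-level CRC only, so the
    # per-record checksum is None; magic 0 and 1 have per-message CRCs.
    checksum = None if magic >= 2 else (crc_v1 if magic == 1 else crc_v0)
    return timestamp, checksum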
Example 13: assert_message_count
def assert_message_count(self, topic, check_count, timeout=10, partitions=None):
    hosts = ",".join(["%s:%d" % (broker.host, broker.port) for broker in self.brokers])

    client = KafkaClient(hosts)
    group = random_string(10)
    consumer = SimpleConsumer(client, group, topic,
                              partitions=partitions,
                              auto_commit=False,
                              iter_timeout=timeout)

    started_at = time.time()
    pending = consumer.pending(partitions)

    # Keep checking if it isn't immediately correct, subject to timeout
    while pending != check_count and (time.time() - started_at < timeout):
        pending = consumer.pending(partitions)

    consumer.stop()
    client.close()

    self.assertEqual(pending, check_count)
Example 14: test_kafka_consumer__offset_commit_resume_dual
def test_kafka_consumer__offset_commit_resume_dual(self):
    GROUP_ID = random_string(10).encode('utf-8')

    self.send_messages(0, range(0, 100))
    self.send_messages(1, range(100, 200))

    # Start a consumer
    consumer1 = self.kafka_consumer(
        group_id=GROUP_ID,
        auto_commit_enable=True,
        auto_commit_interval_ms=None,
        auto_commit_interval_messages=20,
        auto_offset_reset='smallest',
        offset_storage='kafka',
    )

    # Grab the first 195 messages
    output_msgs1 = []
    for _ in xrange(195):
        m = consumer1.next()
        output_msgs1.append(m)
        consumer1.task_done(m)
    self.assert_message_count(output_msgs1, 195)

    # The total committed offset across both partitions should be at 180
    consumer2 = self.kafka_consumer(
        group_id=GROUP_ID,
        auto_commit_enable=True,
        auto_commit_interval_ms=None,
        auto_commit_interval_messages=20,
        consumer_timeout_ms=100,
        auto_offset_reset='smallest',
        offset_storage='dual',
    )

    # Consume messages 181-200
    output_msgs2 = []
    with self.assertRaises(ConsumerTimeout):
        while True:
            m = consumer2.next()
            output_msgs2.append(m)
    self.assert_message_count(output_msgs2, 20)
    self.assertEqual(len(set(output_msgs1) & set(output_msgs2)), 15)
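The final two assertions follow from the commit-interval arithmetic: consumer1 reads 195 messages but commits every 20, so its last commit lands at position 180 across both partitions, and consumer2 (same group) resumes from there. A quick back-of-the-envelope check, with illustrative variable names:

consumed = 195                     # messages consumer1 actually read
interval = 20                      # auto_commit_interval_messages
committed = consumed // interval * interval  # last committed position: 180
total = 200
assert total - committed == 20     # messages consumer2 consumes (181-200)
assert consumed - committed == 15  # overlap seen by both consumers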
Example 15: test_end_to_end
def test_end_to_end(kafka_broker, compression):
    if compression == 'lz4':
        # LZ4 requires 0.8.2
        if version() < (0, 8, 2):
            return
        # LZ4 python libs don't work on python2.6
        elif sys.version_info < (2, 7):
            return

    connect_str = 'localhost:' + str(kafka_broker.port)
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             retries=5,
                             max_block_ms=10000,
                             compression_type=compression,
                             value_serializer=str.encode)
    consumer = KafkaConsumer(bootstrap_servers=connect_str,
                             group_id=None,
                             consumer_timeout_ms=10000,
                             auto_offset_reset='earliest',
                             value_deserializer=bytes.decode)

    topic = random_string(5)

    messages = 100
    futures = []
    for i in range(messages):
        futures.append(producer.send(topic, 'msg %d' % i))
    ret = [f.get(timeout=30) for f in futures]
    assert len(ret) == messages
    producer.close()

    consumer.subscribe([topic])
    msgs = set()
    for i in range(messages):
        try:
            msgs.add(next(consumer).value)
        except StopIteration:
            break

    assert msgs == set(['msg %d' % i for i in range(messages)])