This article collects typical usage examples of the Python method aiokafka.client.AIOKafkaClient.close. If you are wondering what AIOKafkaClient.close does, how to call it, or want ready-made usage examples, the curated code samples below may help. You can also explore the other methods of the aiokafka.client.AIOKafkaClient class.
The following 15 code examples of AIOKafkaClient.close are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
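Before diving into the examples, here is a minimal sketch of the lifecycle that almost every example below follows: construct the client, bootstrap it, do some work, and always close it so its broker connections are released. The broker address and the helper name fetch_metadata are illustrative assumptions, not part of aiokafka.

import asyncio
from aiokafka.client import AIOKafkaClient

@asyncio.coroutine
def fetch_metadata(loop):
    # A minimal sketch, assuming a Kafka broker at localhost:9092.
    client = AIOKafkaClient(loop=loop, bootstrap_servers='localhost:9092')
    yield from client.bootstrap()  # connect and load initial cluster metadata
    try:
        metadata = yield from client.fetch_all_metadata()
        print(metadata.topics())
    finally:
        yield from client.close()  # always release the broker connections

loop = asyncio.get_event_loop()
loop.run_until_complete(fetch_metadata(loop))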
Example 1: test_coordinator_workflow
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_coordinator_workflow(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    yield from self.wait_topic(client, 'topic2')

    subscription = SubscriptionState('latest')
    subscription.subscribe(topics=('topic1', 'topic2'))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)

    self.assertEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, True)

    yield from coordinator.ensure_coordinator_known()
    self.assertNotEqual(coordinator.coordinator_id, None)

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    tp_list = subscription.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    # start second coordinator
    client2 = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client2.bootstrap()
    subscription2 = SubscriptionState('latest')
    subscription2.subscribe(topics=('topic1', 'topic2'))
    coordinator2 = GroupCoordinator(
        client2, subscription2, loop=self.loop,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    yield from coordinator2.ensure_active_group()
    yield from coordinator.ensure_active_group()

    tp_list = subscription.assigned_partitions()
    self.assertEqual(len(tp_list), 2)
    tp_list2 = subscription2.assigned_partitions()
    self.assertEqual(len(tp_list2), 2)
    tp_list |= tp_list2
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    yield from coordinator.close()
    yield from client.close()

    yield from asyncio.sleep(0.6, loop=self.loop)  # wait heartbeat

    yield from coordinator2.ensure_active_group()
    tp_list = subscription2.assigned_partitions()
    self.assertEqual(tp_list, set([('topic1', 0), ('topic1', 1),
                                   ('topic2', 0), ('topic2', 1)]))

    yield from coordinator2.close()
    yield from client2.close()
Example 2: test_close
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_close(self):
    client = AIOKafkaClient(["broker_1:4567"], loop=self.loop)
    m1 = mock.Mock()
    m2 = mock.Mock()
    client._conns = {("host1", 4567): m1, ("host2", 5678): m2}

    client.close()
    self.assertEqual({}, client._conns)
    m1.close.assert_called_with()
    m2.close.assert_called_with()
Example 3: test_failed_group_join
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_failed_group_join(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    subscription = SubscriptionState("latest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop, retry_backoff_ms=10)
    yield from client.bootstrap()
    yield from self.wait_topic(client, "topic1")

    mocked = mock.MagicMock()
    coordinator._client = mocked

    # no exception expected, just wait
    mocked.send.side_effect = Errors.GroupLoadInProgressError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    mocked.send.side_effect = Errors.InvalidGroupIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)

    # no exception expected, member_id should be reset
    coordinator.member_id = "some_invalid_member_id"
    mocked.send.side_effect = Errors.UnknownMemberIdError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)

    # no exception expected, coordinator_id should be reset
    coordinator.coordinator_id = "some_id"
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    yield from coordinator._perform_group_join()
    self.assertEqual(coordinator.need_rejoin(), True)
    self.assertEqual(coordinator.coordinator_id, None)

    yield from client.close()
Example 4: test_get_offsets
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_get_offsets(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    subscription = SubscriptionState("earliest")
    subscription.subscribe(topics=("topic1",))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop, group_id="getoffsets-group")
    yield from self.wait_topic(client, "topic1")

    producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=self.hosts)
    yield from producer.start()
    yield from producer.send("topic1", b"first msg", partition=0)
    yield from producer.send("topic1", b"second msg", partition=1)
    yield from producer.send("topic1", b"third msg", partition=1)
    yield from producer.stop()

    yield from coordinator.ensure_active_group()

    offsets = {
        TopicPartition("topic1", 0): OffsetAndMetadata(1, ""),
        TopicPartition("topic1", 1): OffsetAndMetadata(2, ""),
    }
    yield from coordinator.commit_offsets(offsets)

    self.assertEqual(subscription.all_consumed_offsets(), {})
    subscription.seek(("topic1", 0), 0)
    subscription.seek(("topic1", 1), 0)
    yield from coordinator.refresh_committed_offsets()
    self.assertEqual(subscription.assignment[("topic1", 0)].committed, 1)
    self.assertEqual(subscription.assignment[("topic1", 1)].committed, 2)

    yield from coordinator.close()
    yield from client.close()
Example 5: test_subscribe_pattern
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_subscribe_pattern(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()

    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState('latest')
    subscription.subscribe(pattern='st-topic*', listener=test_listener)
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='subs-pattern-group')

    yield from self.wait_topic(client, 'st-topic1')
    yield from self.wait_topic(client, 'st-topic2')

    yield from coordinator.ensure_active_group()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.rejoin_needed, False)

    tp_list = subscription.assigned_partitions()
    assigned = set([('st-topic1', 0), ('st-topic1', 1),
                    ('st-topic2', 0), ('st-topic2', 1)])
    self.assertEqual(tp_list, assigned)

    self.assertEqual(test_listener.revoked, [set([])])
    self.assertEqual(test_listener.assigned, [assigned])

    yield from coordinator.close()
    yield from client.close()
Example 6: test_send_request
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_send_request(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    node_id = client.get_random_node()
    resp = yield from client.send(node_id, MetadataRequest([]))
    self.assertTrue(isinstance(resp, MetadataResponse))
    yield from client.close()
Example 7: test_offsets_failed_scenarios
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_offsets_failed_scenarios(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='test-offsets-group')

    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    yield from coordinator.commit_offsets(offsets)
    with mock.patch('kafka.common.for_code') as mocked:
        mocked.return_value = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.GroupLoadInProgressError
        with self.assertRaises(Errors.GroupLoadInProgressError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.RebalanceInProgressError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.commit_offsets(offsets)

        mocked.return_value = Errors.NotCoordinatorForGroupError
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from coordinator.commit_offsets(offsets)
        self.assertEqual(coordinator.coordinator_id, None)

        with self.assertRaises(
                Errors.GroupCoordinatorNotAvailableError):
            yield from coordinator.commit_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
Example 8: test_metadata_update_fail
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_metadata_update_fail(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    # Make sure the connection is initialized before mocking, to avoid
    # crashing the api_version routine
    yield from client.force_metadata_update()

    with mock.patch.object(
            AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')

        updated = yield from client.force_metadata_update()
        self.assertEqual(updated, False)

        with self.assertRaises(KafkaError):
            yield from client.fetch_all_metadata()
    yield from client.close()
Example 9: wait_kafka
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
@classmethod
def wait_kafka(cls):
    cls.hosts = ['{}:{}'.format(cls.kafka_host, cls.kafka_port)]

    # Reconnecting until Kafka in docker becomes available
    for i in range(500):
        client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts)
        try:
            cls.loop.run_until_complete(client.bootstrap())
            # Broker can still be loading cluster layout, so we can get 0
            # brokers. That counts as still not available
            if client.cluster.brokers():
                return
        except ConnectionError:
            pass
        finally:
            cls.loop.run_until_complete(client.close())
        time.sleep(0.1)
    assert False, "Kafka server never started"
Example 10: test_metadata_synchronizer
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_metadata_synchronizer(self):
    client = AIOKafkaClient(
        loop=self.loop,
        bootstrap_servers=self.hosts,
        metadata_max_age_ms=100)

    with mock.patch.object(
            AIOKafkaClient, '_metadata_update') as mocked:
        @asyncio.coroutine
        def dummy(*d, **kw):
            client.cluster.failed_update(None)
        mocked.side_effect = dummy

        yield from client.bootstrap()
        yield from asyncio.sleep(0.15, loop=self.loop)
        yield from client.close()

        self.assertNotEqual(
            len(client._metadata_update.mock_calls), 0)
Example 11: test_bootstrap
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_bootstrap(self):
    client = AIOKafkaClient(loop=self.loop,
                            bootstrap_servers='0.42.42.42:444')
    with self.assertRaises(ConnectionError):
        yield from client.bootstrap()

    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'test_topic')

    metadata = yield from client.fetch_all_metadata()
    self.assertTrue('test_topic' in metadata.topics())

    client.set_topics(['t2', 't3'])
    client.set_topics(['t2', 't3'])  # should be ignored
    client.add_topic('t2')  # should be ignored
    # bootstrap again -- no error expected
    yield from client.bootstrap()
    yield from client.close()
Example 12: test_fetchoffsets_failed_scenarios
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_fetchoffsets_failed_scenarios(self):
    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    yield from self.wait_topic(client, 'topic1')
    subscription = SubscriptionState('earliest')
    subscription.subscribe(topics=('topic1',))
    coordinator = GroupCoordinator(
        client, subscription, loop=self.loop,
        group_id='fetch-offsets-group')

    yield from coordinator.ensure_active_group()

    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    with mock.patch('kafka.common.for_code') as mocked:
        mocked.side_effect = MockedKafkaErrCode(
            Errors.GroupLoadInProgressError, Errors.NoError)
        yield from coordinator.fetch_committed_offsets(offsets)

        mocked.side_effect = MockedKafkaErrCode(
            Errors.UnknownMemberIdError, Errors.NoError)
        with self.assertRaises(Errors.UnknownMemberIdError):
            yield from coordinator.fetch_committed_offsets(offsets)
        self.assertEqual(subscription.needs_partition_assignment, True)

        mocked.side_effect = None
        mocked.return_value = Errors.UnknownTopicOrPartitionError
        r = yield from coordinator.fetch_committed_offsets(offsets)
        self.assertEqual(r, {})

        mocked.return_value = KafkaError
        with self.assertRaises(KafkaError):
            yield from coordinator.fetch_committed_offsets(offsets)

        mocked.side_effect = MockedKafkaErrCode(
            Errors.NotCoordinatorForGroupError,
            Errors.NoError, Errors.NoError, Errors.NoError)
        yield from coordinator.fetch_committed_offsets(offsets)

    yield from coordinator.close()
    yield from client.close()
Example 13: setUp
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def setUp(self):
    super().setUp()
    self.hosts = ['{}:{}'.format(self.kafka_host, self.kafka_port)]

    if not self.topic:
        self.topic = "topic-{}-{}".format(
            self.id()[self.id().rindex(".") + 1:],
            random_string(10).decode('utf-8'))

    # Reconnecting until Kafka in docker becomes available
    client = AIOKafkaClient(
        loop=self.loop, bootstrap_servers=self.hosts)
    for i in range(500):
        try:
            self.loop.run_until_complete(client.bootstrap())
        except ConnectionError:
            time.sleep(0.1)
        else:
            self.loop.run_until_complete(client.close())
            break
    self._messages = {}
Example 14: test_check_version
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
def test_check_version(self):
    kafka_version = tuple(int(x) for x in self.kafka_version.split("."))

    client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
    yield from client.bootstrap()
    ver = yield from client.check_version()
    self.assertEqual(kafka_version[:2], ver[:2])

    yield from self.wait_topic(client, 'some_test_topic')
    ver2 = yield from client.check_version()
    self.assertEqual(ver, ver2)

    ver2 = yield from client.check_version(client.get_random_node())
    self.assertEqual(ver, ver2)

    with mock.patch.object(
            AIOKafkaConnection, 'send') as mocked:
        mocked.side_effect = KafkaError('mocked exception')
        with self.assertRaises(UnrecognizedBrokerVersion):
            yield from client.check_version(client.get_random_node())

    client._get_conn = asyncio.coroutine(lambda _, **kw: None)
    with self.assertRaises(ConnectionError):
        yield from client.check_version()
    yield from client.close()
Example 15: AIOKafkaProducer
# Required import: from aiokafka.client import AIOKafkaClient [as alias]
# Or: from aiokafka.client.AIOKafkaClient import close [as alias]
#......... part of the code is omitted here .........
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.
        connections_max_idle_ms (int): Close idle connections after the number
            of milliseconds specified by this config. Specifying `None` will
            disable idle checks. Default: 540000 (9 hours).
        enable_idempotence (bool): When set to ``True``, the producer will
            ensure that exactly one copy of each message is written in the
            stream. If ``False``, producer retries due to broker failures,
            etc., may write duplicates of the retried message in the stream.
            Note that enabling idempotence requires ``acks`` to be set to
            'all'. If it is not explicitly set by the user it will be chosen.
            If incompatible values are set, a ``ValueError`` will be thrown.
            New in version 0.5.0.
        sasl_mechanism (str): Authentication mechanism when security_protocol
            is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
            PLAIN, GSSAPI. Default: PLAIN
        sasl_plain_username (str): username for SASL PLAIN authentication.
            Default: None
        sasl_plain_password (str): password for SASL PLAIN authentication.
            Default: None

    Note:
        Many configuration parameters are taken from the Java client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    _COMPRESSORS = {
        'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
        'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
        'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
    }

    _closed = None  # Serves as an uninitialized flag for __del__
    _source_traceback = None

    def __init__(self, *, loop, bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000, request_timeout_ms=40000,
                 api_version='auto', acks=_missing,
                 key_serializer=None, value_serializer=None,
                 compression_type=None, max_batch_size=16384,
                 partitioner=DefaultPartitioner(), max_request_size=1048576,
                 linger_ms=0, send_backoff_ms=100,
                 retry_backoff_ms=100, security_protocol="PLAINTEXT",
                 ssl_context=None, connections_max_idle_ms=540000,
                 enable_idempotence=False, transactional_id=None,
                 transaction_timeout_ms=60000, sasl_mechanism="PLAIN",
                 sasl_plain_password=None, sasl_plain_username=None,
                 sasl_kerberos_service_name='kafka',
                 sasl_kerberos_domain_name=None):
        if acks not in (0, 1, -1, 'all', _missing):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if compression_type:
            checker, compression_attrs = self._COMPRESSORS[compression_type]
            if not checker():
                raise RuntimeError("Compression library for {} not found"
                                   .format(compression_type))
        else:
            compression_attrs = 0

        if transactional_id is not None:
            enable_idempotence = True
        else:
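Since the AIOKafkaProducer source above is truncated, here is a minimal, hypothetical usage sketch showing how the producer drives AIOKafkaClient.close for you: start() bootstraps its internal client, and stop() flushes pending batches and closes that client. It reuses only calls that appear in the examples above (start, send, stop); the broker address and topic name are illustrative assumptions.

import asyncio
from aiokafka import AIOKafkaProducer

@asyncio.coroutine
def produce(loop):
    # A minimal sketch, assuming a Kafka broker at localhost:9092.
    producer = AIOKafkaProducer(loop=loop, bootstrap_servers='localhost:9092')
    yield from producer.start()  # bootstraps the producer's internal AIOKafkaClient
    try:
        yield from producer.send('topic1', b'some message', partition=0)
    finally:
        yield from producer.stop()  # flushes, then closes the internal client

loop = asyncio.get_event_loop()
loop.run_until_complete(produce(loop))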