本文整理汇总了Python中nameko.testing.services.entrypoint_waiter函数的典型用法代码示例。如果您正苦于以下问题:Python entrypoint_waiter函数的具体用法?Python entrypoint_waiter怎么用?Python entrypoint_waiter使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了entrypoint_waiter函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_normal
def test_normal(self, publisher_container, consumer_container, tracker):
    """Two consecutive publishes each complete a send/recv round-trip,
    and the tracker records them in order.
    """
    # first round-trip
    first = "payload1"
    with entrypoint_waiter(consumer_container, 'recv'):
        with entrypoint_hook(publisher_container, 'send') as send:
            send(first)

    expected = [
        call("send", first),
        call("recv", first),
    ]
    assert tracker.call_args_list == expected

    # second round-trip appends to the same history
    second = "payload2"
    with entrypoint_waiter(consumer_container, 'recv'):
        with entrypoint_hook(publisher_container, 'send') as send:
            send(second)

    expected += [
        call("send", second),
        call("recv", second),
    ]
    assert tracker.call_args_list == expected
示例2: test_example_service
def test_example_service(container_factory, rabbit_config):
    """Start the example service against a throwaway sqlite database and
    verify both entrypoints fire and the spam row is persisted.
    """
    db_uri = 'sqlite:///{}'.format(tempfile.NamedTemporaryFile().name)
    engine = create_engine(db_uri)
    FooModel.metadata.create_all(engine)

    # rabbit settings take precedence over the ORM mapping
    config = {ORM_DB_URIS_KEY: {'foo-service:foo_base': db_uri}}
    config.update(rabbit_config)

    container = container_factory(FooService, config)

    # both entrypoints must fire once the container starts
    with entrypoint_waiter(container, 'handle_spam'), \
            entrypoint_waiter(container, 'foo'):
        container.start()

    handle_spam_called.assert_called_with('ham & eggs')
    handle_foo_called.assert_called_with('message')

    entries = list(engine.execute('SELECT data FROM spam LIMIT 1'))
    assert entries == [('ham',)]

    container.stop()
示例3: test_service_pooled_events_multiple_handlers
def test_service_pooled_events_multiple_handlers(
        rabbit_manager, rabbit_config, start_containers):
    """A single service with two handlers for the same event type gets one
    queue, one consumer and one worker per handler.
    """
    vhost = rabbit_config['vhost']
    (container,) = start_containers(DoubleServicePoolHandler, ("double",))

    # we should have two queues with a consumer each
    foo_queue_1 = rabbit_manager.get_queue(
        vhost, "evt-srcservice-eventtype--double.handle_1")
    assert len(foo_queue_1['consumer_details']) == 1

    foo_queue_2 = rabbit_manager.get_queue(
        vhost, "evt-srcservice-eventtype--double.handle_2")
    assert len(foo_queue_2['consumer_details']) == 1

    exchange_name = "srcservice.events"
    with entrypoint_waiter(container, 'handle_1'):
        with entrypoint_waiter(container, 'handle_2'):
            rabbit_manager.publish(
                vhost, exchange_name, 'eventtype', '"msg"',
                properties=dict(content_type='application/json')
            )

    # both handlers of the service should have received the event
    assert len(events) == 2

    # two worker instances would have been created to deal with the handling
    assert len(services['double']) == 2
    assert services['double'][0].events == ["msg"]
    assert services['double'][1].events == ["msg"]
示例4: test_message_ack_regression
def test_message_ack_regression(
    self, container, publish, toxiproxy, lock, tracker
):
    """ Regression for https://github.com/nameko/nameko/issues/511

    A message whose connection dies while its worker is still running
    cannot be ack'd; it must be re-delivered once the connection recovers
    and then processed successfully.
    """
    # prevent workers from completing
    lock.acquire()

    # fire entrypoint and block the worker;
    # break connection while the worker is active, then release worker
    with entrypoint_waiter(container, 'echo') as result:
        publish('msg1')
        while not lock._waiters:
            eventlet.sleep()  # pragma: no cover
        toxiproxy.disable()

        # allow connection to close before releasing worker
        eventlet.sleep(.1)
        lock.release()

    # entrypoint will return and attempt to ack initiating message
    assert result.get() == "msg1"

    # enabling connection will re-deliver the initiating message
    # and it will be processed again
    with entrypoint_waiter(container, 'echo') as result:
        toxiproxy.enable()
    assert result.get() == "msg1"

    # connection re-established, container should work again
    with entrypoint_waiter(container, 'echo', timeout=1) as result:
        publish('msg2')
    assert result.get() == 'msg2'
示例5: test_entrypoint_waiter_duplicates
def test_entrypoint_waiter_duplicates(container_factory, rabbit_config):
    """Registering two waiters for the same entrypoint on one container
    raises RuntimeError.
    """
    container = container_factory(Service, rabbit_config)

    with pytest.raises(RuntimeError) as exc:
        with entrypoint_waiter(container, "working"):
            with entrypoint_waiter(container, "working"):
                pass
    # inspect the exception itself: str() on the ExceptionInfo object is
    # deprecated in modern pytest and may not contain the message
    assert "already registered" in str(exc.value)
示例6: test_reuse_when_recovered
def test_reuse_when_recovered(
    self, publisher_container, consumer_container, tracker, toxiproxy
):
    """ Verify we detect and recover from stale connections.

    Publish confirms are required for this functionality. Without confirms
    the later messages are silently lost and the test hangs waiting for a
    response.
    """
    # call 1 succeeds
    payload1 = "payload1"
    with entrypoint_waiter(consumer_container, 'recv'):
        with entrypoint_hook(publisher_container, 'send') as send:
            send(payload1)

    assert tracker.call_args_list == [
        call("send", payload1),
        call("recv", payload1),
    ]

    with toxiproxy.disabled():

        # call 2 fails while the proxy (i.e. the connection) is down
        payload2 = "payload2"
        with pytest.raises(IOError) as exc_info:
            with entrypoint_hook(publisher_container, 'send') as send:
                send(payload2)
        assert (
            # expect the write to raise a BrokenPipe or, if it succeeds,
            # the socket to be closed on the subsequent confirmation read
            "Broken pipe" in str(exc_info.value) or
            "Socket closed" in str(exc_info.value)
        )

        # the send was attempted but never received
        assert tracker.call_args_list == [
            call("send", payload1),
            call("recv", payload1),
            call("send", payload2),
        ]

    # call 3 succeeds over a freshly established connection
    payload3 = "payload3"
    with entrypoint_waiter(consumer_container, 'recv'):
        with entrypoint_hook(publisher_container, 'send') as send:
            send(payload3)

    # note payload2 was lost: "send" recorded, no matching "recv"
    assert tracker.call_args_list == [
        call("send", payload1),
        call("recv", payload1),
        call("send", payload2),
        call("send", payload3),
        call("recv", payload3),
    ]
示例7: test_reliable_delivery
def test_reliable_delivery(
    rabbit_manager, rabbit_config, start_containers, container_factory
):
    """ Events sent to queues declared by ``reliable_delivery`` handlers
    should be received even if no service was listening when they were
    dispatched.
    """
    vhost = rabbit_config['vhost']

    (container,) = start_containers(ServicePoolHandler, ('service-pool',))

    # test queue created, with one consumer
    queue_name = "evt-srcservice-eventtype--service-pool.handle"
    queue = rabbit_manager.get_queue(vhost, queue_name)
    assert len(queue['consumer_details']) == 1

    # publish an event
    exchange_name = "srcservice.events"
    with entrypoint_waiter(container, 'handle'):
        rabbit_manager.publish(
            vhost, exchange_name, 'eventtype', '"msg_1"',
            properties=dict(content_type='application/json')
        )

    # wait for the event to be received
    assert events == ["msg_1"]

    # stop container, check queue still exists, without consumers
    container.stop()
    queues = rabbit_manager.get_queues(vhost)
    assert queue_name in [q['name'] for q in queues]
    queue = rabbit_manager.get_queue(vhost, queue_name)
    assert len(queue['consumer_details']) == 0

    # publish another event while nobody is listening
    rabbit_manager.publish(vhost, exchange_name, 'eventtype', '"msg_2"',
                           properties=dict(content_type='application/json'))

    # verify the message gets queued
    messages = rabbit_manager.get_messages(vhost, queue_name, requeue=True)
    assert ['"msg_2"'] == [msg['payload'] for msg in messages]

    # start another container bound to the same (still existing) queue
    class ServicePool(ServicePoolHandler):
        name = "service-pool"

    container = container_factory(ServicePool, rabbit_config)
    with entrypoint_waiter(container, 'handle'):
        container.start()

    # check the new service collects the pending event
    assert len(events) == 2
    assert events == ["msg_1", "msg_2"]
示例8: test_call_id_over_events
def test_call_id_over_events(rabbit_config, predictable_call_ids,
                             runner_factory):
    """The call id stack is propagated from an RPC entrypoint through a
    dispatched event to every listening service.
    """
    one_called = Mock()
    two_called = Mock()

    # records the call id stack each time a worker context is created
    stack_request = Mock()
    LoggingWorkerContext = get_logging_worker_context(stack_request)

    class EventListeningServiceOne(object):
        name = "listener_one"

        @event_handler('event_raiser', 'hello')
        def hello(self, name):
            one_called()

    class EventListeningServiceTwo(object):
        name = "listener_two"

        @event_handler('event_raiser', 'hello')
        def hello(self, name):
            two_called()

    class EventRaisingService(object):
        name = "event_raiser"
        dispatch = EventDispatcher()

        @rpc
        def say_hello(self):
            self.dispatch('hello', self.name)

    runner = runner_factory(rabbit_config)
    runner.add_service(EventListeningServiceOne, LoggingWorkerContext)
    runner.add_service(EventListeningServiceTwo, LoggingWorkerContext)
    runner.add_service(EventRaisingService, LoggingWorkerContext)
    runner.start()

    container = get_container(runner, EventRaisingService)
    listener1 = get_container(runner, EventListeningServiceOne)
    listener2 = get_container(runner, EventListeningServiceTwo)

    with entrypoint_hook(container, "say_hello") as say_hello:
        waiter1 = entrypoint_waiter(listener1, 'hello')
        waiter2 = entrypoint_waiter(listener2, 'hello')
        with waiter1, waiter2:
            say_hello()

    # one id for the rpc worker plus one for each event handler worker
    assert predictable_call_ids.call_count == 3
    # the rpc worker starts with an empty stack; both listeners inherit
    # the call id of the originating rpc method
    stack_request.assert_has_calls([
        call(None),
        call(['event_raiser.say_hello.0']),
        call(['event_raiser.say_hello.0']),
    ])
示例9: test_end_to_end
def test_end_to_end(
    self, container_factory, service_cls, config, sentry_stub, tracker
):
    """Firing the failing entrypoint reports the error to the sentry stub."""
    container = container_factory(service_cls, config)
    container.start()

    # wait for the sentry stub to receive the report triggered by the
    # failing 'broken' entrypoint
    report_waiter = entrypoint_waiter(sentry_stub, 'report')
    with report_waiter, entrypoint_hook(container, 'broken') as broken:
        with entrypoint_waiter(container, 'broken'), \
                pytest.raises(CustomException):
            broken()

    assert tracker.called
示例10: test_message_requeue_regression
def test_message_requeue_regression(
    self, container, publish, toxiproxy, lock, tracker
):
    """ Regression for https://github.com/nameko/nameko/issues/511

    A message requeued over a dead connection must be re-delivered and
    processed once the connection recovers.
    """
    # turn on requeue_on_error
    consumer = get_extension(container, Consumer)
    consumer.requeue_on_error = True

    # make entrypoint raise the first time it's called so that
    # we attempt to requeue it
    class Boom(Exception):
        pass

    def error_once():
        # Mock iterates side_effect; a yielded exception instance is raised
        yield Boom("error")
        while True:
            yield

    tracker.side_effect = error_once()

    # prevent workers from completing
    lock.acquire()

    # fire entrypoint and block the worker;
    # break connection while the worker is active, then release worker
    with entrypoint_waiter(container, 'echo') as result:
        publish('msg1')
        while not lock._waiters:
            eventlet.sleep()  # pragma: no cover
        toxiproxy.disable()

        # allow connection to close before releasing worker
        eventlet.sleep(.1)
        lock.release()

    # entrypoint will return and attempt to requeue initiating message
    with pytest.raises(Boom):
        result.get()

    # enabling connection will re-deliver the initiating message
    # and it will be processed again
    with entrypoint_waiter(container, 'echo', timeout=1) as result:
        toxiproxy.enable()
    assert result.get() == 'msg1'

    # connection re-established, container should work again
    with entrypoint_waiter(container, 'echo', timeout=1) as result:
        publish('msg2')
    assert result.get() == 'msg2'
示例11: test_entrypoint_waiter_wait_until_stops_raising
def test_entrypoint_waiter_wait_until_stops_raising(
    container_factory, rabbit_config, spawn_thread
):
    """``entrypoint_waiter`` with a callback keeps waiting while the
    entrypoint raises, and returns the first non-raising result.
    """
    threshold = 5

    class NotEnough(Exception):
        pass

    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            if msg < threshold:
                raise NotEnough(msg)
            return msg

    container = container_factory(Service, rabbit_config)
    container.start()

    def cb(worker_ctx, res, exc_info):
        # stop waiting on the first call that did not raise
        return exc_info is None

    def increment_forever():
        dispatch = event_dispatcher(rabbit_config)
        for count in itertools.count():
            dispatch('srcservice', 'eventtype', count)
            # time.sleep() with no argument is a TypeError under the plain
            # stdlib and only works when eventlet has monkey-patched `time`;
            # sleep(0) yields the greenthread in either case
            time.sleep(0)  # force yield

    with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
        spawn_thread(increment_forever)

    assert result.get() == threshold
示例12: test_event_dispatcher_over_ssl
def test_event_dispatcher_over_ssl(
    self, container_factory, rabbit_ssl_config, rabbit_config
):
    """An event dispatched over an SSL connection is received by a
    handler connected without SSL.
    """
    class Dispatcher(object):
        name = "dispatch"

        dispatch = EventDispatcher()

        @dummy
        def method(self, payload):
            return self.dispatch("event-type", payload)

    class Handler(object):
        name = "handler"

        @event_handler("dispatch", "event-type")
        def echo(self, payload):
            return payload

    # dispatcher connects over SSL; handler over the plain connection
    dispatcher_container = container_factory(Dispatcher, rabbit_ssl_config)
    dispatcher_container.start()

    handler_container = container_factory(Handler, rabbit_config)
    handler_container.start()

    with entrypoint_waiter(handler_container, 'echo') as result:
        with entrypoint_hook(dispatcher_container, 'method') as dispatch:
            dispatch("payload")
    assert result.get() == "payload"
示例13: test_requeue_on_error
def test_requeue_on_error(rabbit_manager, rabbit_config, start_containers):
    """A handler configured to requeue on error gets the same event
    re-delivered until it is handled successfully.
    """
    vhost = rabbit_config['vhost']
    (container,) = start_containers(RequeueingHandler, ('requeue',))

    # the queue should have been created and have one consumer
    queue = rabbit_manager.get_queue(
        vhost, "evt-srcservice-eventtype--requeue.handle")
    assert len(queue['consumer_details']) == 1

    counter = itertools.count()

    def entrypoint_fired_twice(worker_ctx, res, exc_info):
        # wait until the entrypoint has fired at least twice
        return next(counter) > 1

    with entrypoint_waiter(
        container, 'handle', callback=entrypoint_fired_twice
    ):
        rabbit_manager.publish(
            vhost, "srcservice.events", 'eventtype', '"msg"',
            properties=dict(content_type='application/json')
        )

    # the event will be received multiple times as it gets requeued and then
    # consumed again
    assert len(events) > 1

    # multiple instances of the service should have been instantiated
    assert len(services['requeue']) > 1

    # each instance should have received one event
    for service in services['requeue']:
        assert service.events == ["msg"]
示例14: test_runner_with_duplicate_services
def test_runner_with_duplicate_services(
    runner_factory, rabbit_config, service_cls, tracker
):
    """Adding the same service class twice hosts it only once, and its
    entrypoints keep working normally.
    """
    # host Service multiple times; the second add is a no-op
    runner = runner_factory(rabbit_config)
    for _ in range(2):
        runner.add_service(service_cls)
    runner.start()

    # it should only be hosted once
    assert len(runner.containers) == 1
    container, = runner.containers

    # test events (only one service is hosted)
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, "handle"):
        dispatch('srcservice', "testevent", event_data)
    assert tracker.call_args_list == [call(event_data)]

    # test rpc
    arg = "arg"
    with ServiceRpcProxy("service", rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [call(event_data), call(arg)]
示例15: test_deadlock_due_to_slow_workers
def test_deadlock_due_to_slow_workers(
    self, service_cls, container_factory, config
):
    """ Deadlock will occur if the unack'd messages grows beyond the
    size of the worker pool at any point. The QueueConsumer will block
    waiting for a worker and pending RPC replies will not be ack'd.
    Any running workers therefore never complete, and the worker pool
    remains exhausted.
    """
    container = container_factory(service_cls, config)
    container.start()

    count = 2

    # dispatch enough events to exceed the worker pool if acks stall
    dispatch = event_dispatcher(config)
    for _ in range(count):
        dispatch("service", "event1", 1)
        dispatch("service", "event2", 1)

    counter = itertools.count(start=1)

    def cb(worker_ctx, res, exc_info):
        # stop waiting once 'handle_event1' has fired `count` times;
        # returns None (falsy) otherwise, so the waiter keeps waiting
        if next(counter) == count:
            return True

    # the timeout fails the test if the container deadlocks
    with entrypoint_waiter(
        container, 'handle_event1', timeout=5, callback=cb
    ):
        pass