This article collects typical usage examples of the Python method trio.move_on_after. If you are unsure what trio.move_on_after does or how to use it, the curated code examples below may help. You can also explore further usage examples from the trio module.
The following shows 15 code examples of trio.move_on_after, sorted by popularity by default.
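Before the examples, here is a minimal sketch of the basic pattern (this sketch is not taken from any of the projects below): trio.move_on_after(seconds) opens a cancel scope that silently cancels whatever is awaited inside it once the deadline passes, and the scope's cancelled_caught attribute reports whether the timeout actually fired.

import trio

async def main():
    # Give the slow operation at most 1 second; if it does not finish in
    # time, the scope cancels it and execution resumes after the block.
    with trio.move_on_after(1) as cancel_scope:
        await trio.sleep(10)  # stand-in for a slow operation
    if cancel_scope.cancelled_caught:
        print("timed out")

trio.run(main)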
Example 1: main
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def main():
    t0 = datetime.datetime.now()
    print(colorama.Fore.WHITE + "App started.", flush=True)
    """
    trio.Queue was removed in v0.11.0:
    - Replacing the call to trio.Queue() by trio.open_memory_channel()
    - Using a MemorySendChannel object in generate_data function
    - Using a MemoryReceiveChannel object in process_data function
    - Updating requirements.txt with trio v0.16.0 and trio_asyncio v0.11.0
    """
    send_channel, receive_channel = trio.open_memory_channel(max_buffer_size=10)
    with trio.move_on_after(5):
        async with trio.open_nursery() as nursery:
            nursery.start_soon(generate_data, 20, send_channel, name='Prod 1')
            nursery.start_soon(generate_data, 20, send_channel, name='Prod 2')
            nursery.start_soon(process_data, 40, receive_channel, name='Consumer')

    dt = datetime.datetime.now() - t0
    print(colorama.Fore.WHITE + "App exiting, total time: {:,.2f} sec.".format(
        dt.total_seconds()), flush=True)
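generate_data and process_data are not shown in Example 1; a minimal sketch of what they might look like under the same channel setup (the bodies below are illustrative, not taken from the original project; note that the name= keyword of nursery.start_soon() only sets the trio task name and is not passed to the function):

import random
import trio

async def generate_data(num, send_channel):
    # Produce `num` items and push each into the bounded memory channel;
    # send() blocks whenever the 10-item buffer is full.
    for i in range(num):
        await send_channel.send((i, random.random()))
        await trio.sleep(0.1)

async def process_data(num, receive_channel):
    # Consume up to `num` items from the memory channel.
    processed = 0
    while processed < num:
        item = await receive_channel.receive()
        processed += 1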
Example 2: init_for_server
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def init_for_server(cls, stream):
    ws = WSConnection(ConnectionType.SERVER)
    transport = cls(stream, ws)
    # Wait for the client to initiate the WebSocket handshake
    event = "Websocket handshake timeout"
    with trio.move_on_after(WEBSOCKET_HANDSHAKE_TIMEOUT):
        event = await transport._next_ws_event()
    if isinstance(event, Request):
        transport.logger.debug("Accepting WebSocket upgrade")
        await transport._net_send(AcceptConnection())
        return transport
    transport.logger.warning("Unexpected event during WebSocket handshake", ws_event=event)
    raise TransportError(f"Unexpected event during WebSocket handshake: {event}")
Example 3: api_user_invite
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def api_user_invite(self, client_ctx, msg):
    if client_ctx.profile != UserProfile.ADMIN:
        return {"status": "not_allowed", "reason": "Only allowed for user with ADMIN profile."}
    msg = apiv1_user_invite_serializer.req_load(msg)
    # Setting the cancel scope here instead of just where we are waiting
    # for the event makes testing easier.
    with trio.move_on_after(PEER_EVENT_MAX_WAIT) as cancel_scope:
        rep = await run_with_breathing_transport(
            client_ctx.transport, self._api_user_invite, client_ctx, msg
        )
    if cancel_scope.cancelled_caught:
        rep = {
            "status": "timeout",
            "reason": "Timeout while waiting for new user to be claimed.",
        }
    return apiv1_user_invite_serializer.rep_dump(rep)
Example 4: api_user_claim
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def api_user_claim(self, client_ctx, msg):
    msg = apiv1_user_claim_serializer.req_load(msg)
    # Setting the cancel scope here instead of just where we are waiting
    # for the event makes testing easier.
    with trio.move_on_after(PEER_EVENT_MAX_WAIT) as cancel_scope:
        rep = await run_with_breathing_transport(
            client_ctx.transport, self._api_user_claim, client_ctx, msg
        )
    if cancel_scope.cancelled_caught:
        rep = {
            "status": "timeout",
            "reason": "Timeout while waiting for invitation creator to answer.",
        }
    return apiv1_user_claim_serializer.rep_dump(rep)
Example 5: api_device_invite
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def api_device_invite(self, client_ctx, msg):
    msg = apiv1_device_invite_serializer.req_load(msg)
    # Setting the cancel scope here instead of just where we are waiting
    # for the event makes testing easier.
    with trio.move_on_after(PEER_EVENT_MAX_WAIT) as cancel_scope:
        rep = await run_with_breathing_transport(
            client_ctx.transport, self._api_device_invite, client_ctx, msg
        )
    if cancel_scope.cancelled_caught:
        rep = {
            "status": "timeout",
            "reason": "Timeout while waiting for new device to be claimed.",
        }
    return apiv1_device_invite_serializer.rep_dump(rep)
Example 6: _schedule_task
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def _schedule_task(self):
    '''
    Wait until a scheduled event is due, then start a new crawl job.

    :returns: This method runs until cancelled.
    '''
    while True:
        if not self._events:
            await self._event_added.wait()
            continue
        next_event = self._events[0]
        if not next_event.is_due:
            with trio.move_on_after(next_event.seconds_until_due):
                await self._event_added.wait()
            continue
        self._events.pop(0)
        logger.info('Scheduled job "%s" is ready to start.',
            next_event.schedule.name)
        await self._start_scheduled_job(next_event)
Example 7: wait
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def wait(self):
    '''Wait until an object has been uploaded

    If there are no objects in transit, return immediately.
    '''
    # Loop to avoid the race condition of a transfer terminating
    # between the call to transfer_in_progress() and wait().
    while True:
        if not self.transfer_in_progress():
            return
        with trio.move_on_after(5):
            async with self.transfer_completed:
                await self.transfer_completed.wait()
                return
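The transfer_completed object in Example 7 behaves like a trio.Condition: the waiter re-checks transfer_in_progress() every 5 seconds in case a notification is missed. The notifying side is not shown; a sketch of what it might look like, assuming a trio.Condition and an illustrative method name:

async def _notify_transfer_completed(self):
    # Called when an upload finishes: wake every task blocked in wait()
    # above so it can re-check transfer_in_progress().
    async with self.transfer_completed:
        self.transfer_completed.notify_all()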
Example 8: shutdown_and_clean_up
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def shutdown_and_clean_up(self):
    # When this method is called, it's because we definitely want to kill
    # this connection, either as a clean shutdown or because of some kind
    # of error or loss-of-sync bug, and we no longer care if that violates
    # the protocol or not. So we ignore the state of self.conn, and just
    # go ahead and do the shutdown on the socket directly. (If you're
    # implementing a client you might prefer to send ConnectionClosed()
    # and let it raise an exception if that violates the protocol.)
    #
    try:
        await self.stream.send_eof()
    except trio.BrokenResourceError:
        # They're already gone, nothing to do
        return
    # Wait and read for a bit to give them a chance to see that we closed
    # things, but eventually give up and just close the socket.
    # XX FIXME: possibly we should set SO_LINGER to 0 here, so
    # that in the case where the client has ignored our shutdown and
    # declined to initiate the close themselves, we do a violent shutdown
    # (RST) and avoid the TIME_WAIT?
    # It looks like nginx never does this for keepalive timeouts, and only
    # does it for regular timeouts (slow clients I guess?) if explicitly
    # enabled ("Default: reset_timedout_connection off")
    with trio.move_on_after(TIMEOUT):
        try:
            while True:
                # Attempt to read until EOF
                got = await self.stream.receive_some(MAX_RECV)
                if not got:
                    break
        except trio.BrokenResourceError:
            pass
        finally:
            await self.stream.aclose()
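The FIXME comment in Example 8 wonders about setting SO_LINGER to 0 so that the final close sends an RST instead of leaving the socket in TIME_WAIT. A minimal sketch of how that could be done, assuming the stream is a trio.SocketStream (this helper is illustrative, not part of the original example; it would be called on self.stream just before the final aclose()):

import socket
import struct

def enable_reset_on_close(stream):
    # l_onoff=1, l_linger=0 asks the kernel to discard unsent data and
    # send an RST when the socket is closed, skipping TIME_WAIT.
    stream.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                      struct.pack("ii", 1, 0))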
Example 9: __call__
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
def __call__(self, fn):
    @wraps(fn)
    async def wrapper(*args, **kwargs):
        with trio.move_on_after(self._seconds) as cancel_scope:
            await fn(*args, **kwargs)
        if cancel_scope.cancelled_caught:
            pytest.fail('Test runtime exceeded the maximum {} seconds'
                .format(self._seconds))
    return wrapper
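Example 9 shows only the __call__ method of a test-timeout decorator. A sketch of how the full class and its use on a trio-based test might look (the class name fail_after and the test function are illustrative, not taken from the original project):

from functools import wraps

import pytest
import trio

class fail_after:
    '''Fail a trio test if it runs longer than the given number of seconds.'''
    def __init__(self, seconds):
        self._seconds = seconds

    def __call__(self, fn):
        @wraps(fn)
        async def wrapper(*args, **kwargs):
            with trio.move_on_after(self._seconds) as cancel_scope:
                await fn(*args, **kwargs)
            if cancel_scope.cancelled_caught:
                pytest.fail('Test runtime exceeded the maximum {} seconds'
                    .format(self._seconds))
        return wrapper

@fail_after(2)
async def test_finishes_quickly():
    await trio.sleep(0.1)

Running the async test itself assumes a plugin such as pytest-trio is installed.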
Example 10: _get_next_expiry
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def _get_next_expiry(self):
    '''
    Pop an expiry off the heap.

    If no tokens on heap, suspend until a token is available.

    :returns: The next expiry.
    :rtype: Expiry
    '''
    while True:
        if not self._expires:
            # If there are no pending expirations, then we wait for a new
            # token or a reset of an existing token.
            with trio.CancelScope() as cancel_scope:
                self._expiry_cancel_scope = cancel_scope
                await trio.sleep_forever()
            continue
        # Now there are definitely pending expirations. Examine the earliest
        # pending expiration. If it is in the past, then we pop it
        # immediately. If it is in the future, then sleep until its
        # expiration time or until somebody adds or resets a token.
        now = trio.current_time()
        expires = self._expires[0].time
        if expires <= now:
            expiry = heappop(self._expires)
            return expiry
        with trio.move_on_after(expires - now) as cancel_scope:
            self._expiry_cancel_scope = cancel_scope
            await trio.sleep_forever()
        continue
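Example 10 stashes each cancel scope in self._expiry_cancel_scope so that other code can interrupt the wait as soon as a token is added or reset. That wake-up side is not shown; a sketch under that assumption (the method name is illustrative):

def _wake_expiry_waiter(self):
    # Cancelling the stashed scope interrupts sleep_forever() inside
    # _get_next_expiry(), which then re-examines the expiry heap.
    if self._expiry_cancel_scope is not None:
        self._expiry_cancel_scope.cancel()
        self._expiry_cancel_scope = None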
Example 11: _queue_upload
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def _queue_upload(self, obj):
    '''Put *obj* into upload queue'''
    while True:
        with trio.move_on_after(5):
            await self.to_upload[0].send(obj)
            return
        for t in self.upload_threads:
            if t.is_alive():
                break
        else:
            raise NoWorkerThreads('no upload threads')
Example 12: run
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def run(self):
    log.debug('started')
    while not self.stop_event.is_set():
        did_sth = False
        stamp = time.time()
        # Need to make a copy, since we aren't allowed to change the dict
        # while iterating through it. The performance hit doesn't seem
        # to be that bad:
        # >>> from timeit import timeit
        # >>> timeit("k=0\nfor el in list(d.values()):\n k += el",
        # ...        setup='\nfrom collections import OrderedDict\nd = OrderedDict()\nfor i in range(5000):\n d[i]=i\n',
        # ...        number=500)/500 * 1e3
        # 1.3769531380003173
        # >>> timeit("k=0\nfor el in d.values():\n k += el",
        # ...        setup='\nfrom collections import OrderedDict\nd = OrderedDict()\nfor i in range(5000):\n d[i]=i\n',
        # ...        number=500)/500 * 1e3
        # 1.456586996000624
        for el in list(self.block_cache.cache.values()):
            if self.stop_event.is_set() or stamp - el.last_write < 10:
                break
            if el.dirty and el not in self.block_cache.in_transit:
                await self.block_cache.upload_if_dirty(el)
                did_sth = True
        if not did_sth:
            with trio.move_on_after(5):
                await self.stop_event.wait()
    log.debug('finished')
Example 13: _handle_connection
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def _handle_connection(self, stream):
    '''
    Handle an incoming connection by spawning a connection background task
    and a handler task inside a new nursery.

    :param stream:
    :type stream: trio.abc.Stream
    '''
    async with trio.open_nursery() as nursery:
        wsproto = WSConnection(ConnectionType.SERVER)
        connection = WebSocketConnection(stream, wsproto,
            message_queue_size=self._message_queue_size,
            max_message_size=self._max_message_size)
        nursery.start_soon(connection._reader_task)
        with trio.move_on_after(self._connect_timeout) as connect_scope:
            request = await connection._get_request()
        if connect_scope.cancelled_caught:
            nursery.cancel_scope.cancel()
            await stream.aclose()
            return
        try:
            await self._handler(request)
        finally:
            with trio.move_on_after(self._disconnect_timeout):
                # aclose() will shut down the reader task even if it's
                # cancelled:
                await connection.aclose()
Example 14: test_beacon_node_can_count_slots
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def test_beacon_node_can_count_slots(autojump_clock, eth2_config, beacon_node):
    some_slots = 10
    a_future_slot = beacon_node.current_tick.slot + some_slots
    seconds = some_slots * eth2_config.SECONDS_PER_SLOT
    with trio.move_on_after(seconds):
        await beacon_node.run()
    assert beacon_node.current_tick.slot == a_future_slot
Example 15: test_hosts_can_gossip_blocks
# Required import: import trio [as alias]
# Or: from trio import move_on_after [as alias]
async def test_hosts_can_gossip_blocks(host_factory):
    host_a_blocks = set()
    host_a, host_a_listen_maddr = host_factory("a", host_a_blocks)
    host_b_blocks = set()
    host_b, host_b_listen_maddr = host_factory("b", host_b_blocks)
    with trio.move_on_after(2 * 60):
        async with _run_host(host_a, host_a_listen_maddr):
            async with _run_host(host_b, host_b_listen_maddr):
                await host_b.add_peer_from_maddr(host_a_listen_maddr)
                await host_a.subscribe_gossip_channels()
                await host_b.subscribe_gossip_channels()
                # NOTE: subscription fails to register if we do not sleep here...
                # Need to debug inside `libp2p`...
                await trio.sleep(1)
                block = SignedBeaconBlock.create(signature=b"\xcc" * 96)
                await host_a.broadcast_block(block)
                block_source = host_b.stream_block_gossip()
                gossiped_block = await block_source.__anext__()
                assert gossiped_block == block
                # NOTE: the following is racy...
                # Need to debug inside `libp2p`...
                await host_a.unsubscribe_gossip_channels()
                await trio.sleep(1)
                await host_b.unsubscribe_gossip_channels()