This article collects typical usage examples of contextlib.AsyncExitStack in Python. If you are wondering what contextlib.AsyncExitStack is for, or how to use it in practice, the curated code examples below may help. You can also browse further usage examples from the contextlib module.
The following presents 15 code examples of contextlib.AsyncExitStack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
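
Before the project-specific examples, here is a minimal self-contained sketch of the core pattern: an AsyncExitStack enters an arbitrary number of async context managers and unwinds them in reverse order when the block exits. The resource context manager below is a placeholder invented for illustration.

import asyncio
import contextlib

@contextlib.asynccontextmanager
async def resource(name):
    # Stand-in resource: acquired on enter, released on exit.
    print("acquire", name)
    try:
        yield name
    finally:
        print("release", name)

async def main():
    async with contextlib.AsyncExitStack() as stack:
        # Enter a variable number of async context managers; they are released
        # in reverse order when the stack unwinds, even if an error is raised.
        handles = [await stack.enter_async_context(resource(n)) for n in ("a", "b", "c")]
        print("working with", handles)

asyncio.run(main())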
Example 1: run
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def run(self):
    # The merged dataflow
    merged: Dict[str, Any] = {}
    # For entering ConfigLoader contexts
    async with contextlib.AsyncExitStack() as exit_stack:
        # Load config loaders we'll need as we see their file types
        parsers: Dict[str, BaseConfigLoader] = {}
        for path in self.dataflows:
            _, exported = await BaseConfigLoader.load_file(
                parsers, exit_stack, path
            )
            merge(merged, exported, list_append=True)
    # Export the dataflow
    dataflow = DataFlow._fromdict(**merged)
    async with self.configloader(BaseConfig()) as configloader:
        async with configloader() as loader:
            exported = dataflow.export(linked=not self.not_linked)
            print((await loader.dumpb(exported)).decode())
Example 2: apply
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
@contextlib.asynccontextmanager
async def apply(self, connection: ConnectionAPI) -> AsyncIterator[asyncio.Future[None]]:
    """
    See LogicAPI.apply()

    The future returned here will be done when the first of the futures obtained from
    applying all behaviors of this application is done.
    """
    self.connection = connection

    async with contextlib.AsyncExitStack() as stack:
        futures: List[asyncio.Future[None]] = []
        # First apply all the child behaviors
        for behavior in self._behaviors:
            if behavior.should_apply_to(connection):
                fut = await stack.enter_async_context(behavior.apply(connection))
                futures.append(fut)

        # If none of our behaviors were applied, use a never-ending Future so that
        # callsites can wait on it like when behaviors are applied.
        if not futures:
            futures.append(asyncio.Future())

        # Now register ourselves with the connection.
        with connection.add_logic(self.name, self):
            yield asyncio.create_task(wait_first(futures))
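
Example 2 and several later examples rely on a wait_first helper that is not reproduced on this page. A minimal sketch of what such a helper might look like is shown below; this is an assumption for illustration, not the project's actual implementation.

import asyncio
from typing import Sequence

async def wait_first(futures: Sequence["asyncio.Future[None]"]) -> None:
    # Assumed behavior: return once any future finishes, cancelling the rest
    # and re-raising an exception from the finished future if it failed.
    done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
    for fut in pending:
        fut.cancel()
    for fut in done:
        fut.result()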
Example 3: __init__
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
def __init__(
    self,
    pt_exec: Union[List[str], List[bytes]],
    state: Union[str, bytes, os.PathLike],
    *,
    exit_on_stdin_close: bool = True,
) -> None:
    """Create the adapter.

    Args:
        pt_exec: The pluggable transport command line to execute. This has
            to be a list of str / bytes, since
            :func:`asyncio.create_subprocess_exec` does not accept an
            entire command line as a string. On non-Windows platforms
            :func:`shlex.split` can be used to split a command line string
            into a list, while on Windows it's a bit more complicated.
        state: The state directory. This is a directory where the PT is
            allowed to store state. Either specify a path (which
            is not required to exist, in which case the PT will create
            the directory), or specify ``None`` to use a temporary
            directory created using :mod:`tempfile`.
        exit_on_stdin_close: Whether closing the PT's STDIN indicates the
            PT should gracefully exit.
    """
    if isinstance(pt_exec, (str, bytes)):
        self._pt_args = [pt_exec]
    else:
        self._pt_args = list(pt_exec)
    if state is not None:
        self._state = os.path.abspath(state)
    else:
        self._state = None
    self._exit_on_stdin_close = exit_on_stdin_close

    self._process: asyncio.subprocess.Process = None
    self._stdout_task: asyncio.Task = None
    self._ready = asyncio.Future()
    self._accepted_version: str = None
    self._transports: Dict[str, asyncio.Future] = {}
    self._stopping = False
    self._stack = contextlib.AsyncExitStack()
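
Following the docstring, constructing the adapter on a POSIX system might look like the snippet below. The class name PTAdapter is a placeholder for whichever class defines this __init__, and the obfs4proxy command line is only illustrative.

import shlex

# Hypothetical: split the command line as the docstring suggests, keep state in ./pt-state.
adapter = PTAdapter(shlex.split("obfs4proxy"), state="./pt-state")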
Example 4: run_client
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def run_client(conf: configparser.ConfigParser) -> None:
    pt_exec, state, tunnels = get_common_options_from_section(conf['client'])
    proxy = conf['client'].get('proxy', None)
    if not proxy:
        proxy = None

    transports = set()
    handler_confs = []
    for t in tunnels:
        section = conf[t]
        transport = section['transport']
        listen_host, listen_port = str_utils.parse_hostport(section['listen'])
        upstream_host, upstream_port = str_utils.parse_hostport(
            section['upstream'])
        args = {key[8:]: value
                for key, value in section.items()
                if key.startswith('options-')}
        transports.add(transport)
        handler_confs.append((
            (listen_host, listen_port),
            (transport, upstream_host, upstream_port, args),
        ))

    adapter = adapters.ClientAdapter(
        pt_exec, state, list(transports), proxy)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(adapter)
        for listen_args, handler_args in handler_confs:
            handler = functools.partial(
                handle_client_connection, adapter, *handler_args)
            server = await asyncio.start_server(handler, *listen_args)
            await stack.enter_async_context(server)
        await adapter.wait()
        raise RuntimeError('PT process exited unexpectedly')
Example 5: setUp
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def setUp(self):
    self.exit_stack = contextlib.AsyncExitStack()
    await self.exit_stack.__aenter__()
    self.tserver = await self.exit_stack.enter_async_context(
        ServerRunner.patch(Server)
    )
    self.cli = Server(port=0, insecure=True)
    await self.tserver.start(self.cli.run())
    # Set up client
    self.session = await self.exit_stack.enter_async_context(
        aiohttp.ClientSession()
    )
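
A setUp that manually enters an AsyncExitStack like this is normally paired with a tearDown that unwinds it. That method is not part of this excerpt; a minimal sketch, assuming the same asynchronous test case class, could be:

async def tearDown(self):
    # Close the client session, the test server, and anything else entered in setUp.
    await self.exit_stack.aclose()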
Example 6: __aenter__
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def __aenter__(self):
    self._stack = AsyncExitStack()
    await self._stack.__aenter__()
    for item in self.data:
        await self._stack.enter_async_context(item)
    return self
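
The matching __aexit__ is not shown in this excerpt; in this pattern it usually just delegates to the inner stack so every entered context is exited in reverse order. A sketch under that assumption:

async def __aexit__(self, exc_type, exc_value, traceback):
    # Delegate to the stack so all entered contexts are exited in reverse order.
    return await self._stack.__aexit__(exc_type, exc_value, traceback)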
Example 7: load_single_file
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
@classmethod
async def load_single_file(
    cls,
    parsers: Dict[str, "BaseConfigLoader"],
    exit_stack: contextlib.AsyncExitStack,
    path: pathlib.Path,
    *,
    base_dir: Optional[pathlib.Path] = None,
) -> Dict:
    """
    Load one file and load the ConfigLoader for it if necessary, using the
    AsyncExitStack provided.
    """
    filetype = path.suffix.replace(".", "")
    # Load the parser for the filetype if it isn't already loaded
    if filetype not in parsers:
        # TODO Get configs for loaders from somewhere, probably the
        # config of the multicomm
        loader_cls = cls.load(filetype)
        loader = await exit_stack.enter_async_context(
            loader_cls(BaseConfig())
        )
        parsers[filetype] = await exit_stack.enter_async_context(loader())
    # The config will be stored by its unique filepath split on dirs
    config_path = list(
        path.parts[len(base_dir.parts):]
        if base_dir is not None
        else path.parts
    )
    # Get rid of suffix for last member of path
    if config_path:
        config_path[-1] = path.stem
    config_path = tuple(config_path)
    # Load the file
    return config_path, await parsers[filetype].loadb(path.read_bytes())
Example 8: load_file
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
@classmethod
async def load_file(
    cls,
    parsers: Dict[str, "BaseConfigLoader"],
    exit_stack: contextlib.AsyncExitStack,
    path: pathlib.Path,
    *,
    base_dir: Optional[pathlib.Path] = None,
) -> Dict:
    async def _get_config(temp_filepath):
        if not isinstance(temp_filepath, pathlib.Path):
            temp_filepath = pathlib.Path(temp_filepath)
        config_path, loaded = await BaseConfigLoader.load_single_file(
            parsers, exit_stack, temp_filepath, base_dir=base_dir
        )
        return config_path, loaded

    async def _get_config_aux(temp_filepath):
        _, loaded = await _get_config(temp_filepath)
        return loaded

    if len(path.suffixes) >= 2 and path.suffixes[-2] == ".dirconf":
        dir_name = path.parts[-1].split(".")[0]
        dir_path = os.path.join(*(path.parts[:-1] + (dir_name,)))
        temp_conf_dict = {dir_name: dir_path}
        config_path, conf_dict = await _get_config(path)
        explored = explore_directories(temp_conf_dict)
        explored = await nested_apply(explored, _get_config_aux)
        conf_dict.update(explored[dir_name])
    else:
        config_path, conf_dict = await _get_config(path)
    return config_path, conf_dict
Example 9: __aenter__
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def __aenter__(self):
    self.clear()
    self.__stack = AsyncExitStack()
    await self.__stack.__aenter__()
    for item in self.parent.data:
        # Equivalent to entering the Object context then calling the object
        # to get the ObjectContext and entering that context. We then
        # return a list of all the inner contexts
        # >>> async with BaseDataFlowObject() as obj:
        # >>>     async with obj() as ctx:
        # >>>         clist.append(ctx)
        citem = item()
        self.logger.debug("Entering context: %r", citem)
        self.data.append(await self.__stack.enter_async_context(citem))
    return self
Example 10: aenter_stack
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def aenter_stack(
    obj: Any,
    context_managers: Dict[str, AsyncContextManager],
    call: bool = True,
) -> AsyncExitStack:
    """
    Create a :py:class:`contextlib.AsyncExitStack` then go through each key,
    value pair in the dict of async context managers. Enter the context of
    each async context manager and call setattr on ``obj`` to set the
    attribute by the name of ``key`` to the value yielded by the async
    context manager.

    If ``call`` is true then the context entered will be the context returned
    by calling each context manager respectively.
    """
    stack = AsyncExitStack()
    await stack.__aenter__()
    if context_managers is not None:
        for key, ctxmanager in context_managers.items():
            if call:
                if inspect.isfunction(ctxmanager):
                    ctxmanager = ctxmanager.__get__(obj, obj.__class__)
                setattr(
                    obj, key, await stack.enter_async_context(ctxmanager())
                )
            else:
                setattr(obj, key, await stack.enter_async_context(ctxmanager))
    return stack
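
A brief hypothetical usage of aenter_stack: the Worker class and the two placeholder context managers below are invented for illustration; only the enter-then-aclose calling pattern comes from the helper itself.

import asyncio
import contextlib

@contextlib.asynccontextmanager
async def open_database():
    yield "db-handle"     # placeholder resource

@contextlib.asynccontextmanager
async def open_cache():
    yield "cache-handle"  # placeholder resource

class Worker:
    pass

async def example():
    worker = Worker()
    # Pass already-created context managers with call=False; their yielded
    # values are set as worker.db and worker.cache.
    stack = await aenter_stack(
        worker, {"db": open_database(), "cache": open_cache()}, call=False
    )
    try:
        assert worker.db == "db-handle" and worker.cache == "cache-handle"
    finally:
        await stack.aclose()  # exits every entered context in reverse order

asyncio.run(example())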
Example 11: setUp
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def setUp(self):
    super().setUp()
    self._stack = contextlib.ExitStack().__enter__()
    self._astack = await contextlib.AsyncExitStack().__aenter__()
Example 12: __aenter__
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def __aenter__(self) -> "MemoryRedundancyCheckerContext":
    self.__stack = AsyncExitStack()
    await self.__stack.__aenter__()
    self.kvctx = await self.__stack.enter_async_context(
        self.parent.kvstore()
    )
    return self
Example 13: _run_background_services
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def _run_background_services(
    services: Sequence[ServiceAPI],
    runner: Callable[[ServiceAPI], AsyncContextManager[ManagerAPI]],
) -> None:
    async with contextlib.AsyncExitStack() as stack:
        managers = tuple([
            await stack.enter_async_context(runner(service))
            for service in services
        ])
        # If any of the services terminate, we do so as well.
        await wait_first([
            asyncio.create_task(manager.wait_finished())
            for manager in managers
        ])
Example 14: run
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def run(self) -> None:
    self._start_time = time.monotonic()
    self.connection.add_command_handler(Disconnect, cast(HandlerFn, self._handle_disconnect))
    try:
        async with contextlib.AsyncExitStack() as stack:
            fut = await stack.enter_async_context(P2PAPI().as_behavior().apply(self.connection))
            futures = [fut]
            self.p2p_api = self.connection.get_logic('p2p', P2PAPI)

            for behavior in self.get_behaviors():
                if behavior.should_apply_to(self.connection):
                    future = await stack.enter_async_context(behavior.apply(self.connection))
                    futures.append(future)

            self.connection.add_msg_handler(self._handle_subscriber_message)

            self.setup_protocol_handlers()

            # The `boot` process is run in the background to allow the `run` loop
            # to continue so that all of the Peer APIs can be used within the
            # `boot` task.
            self.manager.run_child_service(self.boot_manager)

            # Trigger the connection to start feeding messages through the handlers
            self.connection.start_protocol_streams()
            self.ready.set()

            try:
                await wait_first(futures)
            except asyncio.CancelledError:
                raise
            except BaseException:
                self.logger.exception("Behavior finished before us, cancelling ourselves")
                self.manager.cancel()
    finally:
        for callback in self._finished_callbacks:
            callback(self)
        if (self.p2p_api.local_disconnect_reason is None and
                self.p2p_api.remote_disconnect_reason is None):
            self._send_disconnect(DisconnectReason.CLIENT_QUITTING)

        # We run as a child service of the connection, but we don't want to leave a
        # connection open if somebody cancels just us, so this ensures the connection
        # gets closed as well.
        if not self.connection.get_manager().is_cancelled:
            self.logger.debug("Connection hasn't been cancelled yet, doing so now")
            self.connection.get_manager().cancel()
Example 15: test_proxy_peer_requests
# Required import: import contextlib [as alias]
# Or: from contextlib import AsyncExitStack [as alias]
async def test_proxy_peer_requests(request,
                                   event_bus,
                                   other_event_bus,
                                   event_loop,
                                   chaindb_20,
                                   client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(run_peer_pool_event_server(
            client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(run_peer_pool_event_server(
            server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(background_asyncio_service(ETHRequestServer(
            server_event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_20.db)
        )))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)
        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data((block_header.state_root,))
        assert node_data[0][0] == block_header.state_root