This article collects typical usage examples of Python's asyncio.gather method: what asyncio.gather does, how it is called, and what real code that uses it looks like. The curated examples below may help, and you can also explore other usages of the asyncio module.
The sections below show 15 code examples of asyncio.gather, sorted by popularity by default.
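Before the examples, a minimal self-contained sketch of what asyncio.gather does may be useful: it awaits several awaitables concurrently and returns their results as a list, in the same order the awaitables were passed in (the fetch coroutine below is purely illustrative).

import asyncio

async def fetch(name: str, delay: float) -> str:
    # Stand-in for an I/O-bound operation.
    await asyncio.sleep(delay)
    return name

async def main() -> None:
    # Both coroutines run concurrently; results keep argument order.
    results = await asyncio.gather(fetch("first", 0.2), fetch("second", 0.1))
    print(results)  # ['first', 'second']

asyncio.run(main())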
Example 1: _async_wait_for_process
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def _async_wait_for_process(
    future_process: Any,
    out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout,
    err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr
) -> CommandOutput:
    """Awaits the creation and completion of an asynchronous process.

    Args:
        future_process: The eventually created process.
        out: Where to write stuff emitted by the process' stdout.
        err: Where to write stuff emitted by the process' stderr.

    Returns:
        A (captured output, captured error output, return code) triplet.
    """
    process = await future_process
    future_output = _async_forward(process.stdout, out)
    future_err_output = _async_forward(process.stderr, err)
    output, err_output = await asyncio.gather(future_output, future_err_output)
    await process.wait()
    return CommandOutput(output, err_output, process.returncode)
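Example 1 gathers two stream-forwarding coroutines so stdout and stderr are drained concurrently while the process runs. For the simpler case where the streams only need to be captured, roughly the same shape can be written with the standard subprocess helpers; a sketch (run_and_capture and _read_stream are illustrative names, not part of the example's codebase):

import asyncio

async def _read_stream(stream: asyncio.StreamReader) -> str:
    # Drain one of the process pipes to a string.
    data = await stream.read()
    return data.decode()

async def run_and_capture(*cmd: str):
    process = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    # Read stdout and stderr concurrently, mirroring the gather call above.
    out, err = await asyncio.gather(
        _read_stream(process.stdout), _read_stream(process.stderr)
    )
    await process.wait()
    return out, err, process.returncode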
Example 2: test_main
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_main(self):
    # Need to run within a Task, as the scope manager depends
    # on Task.current_task()
    async def main_task():
        with self.tracer.start_active_span('parent'):
            tasks = self.submit_callbacks()
            await asyncio.gather(*tasks)

    self.loop.create_task(main_task())

    stop_loop_when(self.loop,
                   lambda: len(self.tracer.finished_spans()) >= 4)
    self.loop.run_forever()

    spans = self.tracer.finished_spans()
    self.assertEqual(len(spans), 4)
    self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

    for i in range(3):
        self.assertSameTrace(spans[i], spans[-1])
        self.assertIsChildOf(spans[i], spans[-1])
Example 3: test_websocket_non_regression_bug_105
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def test_websocket_non_regression_bug_105(event_loop, server):
    # This test will check a fix to a race condition which happens if the user is trying
    # to connect using the same client twice at the same time
    # See bug #105

    url = f"ws://{server.hostname}:{server.port}/graphql"
    print(f"url = {url}")

    sample_transport = WebsocketsTransport(url=url)
    client = Client(transport=sample_transport)

    # Create a coroutine which starts the connection with the transport but does nothing
    async def client_connect(client):
        async with client:
            await asyncio.sleep(2 * MS)

    # Create two tasks which will try to connect using the same client (not allowed)
    connect_task1 = asyncio.ensure_future(client_connect(client))
    connect_task2 = asyncio.ensure_future(client_connect(client))

    with pytest.raises(TransportAlreadyConnected):
        await asyncio.gather(connect_task1, connect_task2)
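Example 3 relies on gather propagating exceptions: by default the first exception raised by any awaitable is re-raised in the awaiting caller, which is what pytest.raises catches here; with return_exceptions=True (used in Examples 4 and 5) exceptions are returned in the result list instead. A minimal sketch of both behaviours (coroutine names are illustrative):

import asyncio

async def ok() -> str:
    return "ok"

async def boom() -> str:
    raise ValueError("boom")

async def main() -> None:
    # Default behaviour: the ValueError propagates out of gather.
    try:
        await asyncio.gather(ok(), boom())
    except ValueError as exc:
        print("propagated:", exc)

    # return_exceptions=True: exceptions become ordinary results.
    results = await asyncio.gather(ok(), boom(), return_exceptions=True)
    print(results)  # ['ok', ValueError('boom')]

asyncio.run(main())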
Example 4: make_query_loop
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def make_query_loop(tmpdir, config_data, registry):
    query_loops = []

    def make_loop():
        config_file = tmpdir / "config.yaml"
        config_file.write_text(yaml.dump(config_data), "utf-8")
        with config_file.open() as fh:
            config = load_config(fh, logging.getLogger())
        registry.create_metrics(config.metrics.values())
        query_loop = loop.QueryLoop(config, registry, logging)
        query_loops.append(query_loop)
        return query_loop

    yield make_loop
    await asyncio.gather(
        *(query_loop.stop() for query_loop in query_loops), return_exceptions=True,
    )
Example 5: _do_ops
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def _do_ops(self, ops):
    try:
        for r in await asyncio.gather(*ops, return_exceptions=True):
            if isinstance(r, MessageNotModifiedError):
                logging.debug("db not modified", exc_info=r)
            elif isinstance(r, Exception):
                raise r  # Makes more sense to raise even for MessageEditTimeExpiredError
            elif not isinstance(r, Message):
                logging.debug("unknown ret from gather, %r", r)
    except MessageEditTimeExpiredError:
        logging.debug("Making new channel.")
        _db = self.db
        self.db = None
        await self._client(DeleteChannelRequest(channel=_db))
        return True
    return False
Example 6: test_main
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_main(self):
    # Need to run within a Task, as the scope manager depends
    # on Task.current_task()
    async def main_task():
        with self.tracer.start_active_span('parent'):
            tasks = self.submit_callbacks()
            await asyncio.gather(*tasks)

    self.loop.create_task(main_task())

    stop_loop_when(self.loop,
                   lambda: len(self.tracer.finished_spans()) >= 4)
    self.loop.run_forever()

    spans = self.tracer.finished_spans()
    self.assertEqual(len(spans), 4)
    self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

    for i in range(3):
        self.assertSameTrace(spans[i], spans[-1])
        self.assertIsChildOf(spans[i], spans[-1])
Example 7: _cancel_all_tasks
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
    tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
    if not tasks:
        return

    for task in tasks:
        task.cancel()

    loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))

    for task in tasks:
        if not task.cancelled() and task.exception() is not None:
            loop.call_exception_handler(
                {
                    "message": "unhandled exception during shutdown",
                    "exception": task.exception(),
                    "task": task,
                }
            )
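Note that the loop argument used in the gather call above was deprecated in Python 3.8 and removed in Python 3.10, so on current interpreters the same shutdown step drops it. A sketch of the 3.10+ variant (the function name is illustrative):

import asyncio

def _cancel_all_tasks_modern(loop: asyncio.AbstractEventLoop) -> None:
    # Same cancellation wait as above, minus the removed loop= argument.
    tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
    for task in tasks:
        task.cancel()
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))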
Example 8: test_task_local
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def test_task_local() -> None:
    local_ = TaskLocal()
    queue: asyncio.Queue = asyncio.Queue()
    tasks = 2
    for _ in range(tasks):
        queue.put_nowait(None)

    async def _test_local(value: int) -> int:
        local_.test = value
        await queue.get()
        queue.task_done()
        await queue.join()
        return local_.test

    futures = [asyncio.ensure_future(_test_local(value)) for value in range(tasks)]
    asyncio.gather(*futures)
    for value, future in enumerate(futures):
        assert (await future) == value
Example 9: run
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def run(self, host):
    tasks = []
    # Default limit=100; enable_cleanup_closed=True guards against leaked SSL connections,
    # and ttl_dns_cache raises the DNS cache TTL.
    conn = aiohttp.TCPConnector(
        limit=LIMIT,
        enable_cleanup_closed=True,
        ttl_dns_cache=100,
        ssl=False,
    )
    timeout = aiohttp.ClientTimeout(total=60, connect=2)
    async with aiohttp.ClientSession(connector=conn, timeout=timeout) as session:
        for url in self.urls:
            task = asyncio.ensure_future(self.scan(host, url, session))
            tasks.append(task)
        # gather only returns once all requests have completed
        _ = await asyncio.gather(*tasks)
        # for i in asyncio.as_completed(tasks):  # similar to tasks in a thread pool
        #     answer = await i

# Create and start the tasks
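The commented-out lines in Example 9 point at the alternative to gather: asyncio.as_completed yields futures in completion order instead of waiting for the whole batch. A minimal sketch of that pattern (probe is an illustrative stand-in for self.scan):

import asyncio

async def probe(path: str) -> str:
    # Stand-in for an HTTP request.
    await asyncio.sleep(0.1)
    return path

async def main() -> None:
    tasks = [asyncio.ensure_future(probe(f"/path{i}")) for i in range(3)]
    # Results arrive as each task finishes, not in submission order.
    for fut in asyncio.as_completed(tasks):
        answer = await fut
        print(answer)

asyncio.run(main())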
Example 10: import_file
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def import_file(filename):
    log.info("import_file: {}".format(filename))
    loop = globals["loop"]
    max_concurrent_tasks = config.get("max_concurrent_tasks")
    tasks = []
    with open(filename, 'r') as fh:
        for line in fh:
            line = line.rstrip()
            # loop.run_until_complete(import_line(line))
            tasks.append(asyncio.ensure_future(import_line(line)))
            if len(tasks) < max_concurrent_tasks:
                continue  # get next line
            # got a batch, move them out!
            loop.run_until_complete(asyncio.gather(*tasks))
            tasks = []
    # finish any remaining tasks
    loop.run_until_complete(asyncio.gather(*tasks))
    globals["files_read"] += 1
Example 11: _get_dispatches
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def _get_dispatches(
    self, payload: Payload, request: Request
) -> Tuple[Payload, Set[str]]:
    # Run all dispatchers to form our initial set of worker plugins to run
    worker_plugins: Set[str] = set(
        self.always_dispatch
    ) if self.always_dispatch else set()
    dispatch_results: List[Set[str]] = await asyncio.gather(  # type: ignore
        *[
            self._apply_dispatcher(dispatcher, payload, request)
            for dispatcher in self._loaded_dispatcher_plugins.values()
        ]
    )
    for dispatch_result in dispatch_results:
        worker_plugins.update(dispatch_result)
    return payload, worker_plugins
Example 12: test_increment_lock
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def test_increment_lock(self):
    """Test that we can't produce a race condition in .increment."""
    await self.cog.redis.set("test_key", 0)
    tasks = []

    # Increment this a lot in different tasks
    for _ in range(100):
        task = asyncio.create_task(
            self.cog.redis.increment("test_key", 1)
        )
        tasks.append(task)
    await asyncio.gather(*tasks)

    # Confirm that the value has been incremented the exact right number of times.
    value = await self.cog.redis.get("test_key")
    self.assertEqual(value, 100)
Example 13: refresh_inventory
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def refresh_inventory(self) -> None:
    """Refresh internal documentation inventory."""
    log.debug("Refreshing documentation inventory...")

    # Clear the old base URLS and inventories to ensure
    # that we start from a fresh local dataset.
    # Also, reset the cache used for fetching documentation.
    self.base_urls.clear()
    self.inventories.clear()
    self.renamed_symbols.clear()
    async_cache.cache = OrderedDict()

    # Run all coroutines concurrently - since each of them performs an HTTP
    # request, this speeds up fetching the inventory data heavily.
    coros = [
        self.update_single(
            package["package"], package["base_url"], package["inventory_url"]
        ) for package in await self.bot.api_client.get('bot/documentation-links')
    ]
    await asyncio.gather(*coros)
Example 14: main
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def main():
    loop = asyncio.get_event_loop()
    tsk = asyncio.ensure_future(pubsub(), loop=loop)

    async def publish():
        pub = await aioredis.create_redis(
            'redis://localhost')
        while not tsk.done():
            # wait for clients to subscribe
            while True:
                subs = await pub.pubsub_numsub('channel:1')
                if subs[b'channel:1'] == 1:
                    break
                await asyncio.sleep(0, loop=loop)
            # publish some messages
            for msg in ['one', 'two', 'three']:
                await pub.publish('channel:1', msg)
            # send stop word
            await pub.publish('channel:1', STOPWORD)
        pub.close()
        await pub.wait_closed()

    loop.run_until_complete(asyncio.gather(publish(), tsk, loop=loop))
Example 15: pipeline
# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def pipeline(self):
    """Returns :class:`Pipeline` object to execute bulk of commands.

    It is provided for convenience.
    Commands can be pipelined without it.

    Example:

    >>> pipe = redis.pipeline()
    >>> fut1 = pipe.incr('foo')  # NO `await` as it will block forever!
    >>> fut2 = pipe.incr('bar')
    >>> result = await pipe.execute()
    >>> result
    [1, 1]
    >>> await asyncio.gather(fut1, fut2)
    [1, 1]
    >>> #
    >>> # The same can be done without pipeline:
    >>> #
    >>> fut1 = redis.incr('foo')  # the 'INCR foo' command already sent
    >>> fut2 = redis.incr('bar')
    >>> await asyncio.gather(fut1, fut2)
    [2, 2]
    """
    return Pipeline(self._pool_or_conn, self.__class__)