

Python asyncio.gather Method Code Examples

This article collects typical usage examples of Python's asyncio.gather method. If you are wondering how asyncio.gather works, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the asyncio module.


The following presents 15 code examples of asyncio.gather, sorted by popularity by default.
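
Before the collected examples, here is a minimal standalone sketch of what asyncio.gather does: it schedules several awaitables concurrently and returns their results in the order the awaitables were passed in. The coroutine names (fetch, main) and delays are illustrative only.

import asyncio

async def fetch(name, delay):
    # Simulate an I/O-bound operation.
    await asyncio.sleep(delay)
    return f"{name} done"

async def main():
    # gather runs both coroutines concurrently and returns their results
    # in argument order, regardless of which one finishes first.
    results = await asyncio.gather(fetch("a", 0.2), fetch("b", 0.1))
    print(results)  # ['a done', 'b done']

asyncio.run(main())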

Example 1: _async_wait_for_process

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def _async_wait_for_process(
        future_process: Any,
        out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout,
        err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr
) -> CommandOutput:
    """Awaits the creation and completion of an asynchronous process.

    Args:
        future_process: The eventually created process.
        out: Where to write stuff emitted by the process' stdout.
        err: Where to write stuff emitted by the process' stderr.

    Returns:
        A (captured output, captured error output, return code) triplet.
    """
    process = await future_process
    future_output = _async_forward(process.stdout, out)
    future_err_output = _async_forward(process.stderr, err)
    output, err_output = await asyncio.gather(future_output, future_err_output)
    await process.wait()

    return CommandOutput(output, err_output, process.returncode) 
Developer: quantumlib, Project: OpenFermion-Cirq, Lines: 24, Source: shell_tools.py
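
Example 1 gathers two stream-forwarding coroutines so stdout and stderr are drained concurrently while the process runs, then awaits the exit code. A simplified standalone sketch of the same pattern, assuming a POSIX-style echo command; the helper names are illustrative, not part of the project above:

import asyncio

async def _drain(stream):
    # Read a pipe to completion and return the decoded text.
    data = await stream.read()
    return data.decode()

async def run(*cmd):
    process = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # Drain both pipes concurrently; gather returns results in argument order,
    # so the (stdout, stderr) unpacking is safe.
    out, err = await asyncio.gather(_drain(process.stdout), _drain(process.stderr))
    await process.wait()
    return out, err, process.returncode

print(asyncio.run(run("echo", "hello")))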

Example 2: test_main

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_main(self):
        # Need to run within a Task, as the scope manager depends
        # on Task.current_task()
        async def main_task():
            with self.tracer.start_active_span('parent'):
                tasks = self.submit_callbacks()
                await asyncio.gather(*tasks)

        self.loop.create_task(main_task())

        stop_loop_when(self.loop,
                       lambda: len(self.tracer.finished_spans()) >= 4)
        self.loop.run_forever()

        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 4)
        self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

        for i in range(3):
            self.assertSameTrace(spans[i], spans[-1])
            self.assertIsChildOf(spans[i], spans[-1]) 
Developer: opentracing, Project: opentracing-python, Lines: 23, Source: test_asyncio.py

Example 3: test_websocket_non_regression_bug_105

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_websocket_non_regression_bug_105(event_loop, server):

    # This test will check a fix to a race condition which happens if the user is trying
    # to connect using the same client twice at the same time
    # See bug #105

    url = f"ws://{server.hostname}:{server.port}/graphql"
    print(f"url = {url}")

    sample_transport = WebsocketsTransport(url=url)

    client = Client(transport=sample_transport)

    # Create a coroutine which starts the connection with the transport but does nothing
    async def client_connect(client):
        async with client:
            await asyncio.sleep(2 * MS)

    # Create two tasks which will try to connect using the same client (not allowed)
    connect_task1 = asyncio.ensure_future(client_connect(client))
    connect_task2 = asyncio.ensure_future(client_connect(client))

    with pytest.raises(TransportAlreadyConnected):
        await asyncio.gather(connect_task1, connect_task2) 
Developer: graphql-python, Project: gql, Lines: 26, Source: test_websocket_exceptions.py
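
In Example 3 the expected TransportAlreadyConnected error surfaces through the await on asyncio.gather: with the default return_exceptions=False, the first exception raised by any gathered awaitable is re-raised at the await, while the remaining awaitables are not cancelled. A minimal sketch of that propagation, with made-up coroutine names:

import asyncio

async def ok():
    await asyncio.sleep(0.1)
    return "ok"

async def boom():
    await asyncio.sleep(0.05)
    raise RuntimeError("already connected")

async def main():
    try:
        # The first exception propagates out of the await on gather;
        # the other awaitables keep running in the background.
        await asyncio.gather(ok(), boom())
    except RuntimeError as exc:
        print("caught:", exc)

asyncio.run(main())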

Example 4: make_query_loop

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
async def make_query_loop(tmpdir, config_data, registry):
    query_loops = []

    def make_loop():
        config_file = tmpdir / "config.yaml"
        config_file.write_text(yaml.dump(config_data), "utf-8")
        with config_file.open() as fh:
            config = load_config(fh, logging.getLogger())
        registry.create_metrics(config.metrics.values())
        query_loop = loop.QueryLoop(config, registry, logging)
        query_loops.append(query_loop)
        return query_loop

    yield make_loop
    await asyncio.gather(
        *(query_loop.stop() for query_loop in query_loops), return_exceptions=True,
    ) 
Developer: albertodonato, Project: query-exporter, Lines: 19, Source: test_loop.py
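
Example 4 passes return_exceptions=True so that one failing query_loop.stop() cannot abort the teardown of the others: instead of raising, gather places exception objects into the result list. A small illustrative sketch; stop is a placeholder, not the project's API:

import asyncio

async def stop(name):
    if name == "bad":
        raise RuntimeError(f"{name} failed to stop")
    return f"{name} stopped"

async def main():
    # With return_exceptions=True, gather never raises; failures show up
    # as exception objects in the result list, so cleanup continues.
    results = await asyncio.gather(
        stop("a"), stop("bad"), stop("b"), return_exceptions=True
    )
    for result in results:
        if isinstance(result, Exception):
            print("ignored during teardown:", result)
        else:
            print(result)

asyncio.run(main())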

Example 5: _do_ops

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def _do_ops(self, ops):
        try:
            for r in await asyncio.gather(*ops, return_exceptions=True):
                if isinstance(r, MessageNotModifiedError):
                    logging.debug("db not modified", exc_info=r)
                elif isinstance(r, Exception):
                    raise r  # Makes more sense to raise even for MessageEditTimeExpiredError
                elif not isinstance(r, Message):
                    logging.debug("unknown ret from gather, %r", r)
        except MessageEditTimeExpiredError:
            logging.debug("Making new channel.")
            _db = self.db
            self.db = None
            await self._client(DeleteChannelRequest(channel=_db))
            return True
        return False 
Developer: friendly-telegram, Project: friendly-telegram, Lines: 18, Source: backend.py

Example 6: test_main

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_main(self):
        # Need to run within a Task, as the scope manager depends
        # on Task.current_task()
        async def main_task():
            with self.tracer.start_active_span('parent'):
                tasks = self.submit_callbacks()
                await asyncio.gather(*tasks)

        self.loop.create_task(main_task())

        stop_loop_when(self.loop,
                       lambda: len(self.tracer.finished_spans()) >= 4)
        self.loop.run_forever()

        spans = self.tracer.finished_spans()
        self.assertEqual(len(spans), 4)
        self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

        for i in range(3):
            self.assertSameTrace(spans[i], spans[-1])
            self.assertIsChildOf(spans[i], spans[-1]) 
Developer: opentracing, Project: opentracing-python, Lines: 23, Source: test_contextvars.py

Example 7: _cancel_all_tasks

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
    tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
    if not tasks:
        return

    for task in tasks:
        task.cancel()
    loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))

    for task in tasks:
        if not task.cancelled() and task.exception() is not None:
            loop.call_exception_handler(
                {
                    "message": "unhandled exception during shutdown",
                    "exception": task.exception(),
                    "task": task,
                }
            ) 
Developer: pgjones, Project: quart, Lines: 20, Source: app.py
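
Note that Example 7 passes loop=loop to asyncio.gather; that parameter was deprecated in Python 3.8 and removed in Python 3.10, where gather infers the loop from the awaitables themselves. A sketch of the same shutdown helper written for current Python versions, assuming the tasks already belong to the given loop:

import asyncio

def cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
    tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
    if not tasks:
        return
    for task in tasks:
        task.cancel()
    # No loop= argument: since Python 3.10, gather picks up the event loop
    # from the tasks being gathered.
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))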

Example 8: test_task_local

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_task_local() -> None:
    local_ = TaskLocal()
    queue: asyncio.Queue = asyncio.Queue()
    tasks = 2
    for _ in range(tasks):
        queue.put_nowait(None)

    async def _test_local(value: int) -> int:
        local_.test = value
        await queue.get()
        queue.task_done()
        await queue.join()
        return local_.test

    futures = [asyncio.ensure_future(_test_local(value)) for value in range(tasks)]
    asyncio.gather(*futures)
    for value, future in enumerate(futures):
        assert (await future) == value 
Developer: pgjones, Project: quart, Lines: 20, Source: test_local.py

Example 9: run

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def run(self, host):
        tasks = []
        # Default limit=100; enable_cleanup_closed=True prevents leaked SSL connections; raise ttl_dns_cache to cache DNS lookups longer
        conn = aiohttp.TCPConnector(
            limit=LIMIT,
            enable_cleanup_closed=True,
            ttl_dns_cache=100,
            ssl=False,
        )
        timeout = aiohttp.ClientTimeout(total=60, connect=2)
        async with aiohttp.ClientSession(connector=conn, timeout=timeout) as session:
            for url in self.urls:
                task = asyncio.ensure_future(self.scan(host, url, session))
                tasks.append(task)
            # gather only produces output once all requests have completed
            _ = await asyncio.gather(*tasks)
            # for i in asyncio.as_completed(tasks):  # similar to tasks in a thread pool
            #     answer = await i
    
    # Create the startup tasks
Developer: al0ne, Project: Vxscan, Lines: 22, Source: async_scan.py
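
The commented-out lines in Example 9 hint at the trade-off between asyncio.gather and asyncio.as_completed: gather hands back all results at once (in argument order) after everything finishes, while as_completed yields each result as soon as it is ready. A standalone sketch of the difference; probe and the URL list are illustrative only:

import asyncio
import random

async def probe(url):
    # Stand-in for an HTTP request with variable latency.
    await asyncio.sleep(random.random() / 10)
    return url

async def main():
    urls = ["/admin", "/backup.zip", "/.git/config"]

    # gather: one combined result list, in the order the URLs were passed.
    print(await asyncio.gather(*(probe(u) for u in urls)))

    # as_completed: results arrive as each coroutine finishes.
    for fut in asyncio.as_completed([probe(u) for u in urls]):
        print(await fut)

asyncio.run(main())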

Example 10: import_file

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def import_file(filename):
    log.info("import_file: {}".format(filename))
    loop = globals["loop"]
    max_concurrent_tasks = config.get("max_concurrent_tasks")
    tasks = []
    with open(filename, 'r') as fh:
        for line in fh:
            line = line.rstrip()
            #loop.run_until_complete(import_line(line))
            tasks.append(asyncio.ensure_future(import_line(line)))
            if len(tasks) < max_concurrent_tasks:
                continue  # get next line
            # got a batch, move them out!
            loop.run_until_complete(asyncio.gather(*tasks))
            tasks = []
    # finish any remaining tasks
    loop.run_until_complete(asyncio.gather(*tasks))
    globals["files_read"] += 1 
Developer: HDFGroup, Project: hsds, Lines: 20, Source: import_ghcn_file.py
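
Example 10 limits concurrency by collecting at most max_concurrent_tasks futures and then running each batch with gather. An alternative pattern, shown here only as a sketch, is to gather everything once and cap concurrency with an asyncio.Semaphore; import_line below is a trivial stand-in for the real import work:

import asyncio

async def import_line(line):
    await asyncio.sleep(0.01)  # stand-in for the real import work

async def import_lines(lines, max_concurrent_tasks=10):
    sem = asyncio.Semaphore(max_concurrent_tasks)

    async def bounded(line):
        async with sem:
            await import_line(line)

    # A single gather over all lines; the semaphore caps how many coroutines
    # run at the same time, so no manual batching is needed.
    await asyncio.gather(*(bounded(line) for line in lines))

asyncio.run(import_lines(["a", "b", "c"]))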

Example 11: _get_dispatches

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def _get_dispatches(
        self, payload: Payload, request: Request
    ) -> Tuple[Payload, Set[str]]:
        # Run all dispatchers to form our initial set of worker plugins to run
        worker_plugins: Set[str] = set(
            self.always_dispatch
        ) if self.always_dispatch else set()
        dispatch_results: List[Set[str]] = await asyncio.gather(  # type: ignore
            *[
                self._apply_dispatcher(dispatcher, payload, request)
                for dispatcher in self._loaded_dispatcher_plugins.values()
            ]
        )
        for dispatch_result in dispatch_results:
            worker_plugins.update(dispatch_result)
        return payload, worker_plugins 
Developer: PUNCH-Cyber, Project: stoq, Lines: 18, Source: core.py

Example 12: test_increment_lock

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def test_increment_lock(self):
        """Test that we can't produce a race condition in .increment."""
        await self.cog.redis.set("test_key", 0)
        tasks = []

        # Increment this a lot in different tasks
        for _ in range(100):
            task = asyncio.create_task(
                self.cog.redis.increment("test_key", 1)
            )
            tasks.append(task)
        await asyncio.gather(*tasks)

        # Confirm that the value has been incremented the exact right number of times.
        value = await self.cog.redis.get("test_key")
        self.assertEqual(value, 100) 
Developer: python-discord, Project: bot, Lines: 18, Source: test_redis_cache.py
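
Example 12 gathers 100 concurrent increments to prove that .increment cannot lose updates. The sketch below shows the lost-update race the test guards against, using a plain module-level counter instead of Redis; it is purely illustrative:

import asyncio

counter = 0

async def unsafe_increment():
    global counter
    current = counter
    await asyncio.sleep(0)  # yield control in the middle of read-modify-write
    counter = current + 1

async def main():
    await asyncio.gather(*(unsafe_increment() for _ in range(100)))
    # All 100 tasks read 0 before any wrote back, so the final value is 1
    # instead of 100, which is exactly the race an atomic increment avoids.
    print(counter)

asyncio.run(main())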

Example 13: refresh_inventory

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def refresh_inventory(self) -> None:
        """Refresh internal documentation inventory."""
        log.debug("Refreshing documentation inventory...")

        # Clear the old base URLs and inventories to ensure
        # that we start from a fresh local dataset.
        # Also, reset the cache used for fetching documentation.
        self.base_urls.clear()
        self.inventories.clear()
        self.renamed_symbols.clear()
        async_cache.cache = OrderedDict()

        # Run all coroutines concurrently - since each of them performs an HTTP
        # request, this significantly speeds up fetching the inventory data.
        coros = [
            self.update_single(
                package["package"], package["base_url"], package["inventory_url"]
            ) for package in await self.bot.api_client.get('bot/documentation-links')
        ]
        await asyncio.gather(*coros) 
Developer: python-discord, Project: bot, Lines: 22, Source: doc.py

Example 14: main

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def main():
    loop = asyncio.get_event_loop()
    tsk = asyncio.ensure_future(pubsub(), loop=loop)

    async def publish():
        pub = await aioredis.create_redis(
            'redis://localhost')
        while not tsk.done():
            # wait for clients to subscribe
            while True:
                subs = await pub.pubsub_numsub('channel:1')
                if subs[b'channel:1'] == 1:
                    break
                await asyncio.sleep(0, loop=loop)
            # publish some messages
            for msg in ['one', 'two', 'three']:
                await pub.publish('channel:1', msg)
            # send stop word
            await pub.publish('channel:1', STOPWORD)
        pub.close()
        await pub.wait_closed()

    loop.run_until_complete(asyncio.gather(publish(), tsk, loop=loop)) 
Developer: aio-libs, Project: aioredis, Lines: 25, Source: pool_pubsub.py

Example 15: pipeline

# Required module: import asyncio [as alias]
# Or: from asyncio import gather [as alias]
def pipeline(self):
        """Returns :class:`Pipeline` object to execute bulk of commands.

        It is provided for convenience.
        Commands can be pipelined without it.

        Example:

        >>> pipe = redis.pipeline()
        >>> fut1 = pipe.incr('foo') # NO `await` as it will block forever!
        >>> fut2 = pipe.incr('bar')
        >>> result = await pipe.execute()
        >>> result
        [1, 1]
        >>> await asyncio.gather(fut1, fut2)
        [1, 1]
        >>> #
        >>> # The same can be done without pipeline:
        >>> #
        >>> fut1 = redis.incr('foo')    # the 'INCR foo' command already sent
        >>> fut2 = redis.incr('bar')
        >>> await asyncio.gather(fut1, fut2)
        [2, 2]
        """
        return Pipeline(self._pool_or_conn, self.__class__) 
Developer: aio-libs, Project: aioredis, Lines: 27, Source: transaction.py


Note: The asyncio.gather examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright remains with those authors. Please consult each project's license before distributing or reusing the code; do not reproduce this compilation without permission.