Python gen.with_timeout Function Code Examples

This article collects typical usage examples of the Python function tornado.gen.with_timeout. If you are wondering what with_timeout does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The sections below present 15 code examples of the with_timeout function, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps surface better Python code examples.
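Before the individual examples, here is a minimal, self-contained sketch of the basic call pattern (the coroutine name slow_task is a hypothetical placeholder): gen.with_timeout takes the timeout first, either a datetime.timedelta or an absolute deadline relative to IOLoop.time(), followed by the future to wait on, and raises gen.TimeoutError if that future has not resolved in time.

from datetime import timedelta

from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def slow_task():
    # Hypothetical task that takes longer than the one-second timeout below.
    yield gen.sleep(2)
    raise gen.Return("done")


@gen.coroutine
def main():
    try:
        # The timeout comes first, then the future (or coroutine call) to wrap.
        result = yield gen.with_timeout(timedelta(seconds=1), slow_task())
        print(result)
    except gen.TimeoutError:
        print("slow_task did not finish within 1 second")


if __name__ == "__main__":
    IOLoop.current().run_sync(main)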

Example 1: _kill

    def _kill(self, stream=None, timeout=5):
        """ Kill the local worker process

        Blocks until both the process is down and the scheduler is properly
        informed
        """
        while not self.worker_port:
            yield gen.sleep(0.1)

        if self.process is not None:
            try:
                # Ask worker to close
                worker = rpc(ip='127.0.0.1', port=self.worker_port)
                result = yield gen.with_timeout(
                            timedelta(seconds=min(1, timeout)),
                            worker.terminate(report=False),
                            io_loop=self.loop)
            except gen.TimeoutError:
                logger.info("Worker non-responsive.  Terminating.")
            except StreamClosedError:
                pass
            except Exception as e:
                logger.exception(e)

            try:
                # Tell scheduler that worker is gone
                result = yield gen.with_timeout(timedelta(seconds=timeout),
                            self.scheduler.unregister(address=self.worker_address),
                            io_loop=self.loop)
                if result not in ('OK', 'already-removed'):
                    logger.critical("Unable to unregister with scheduler %s. "
                            "Nanny: %s, Worker: %s", result, self.address_tuple,
                            self.worker_address)
                else:
                    logger.info("Unregister worker %s:%d from scheduler",
                                self.ip, self.worker_port)
            except gen.TimeoutError:
                logger.info("Nanny %s:%d failed to unregister worker %s:%d",
                        self.ip, self.port, self.ip, self.worker_port,
                        exc_info=True)
            except StreamClosedError:
                pass
            except Exception as e:
                logger.exception(e)

            if self.process:
                with ignoring(OSError):
                    self.process.terminate()
                if self.process in processes_to_close:
                    processes_to_close.remove(self.process)

                start = time()
                while isalive(self.process) and time() < start + timeout:
                    sleep(0.01)

                self.process = None
                self.cleanup()
                logger.info("Nanny %s:%d kills worker process %s:%d",
                            self.ip, self.port, self.ip, self.worker_port)
        raise gen.Return('OK')
Author: broxtronix, Project: distributed, Lines: 60, Source: nanny.py

Example 2: connect

    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None,
                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`)

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = yield gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af))
        else:
            addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = yield gen.with_timeout(timeout, stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host))
            else:
                stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                                server_hostname=host)
        raise gen.Return(stream)
Author: Agnewee, Project: tornado, Lines: 57, Source: tcpclient.py
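As a companion to the docstring above, here is a minimal, hypothetical usage sketch (the address 127.0.0.1:8888 and the line-based banner protocol are placeholders): per the docstring, timeout accepts either a plain number of seconds or a datetime.timedelta.

import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient


@gen.coroutine
def fetch_banner():
    client = TCPClient()
    # A float such as 5.0 would also be accepted as the timeout.
    stream = yield client.connect("127.0.0.1", 8888,
                                  timeout=datetime.timedelta(seconds=5))
    try:
        line = yield stream.read_until(b"\n")
        raise gen.Return(line)
    finally:
        stream.close()


if __name__ == "__main__":
    print(IOLoop.current().run_sync(fetch_banner))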

Example 3: locator_health_check

 def locator_health_check(self, period=5):
     wait_timeout = datetime.timedelta(seconds=period)
     while True:
         try:
             self.logger.debug("check health status of locator via cluster method")
             channel = yield gen.with_timeout(wait_timeout, self.locator.cluster())
             cluster = yield gen.with_timeout(wait_timeout, channel.rx.get())
             self.locator_status = True
             self.logger.debug("dumped cluster %s", cluster)
             yield gen.sleep(period)
         except Exception as err:
             self.logger.error("health status check failed: %s", err)
             self.locator_status = False
             yield gen.sleep(1)
Author: cocaine, Project: cocaine-tools, Lines: 14, Source: proxy.py

Example 4: acquire

 def acquire(self, stream=None, name=None, id=None, timeout=None):
     with log_errors():
         if isinstance(name, list):
             name = tuple(name)
         if name not in self.ids:
             result = True
         else:
             while name in self.ids:
                 event = tornado.locks.Event()
                 self.events[name].append(event)
                 future = event.wait()
                 if timeout is not None:
                     future = gen.with_timeout(timedelta(seconds=timeout), future)
                 try:
                     yield future
                 except gen.TimeoutError:
                     result = False
                     break
                 else:
                     result = True
                 finally:
                     event2 = self.events[name].popleft()
                     assert event is event2
         if result:
             assert name not in self.ids
             self.ids[name] = id
         raise gen.Return(result)
Author: tomMoral, Project: distributed, Lines: 27, Source: lock.py

Example 5: wait

    def wait(self, timeout: Union[float, datetime.timedelta] = None) -> "Future[None]":
        """Block until the internal flag is true.

        Returns a Future, which raises `tornado.util.TimeoutError` after a
        timeout.
        """
        fut = Future()  # type: Future[None]
        if self._value:
            fut.set_result(None)
            return fut
        self._waiters.add(fut)
        fut.add_done_callback(lambda fut: self._waiters.remove(fut))
        if timeout is None:
            return fut
        else:
            timeout_fut = gen.with_timeout(
                timeout, fut, quiet_exceptions=(CancelledError,)
            )
            # This is a slightly clumsy workaround for the fact that
            # gen.with_timeout doesn't cancel its futures. Cancelling
            # fut will remove it from the waiters list.
            timeout_fut.add_done_callback(
                lambda tf: fut.cancel() if not fut.done() else None
            )
            return timeout_fut
Author: rgbkrk, Project: tornado, Lines: 25, Source: locks.py
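From the caller's side, the timeout path above behaves as in this small, hypothetical sketch: waiting on an Event that is never set raises tornado.util.TimeoutError once the gen.with_timeout wrapper fires.

import datetime

from tornado import gen, locks
from tornado.ioloop import IOLoop
from tornado.util import TimeoutError


@gen.coroutine
def wait_for_flag():
    event = locks.Event()
    try:
        # The event is never set here, so the one-second timeout fires.
        yield event.wait(timeout=datetime.timedelta(seconds=1))
        print("event was set")
    except TimeoutError:
        print("timed out waiting for the event")


if __name__ == "__main__":
    IOLoop.current().run_sync(wait_for_flag)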

Example 6: test_no_ref

 def test_no_ref(self):
     # In this usage, there is no direct hard reference to the
     # WaitIterator itself, only the Future it returns. Since
     # WaitIterator uses weak references internally to improve GC
     # performance, this used to cause problems.
     yield gen.with_timeout(datetime.timedelta(seconds=0.1),
                            gen.WaitIterator(gen.sleep(0)).next())
Author: 7924102, Project: tornado, Lines: 7, Source: gen_test.py

Example 7: spawn_single_user

    def spawn_single_user(self, user, options=None):
        if user.spawn_pending:
            raise RuntimeError("Spawn already pending for: %s" % user.name)
        tic = IOLoop.current().time()

        f = user.spawn(options)

        @gen.coroutine
        def finish_user_spawn(f=None):
            """Finish the user spawn by registering listeners and notifying the proxy.
            
            If the spawner is slow to start, this is passed as an async callback,
            otherwise it is called immediately.
            """
            if f and f.exception() is not None:
                # failed, don't add to the proxy
                return
            toc = IOLoop.current().time()
            self.log.info("User %s server took %.3f seconds to start", user.name, toc-tic)
            yield self.proxy.add_user(user)
            user.spawner.add_poll_callback(self.user_stopped, user)
        
        try:
            yield gen.with_timeout(timedelta(seconds=self.slow_spawn_timeout), f)
        except gen.TimeoutError:
            if user.spawn_pending:
                # hit timeout, but spawn is still pending
                self.log.warn("User %s server is slow to start", user.name)
                # schedule finish for when the user finishes spawning
                IOLoop.current().add_future(f, finish_user_spawn)
            else:
                raise
        else:
            yield finish_user_spawn()
Author: paul918, Project: jupyterhub, Lines: 34, Source: base.py

Example 8: start

    def start(self, image=None):
        """start the single-user server in a docker container"""
        tmp_dir = mkdtemp(suffix='everware')
        yield self.git('clone', self.repo_url, tmp_dir)
        # is this blocking?
        # use the username, git repo URL and HEAD commit sha to derive
        # the image name
        repo = git.Repo(tmp_dir)
        self.repo_sha = repo.rev_parse("HEAD")

        image_name = "everware/{}-{}-{}".format(self.user.name,
                                                self.escaped_repo_url,
                                                self.repo_sha)
        self.log.debug("Building image {}".format(image_name))
        build_log = yield gen.with_timeout(timedelta(30),
                                           self.docker('build',
                                                       path=tmp_dir,
                                                       tag=image_name,
                                                       rm=True)
        )
        self.log.debug("".join(str(line) for line in build_log))
        self.log.info("Built docker image {}".format(image_name))

        images = yield self.docker('images', image_name)
        self.log.debug(images)

        yield super(CustomDockerSpawner, self).start(
            image=image_name
        )
Author: OmeGak, Project: dockerspawner, Lines: 29, Source: customdockerspawner.py

Example 9: enter

    def enter(self, timeout=None):
        log.debug("Entering double barrier %s", self.base_path)
        time_limit = None
        if timeout is not None:
            time_limit = time.time() + timeout

        barrier_lifted = self.client.wait_for_event(
            WatchEvent.CREATED, self.sentinel_path
        )
        if time_limit:
            barrier_lifted = gen.with_timeout(time_limit, barrier_lifted)

        exists = yield self.client.exists(path=self.sentinel_path, watch=True)

        yield self.create_unique_znode("worker")

        _, participants = yield self.analyze_siblings()

        if exists:
            return

        elif len(participants) >= self.min_participants:
            yield self.create_znode(self.sentinel_path)
            return

        try:
            yield barrier_lifted
        except gen.TimeoutError:
            raise exc.TimeoutError
Author: Soniccube, Project: zoonado, Lines: 29, Source: double_barrier.py

Example 10: enquire

    def enquire(self, user, rider):
        yield self.sender({
            'chat_id': user['chat_id'],
            'text': 'request for a ride from:'
        })
        yield self.sender({
            'chat_id': user['chat_id'],
            'location': rider['current_location']
        })
        yield self.sender({
            'chat_id': user['chat_id'],
            'text': 'to:'
        })
        yield self.sender({
            'chat_id': user['chat_id'],
            'location': rider['target_location']
        })
        yield self.sender({
            'chat_id': user['chat_id'],
            'text': 'how much do you charge for it? (example answer: 25)'
        })
        DriverQueue.futures[user['chat_id']] = gen.with_timeout(
            timedelta(seconds=self.enquire_timeout), Future())

        try:
            bid = yield DriverQueue.futures[user['chat_id']]
            return (bid, user)
        except:
            pass
        finally:
            DriverQueue.futures.pop(user['chat_id'])
Author: gonber, Project: cabrobot, Lines: 31, Source: driverqueue.py

Example 11: recv

 def recv():
     while True:
         result = yield gen.with_timeout(timedelta(seconds=1), read(stream))
         print(result)
         L.extend(result)
         if result[-1] == 9999:
             break
Author: HugoTian, Project: distributed, Lines: 7, Source: test_batched.py

Example 12: test_serializers

def test_serializers():
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval='10ms', serializers=['msgpack'])
        b.start(comm)

        b.send({'x': to_serialize(123)})
        b.send({'x': to_serialize('hello')})
        yield gen.sleep(0.100)

        b.send({'x': to_serialize(lambda x: x + 1)})

        with captured_logger('distributed.protocol') as sio:
            yield gen.sleep(0.100)

        value = sio.getvalue()
        assert 'serialize' in value
        assert 'type' in value
        assert 'function' in value

        msg = yield comm.read()
        assert list(msg) == [{'x': 123}, {'x': 'hello'}]

        with pytest.raises(gen.TimeoutError):
            msg = yield gen.with_timeout(timedelta(milliseconds=100), comm.read())
Author: tomMoral, Project: distributed, Lines: 26, Source: test_batched.py

Example 13: run_traffic_jam

def run_traffic_jam(nsends, nbytes):
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval=0.01)
        b.start(comm)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), comm.read())
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        comm.close()  # external closing
        yield b.close()
Author: tomMoral, Project: distributed, Lines: 34, Source: test_batched.py

Example 14: _connect

    def _connect(self,
                 conn_type,
                 session,
                 force_close,
                 force_release,
                 pool):
        future = self._future_class()
        request = self._connector(self._url)
        if self._timeout:
            future_conn = with_timeout(self._timeout, websocket_connect(request))
        else:
            future_conn = websocket_connect(request)

        def get_conn(f):
            try:
                conn = f.result()
            except socket.error:
                future.set_exception(
                    RuntimeError("Could not connect to server."))
            except socket.gaierror:
                future.set_exception(
                    RuntimeError("Could not connect to server."))
            except HTTPError as e:
                future.set_exception(e)
            except Exception as e:
                future.set_exception(e)
            else:
                resp = Response(conn, self._future_class, self._loop)
                gc = conn_type(resp, self._future_class, self._timeout,
                               self._username, self._password, self._loop,
                               force_close, pool, force_release, session)
                future.set_result(gc)
        future_conn.add_done_callback(get_conn)
        return future
Author: davebshow, Project: gremlinclient, Lines: 34, Source: client.py

Example 15: test_normal_concurrent_future

 def test_normal_concurrent_future(self):
     # A concurrent future that resolves while waiting for the timeout.
     with futures.ThreadPoolExecutor(1) as executor:
         yield gen.with_timeout(
             datetime.timedelta(seconds=3600),
             executor.submit(lambda: time.sleep(0.01)),
         )
Author: rgbkrk, Project: tornado, Lines: 7, Source: gen_test.py


Note: The tornado.gen.with_timeout examples in this article were collected by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; refer to each project's license before distributing or using the code. Do not reproduce this article without permission.