本文整理汇总了Python中tornado.concurrent.chain_future函数的典型用法代码示例。如果您正苦于以下问题:Python chain_future函数的具体用法?Python chain_future怎么用?Python chain_future使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了chain_future函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: when_available
def when_available(fut):
try:
conn = fut.result()
except psycopg2.Error as error:
future.set_exc_info(sys.exc_info())
if retry:
self.putconn(retry[0])
return
log.debug("Obtained connection: %s", conn.fileno)
try:
future_or_result = method(conn, *args, **kwargs)
except psycopg2.Error as error:
if conn.closed:
if not retry:
retry.append(conn)
self.ioloop.add_future(conn.connect(), when_available)
return
else:
future.set_exception(self._no_conn_availble_error)
else:
future.set_exc_info(sys.exc_info())
log.debug(2)
self.putconn(conn)
return
if not async:
future.set_result(future_or_result)
log.debug(3)
self.putconn(conn)
return
chain_future(future_or_result, future)
if not keep:
future.add_done_callback(lambda f: self.putconn(conn))
示例2: on_reanimate_done
def on_reanimate_done(fut):
    """After attempting to revive dead connections, either fail the
    caller's future (all connections still dead) or chain a freshly
    acquired connection future into it."""
    if self.conns.all_dead:
        future.set_exception(self._no_conn_available_error)
        return
    acquired = self.conns.acquire()
    assert isinstance(acquired, Future)
    chain_future(acquired, future)
示例3: handle_connection
def handle_connection(future):
    """Once the connection future resolves, send the message and chain
    its result into ``future1``; optionally relay the final result to a
    user ``callback`` on the IOLoop."""
    conn = future.result()
    if callback is not None:
        def handle_result(done):
            # Deliver the finished result to the user callback on the loop.
            self._ioloop.add_callback(callback, done.result())
        future1.add_done_callback(handle_result)
    chain_future(conn.send_message(args), future1)
示例4: watch
def watch(self, url_path, on_data, **kwargs):
    """Open a long-lived streaming GET on *url_path*, invoking *on_data*
    with each JSON-decoded chunk.  Returns a cancellable future whose
    ``cancel`` tears down the dedicated HTTP client."""

    class WatchFuture(Future):
        def cancel(self):
            # Closing the client aborts the in-flight streaming fetch.
            client.close()
            logging.debug("AsyncHTTPClient closed")

    def data_callback(chunk):
        on_data(json.loads(chunk))

    params = self.build_params(url_path, **kwargs)
    url = url_concat(self.build_url(url_path, **kwargs), params)
    request = HTTPRequest(url=url,
                          method="GET",
                          headers=self.build_headers(),
                          request_timeout=3600,
                          streaming_callback=data_callback)
    # force_instance: a private client, so cancel() cannot affect others.
    client = AsyncHTTPClient(force_instance=True)
    future = WatchFuture()
    chain_future(client.fetch(request), future)
    return future
示例5: __init__
def __init__(self, future, io_loop, timeout_td, timeout_exception):
    """Wrap *future*, failing with *timeout_exception* if it is not done
    within *timeout_td* (any form accepted by ``add_timeout``)."""
    super(_Wait, self).__init__()
    self._io_loop = io_loop
    self._timeout_exception = timeout_exception
    # Arm the timeout first, then mirror the wrapped future into self.
    self._timeout_obj = io_loop.add_timeout(timeout_td, self._on_timeout)
    concurrent.chain_future(future, self)
    future.add_done_callback(clear_tb_log)
示例6: with_timeout
def with_timeout(timeout, future, io_loop=None):
    """Wraps a `.Future` in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    Currently only supports Futures, not other `YieldPoint` classes.

    .. versionadded:: 4.0
    """
    # TODO: allow yield points in addition to futures?
    # Tricky to do with stack_context semantics.
    #
    # Cancelling the input future on timeout would avoid allocating a new
    # Future, but (a) other callers may be waiting on the same input
    # future, and (b) concurrent futures can only be cancelled while still
    # queued, so cancellation cannot reliably bound the wait.
    outcome = Future()
    chain_future(future, outcome)
    if io_loop is None:
        io_loop = IOLoop.current()
    handle = io_loop.add_timeout(
        timeout, lambda: outcome.set_exception(TimeoutError("Timeout")))
    if isinstance(future, Future):
        # This future resolves on the IOLoop, so a plain done-callback is
        # safe; no extra thread-safety or StackContext handling needed.
        future.add_done_callback(lambda f: io_loop.remove_timeout(handle))
    else:
        # concurrent.futures.Futures may resolve on any thread; route the
        # cleanup back to the IOLoop.
        io_loop.add_future(future, lambda f: io_loop.remove_timeout(handle))
    return outcome
示例7: with_timeout
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wraps a `.Future` (or other yieldable object) in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    If the wrapped `.Future` fails after it has timed out, the exception
    will be logged unless it is of a type contained in ``quiet_exceptions``
    (which may be an exception type or a sequence of types).

    Does not support `YieldPoint` subclasses.

    .. versionadded:: 4.0

    .. versionchanged:: 4.1
       Added the ``quiet_exceptions`` argument and the logging of unhandled
       exceptions.

    .. versionchanged:: 4.4
       Added support for yieldable objects other than `.Future`.
    """
    # TODO: allow YieldPoints in addition to other yieldables?
    # Tricky to do with stack_context semantics.
    #
    # Cancelling the input future on timeout would avoid allocating a new
    # Future, but (a) other callers may be waiting on the same input
    # future, and (b) concurrent futures can only be cancelled while still
    # queued, so cancellation cannot reliably bound the wait.
    future = convert_yielded(future)
    result = Future()
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(done):
        # Surface failures that land after the timeout has already fired.
        try:
            done.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                app_log.error("Exception in Future %r after timeout",
                              done, exc_info=True)

    def timeout_callback():
        result.set_exception(TimeoutError("Timeout"))

    # In case the wrapped future goes on to fail, log it.
    future.add_done_callback(error_callback)
    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        # This future resolves on the IOLoop, so a plain done-callback is
        # safe; no extra thread-safety or StackContext handling needed.
        future.add_done_callback(
            lambda f: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread; route the
        # cleanup back to the IOLoop.
        io_loop.add_future(
            future, lambda f: io_loop.remove_timeout(timeout_handle))
    return result
示例8: with_timeout
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
    """Wrap *future* so the returned future fails with
    ``TimeoutError("Timeout")`` if it is not done by *timeout*.

    Exceptions raised by *future* after the timeout are printed unless
    their type is contained in *quiet_exceptions*.
    """
    wrapped = Future()
    chain_future(future, wrapped)
    if io_loop is None:
        io_loop = IOLoop.current()

    def error_callback(done):
        # Surface late failures of the original future.
        try:
            done.result()
        except Exception as e:
            if not isinstance(e, quiet_exceptions):
                print("Exception in Future %r after timeout" % done)

    def timeout_callback():
        wrapped.set_exception(TimeoutError("Timeout"))

    # In case the wrapped future goes on to fail, log it.
    future.add_done_callback(error_callback)
    timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
    if isinstance(future, Future):
        future.add_done_callback(
            lambda f: io_loop.remove_timeout(timeout_handle))
    else:
        io_loop.add_future(
            future, lambda f: io_loop.remove_timeout(timeout_handle))
    return wrapped
示例9: watch
def watch(self, url_path, on_data, **kwargs):
    """Streaming GET that reassembles newline-delimited JSON documents
    across chunk boundaries, calling *on_data* once per complete
    document.  Returns a cancellable future."""
    local_data = dict(buffer="")

    class WatchFuture(Future):
        def cancel(self):
            # Closing the client aborts the in-flight streaming fetch.
            client.close()
            logging.debug("AsyncHTTPClient closed")

    def data_callback(data):
        pieces = data.split("\n")
        for i, part in enumerate(pieces):
            if i + 1 < len(pieces):
                # Every piece before the last ends a document: emit the
                # buffered prefix plus this piece, then reset the buffer.
                on_data(json.loads(local_data["buffer"] + part))
                local_data["buffer"] = ""
            else:
                # Trailing piece may be a partial line; keep it buffered.
                local_data["buffer"] += part

    params = self.build_params(url_path, **kwargs)
    url = url_concat(self.build_url(url_path, **kwargs), params)
    request = HTTPRequest(url=url,
                          method="GET",
                          headers=self.build_headers(),
                          request_timeout=3600,
                          streaming_callback=data_callback)
    # force_instance: a private client, so cancel() cannot affect others.
    client = AsyncHTTPClient(force_instance=True)
    future = WatchFuture()
    chain_future(client.fetch(request), future)
    return future
示例10: _return_result
def _return_result(self, done):
    """Copy the state of *done* into the future previously handed to the
    caller, and record *done* as the iterator's current future/index."""
    chain_future(done, self._running_future)
    self.current_future = done
    self.current_index = self._unfinished.pop(done)
示例11: facebook_request
def facebook_request(self, path, callback, access_token=None,
                     post_args=None, **args):
    """Fetches the given relative API path, e.g., "/btaylor/picture"

    If the request is a POST, ``post_args`` should be provided.  Query
    string arguments should be given as keyword arguments.

    An introduction to the Facebook Graph API can be found at
    http://developers.facebook.com/docs/api

    Many methods require an OAuth access token which you can obtain
    through `~OAuth2Mixin.authorize_redirect` and
    `get_authenticated_user`.  The user returned through that process
    includes an ``access_token`` attribute that can be used to make
    authenticated requests via this method.

    Example usage:

    ..testcode::

        class MainHandler(tornado.web.RequestHandler,
                          tornado.auth.FacebookGraphMixin):
            @tornado.web.authenticated
            @tornado.gen.coroutine
            def get(self):
                new_entry = yield self.facebook_request(
                    "/me/feed",
                    post_args={"message": "I am posting from my Tornado application!"},
                    access_token=self.current_user["access_token"])

                if not new_entry:
                    # Call failed; perhaps missing permission?
                    yield self.authorize_redirect()
                    return
                self.finish("Posted a message!")

    .. testoutput::
       :hide:

    The given path is relative to ``self._FACEBOOK_BASE_URL``,
    by default "https://graph.facebook.com".

    This method is a wrapper around `OAuth2Mixin.oauth2_request`;
    the only difference is that this method takes a relative path,
    while ``oauth2_request`` takes a complete url.

    .. versionchanged:: 3.1
       Added the ability to override ``self._FACEBOOK_BASE_URL``.
    """
    url = self._FACEBOOK_BASE_URL + path
    # The _auth_return_future decorator turned ``callback`` into a Future,
    # which cannot be passed as a plain callback to oauth2_request; have
    # oauth2_request return a future and chain the two together instead.
    oauth_future = self.oauth2_request(url, access_token=access_token,
                                       post_args=post_args, **args)
    chain_future(oauth_future, callback)
示例12: enqueue
def enqueue(self, task):
    """Return a future for *task*, coalescing a duplicate of an
    already-active task onto that task's existing future instead of
    running it a second time."""
    result = concurrent.Future()
    if self._in_active(task):
        # Duplicate submission: mirror the in-flight task's future.
        concurrent.chain_future(self._get_future_for_task(task), result)
    else:
        # New task: register it as active, then start it and chain.
        self._add_to_active(task, result)
        concurrent.chain_future(self._do(task), result)
    return result
示例13: _return_result
def _return_result(self, done: Future) -> None:
    """Copy the state of *done* into the future previously returned to
    the caller, and make *done* the iterator's current future/index.

    Raises a plain ``Exception`` if called while no future is running.
    """
    if self._running_future is None:
        raise Exception("no future is running")
    chain_future(done, self._running_future)
    self.current_future = done
    self.current_index = self._unfinished.pop(done)
示例14: start_request
def start_request(self, auth_result, request, user_result):
    """Issue *request* once authentication has completed, propagating
    auth failures or fetch errors into *user_result*."""
    if auth_result.exception():
        # Auth failed: mirror its exception straight into the user future.
        concurrent.chain_future(auth_result, user_result)
        return
    try:
        fetch_future = self.client.fetch(request)
    except Exception:
        user_result.set_exc_info(sys.exc_info())
    else:
        fetch_future.add_done_callback(
            functools.partial(self.on_request_done, user_result))
示例15: with_timeout
def with_timeout(timeout, future, io_loop=None):
    """Minimal timeout wrapper: the returned future mirrors *future*,
    but fails with ``TimeoutError("Timeout")`` if *future* is not done
    by *timeout*."""
    wrapped = Future()
    chain_future(future, wrapped)
    if io_loop is None:
        io_loop = IOLoop.current()
    handle = io_loop.add_timeout(
        timeout,
        lambda: wrapped.set_exception(TimeoutError("Timeout")))
    # Cancel the pending timeout as soon as the input future resolves.
    future.add_done_callback(lambda f: io_loop.remove_timeout(handle))
    return wrapped