This article collects typical usage examples of the Python function tornado.concurrent.is_future. If you have been wondering what exactly is_future does, how it is used, or what real-world calls to it look like, the hand-picked examples here may help.
The sections below present 15 code examples of the is_future function, ordered by popularity by default.
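Before turning to the examples, here is a minimal sketch of what is_future reports (this snippet is illustrative and not taken from any of the projects quoted below): it is a plain type check that tells the caller whether an object is a Future that still has to be awaited, or an ordinary value that can be used directly.

from concurrent.futures import Future as ConcurrentFuture

from tornado.concurrent import Future, is_future

print(is_future(Future()))            # True: a Tornado future
print(is_future(ConcurrentFuture()))  # True: concurrent.futures futures count as well
print(is_future(42))                  # False: a plain value
print(is_future(None))                # False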
Example 1: _execute
def _execute(self, transforms, *args, **kwargs):
    """Executes this request with the given output transforms.

    This is basically a copy of tornado's `_execute()` method. The only
    difference is the expected result. Tornado expects the result to be
    `None`, where we want this to be a :py:class:`Model`."""
    verb = self.request.method.lower()
    headers = self.request.headers
    self._transforms = transforms
    try:
        if self.request.method not in self.SUPPORTED_METHODS:
            raise HTTPError(405)
        self.path_args = [self.decode_argument(arg) for arg in args]
        self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                for (k, v) in kwargs.items())
        # If XSRF cookies are turned on, reject form submissions without
        # the proper cookie
        if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                self.application.settings.get("xsrf_cookies"):
            self.check_xsrf_cookie()

        result = self.prepare()
        if is_future(result):
            result = yield result
        if result is not None:
            raise TypeError("Expected None, got %r" % result)
        if self._prepared_future is not None:
            # Tell the Application we've finished with prepare()
            # and are ready for the body to arrive.
            self._prepared_future.set_result(None)
        if self._finished:
            return

        if _has_stream_request_body(self.__class__):
            # In streaming mode request.body is a Future that signals
            # the body has been completely received. The Future has no
            # result; the data has been passed to self.data_received
            # instead.
            try:
                yield self.request.body
            except iostream.StreamClosedError:
                return

        method = getattr(self, self.request.method.lower())
        result = method(*self.path_args, **self.path_kwargs)
        if is_future(result):
            result = yield result
        if result is not None:
            self._provide_result(verb, headers, result)
        if self._auto_finish and not self._finished:
            self.finish()
    except Exception as e:
        self._handle_request_exception(e)
        if (self._prepared_future is not None and
                not self._prepared_future.done()):
            # In case we failed before setting _prepared_future, do it
            # now (to unblock the HTTP server). Note that this is not
            # in a finally block to avoid GC issues prior to Python 3.4.
            self._prepared_future.set_result(None)
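The two is_future checks in this method are what let prepare() and the HTTP verb methods be written either as plain methods or as coroutines. As a rough illustration (the handlers below are hypothetical and not part of the project that defines this _execute), both of these would be dispatched correctly:

from tornado import gen, web


class PlainHandler(web.RequestHandler):
    def prepare(self):
        # Returns None; is_future(result) is False, so nothing is yielded.
        self.db = {"answer": 42}

    def get(self):
        self.write(self.db)


class CoroutineHandler(web.RequestHandler):
    @gen.coroutine
    def prepare(self):
        # Returns a Future; _execute sees is_future(result) and yields it
        # before dispatching to the verb method.
        yield gen.sleep(0.01)          # stand-in for an async lookup
        self.db = {"answer": 42}

    @gen.coroutine
    def get(self):
        # Also returns a Future; the second is_future check yields it too.
        yield gen.sleep(0.01)
        self.write(self.db)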
Example 2: handle_yield
def handle_yield(self, yielded):
    if isinstance(yielded, list):
        if all(is_future(f) for f in yielded):
            yielded = multi_future(yielded)
        else:
            yielded = Multi(yielded)
    elif isinstance(yielded, dict):
        if all(is_future(f) for f in yielded.values()):
            yielded = multi_future(yielded)
        else:
            yielded = Multi(yielded)

    if isinstance(yielded, YieldPoint):
        self.future = TracebackFuture()

        def start_yield_point():
            try:
                yielded.start(self)
                if yielded.is_ready():
                    self.future.set_result(
                        yielded.get_result())
                else:
                    self.yield_point = yielded
            except Exception:
                self.future = TracebackFuture()
                self.future.set_exc_info(sys.exc_info())

        if self.stack_context_deactivate is None:
            # Start a stack context if this is the first
            # YieldPoint we've seen.
            with stack_context.ExceptionStackContext(
                    self.handle_exception) as deactivate:
                self.stack_context_deactivate = deactivate

                def cb():
                    start_yield_point()
                    self.run()
                self.io_loop.add_callback(cb)
                return False
        else:
            start_yield_point()
    elif is_future(yielded):
        self.future = yielded
        if not self.future.done() or self.future is moment:
            self.io_loop.add_future(
                self.future, lambda f: self.run())
            return False
    else:
        self.future = TracebackFuture()
        self.future.set_exception(BadYieldError(
            "yielded unknown object %r" % (yielded,)))
    return True
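The list and dict branches above are what allow a coroutine to wait on several futures at once simply by yielding them together. A minimal, hypothetical coroutine exercising those branches (the URLs are placeholders):

from tornado import gen
from tornado.httpclient import AsyncHTTPClient


@gen.coroutine
def fetch_both():
    client = AsyncHTTPClient()
    # Yielding a list of futures: handle_yield wraps it with multi_future().
    responses = yield [client.fetch("http://example.com/a"),
                       client.fetch("http://example.com/b")]
    # Yielding a dict of futures works the same way and preserves the keys.
    by_name = yield {"a": client.fetch("http://example.com/a"),
                     "b": client.fetch("http://example.com/b")}
    raise gen.Return((responses, by_name))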
Example 3: onRecvStream
def onRecvStream(self, stream, msg):
    # If the handler triggers an exception, pyzmq will disable it.
    # Here we catch any exception and just log it, so that processing
    # can continue.
    try:
        cname = stream.channel_name
        self.counters[cname] += 1
        #buf_len = self.counters[cname] - self.counters[output_cname]
        #self.logger.debug('Received on %s: %d', cname, buf_len)
        handler = self.get_recv_handler(cname)
        fut = handler(msg)
        if is_future(fut):
            self.loop.add_future(fut, self.onFuture)
        output_cname = self.channels[cname].drained_by
        if output_cname:
            buf_len = self.streams[output_cname]._send_queue.qsize()
            hwm = self.channels[cname].hwm
            if buf_len >= (hwm - (hwm * 0.2)):
                msg = "Pausing receive on: %s (Buffered: %d, HWM: %d)"
                hwm = self.channels[cname].hwm
                self.logger.info(msg, cname, buf_len, hwm)
                stream.stop_on_recv()
    except Error as err:
        self.logger.exception(err)
        self.init_db()
    except Exception as err:
        self.logger.exception(err)
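The call pattern used here — invoke a handler and, if the return value is a future, register a completion callback with IOLoop.add_future() instead of yielding — also works outside of zmq streams. A small self-contained sketch with illustrative names:

from tornado import gen
from tornado.concurrent import is_future
from tornado.ioloop import IOLoop


@gen.coroutine
def slow_handler(msg):
    yield gen.sleep(0.1)
    raise gen.Return("handled %r" % (msg,))


def on_done(future):
    # Called on the IOLoop once the handler's future resolves;
    # future.result() re-raises any exception from the handler.
    print(future.result())


def dispatch(handler, msg):
    result = handler(msg)
    if is_future(result):
        IOLoop.current().add_future(result, on_done)
    else:
        print(result)


if __name__ == "__main__":
    loop = IOLoop.current()
    loop.add_callback(dispatch, slow_handler, b"ping")
    loop.call_later(0.5, loop.stop)
    loop.start()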
Example 4: execute_next
def execute_next(self, request, types, process_object, *args, **kwargs):
    midd = None
    if types == _TCALL:
        midd = ('call_midds', 'process_call',)
    elif types == _TREQ:
        midd = ('request_midds', 'process_request',)
    elif types == _TREN:
        midd = ('render_midds', 'process_render',)
    elif types == _TRES:
        midd = ('response_midds', 'process_response',)
    elif types == _TEND:
        midd = ('end_midds', 'process_endcall',)
    elif types == _TEXC:
        midd = ('exc_midds', 'process_exception',)
    if midd:
        while 1:
            method = self._get_func(request, midd[0], midd[1])
            if method and callable(method):
                clear = partial(self.clear_all, request)
                result = method(process_object, clear, *args, **kwargs)
                if is_future(result):
                    result = yield result
                if result:
                    break
            else:
                break
Example 5: new_fn
def new_fn(self, *args, **kwargs):
    result = fn(self, *args, **kwargs)
    if not is_future(result):
        # If the function doesn't return a future, its exception or result
        # is available to the caller right away. No need to switch control
        # with greenlets.
        return result

    current = greenlet.getcurrent()
    assert current.parent is not None, "TornadoAction can only be used from inside a child greenlet."

    def callback(future):
        if future.exception():
            self.io_loop.add_callback(current.throw, *future.exc_info())
        else:
            self.io_loop.add_callback(current.switch, future.result())

    # Otherwise, switch to parent and schedule to switch back when the
    # result is available.
    # A note about add_done_callback: It executes the callback right away
    # if the future has already finished executing. That's a problem
    # because we don't want the greenlet switch back to current to happen
    # until we've switched to parent first. So, io_loop.add_callback is
    # used to schedule the future callback. This ensures that we switch to
    # parent first.
    self.io_loop.add_callback(result.add_done_callback, callback)
    return current.parent.switch()
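A rough sketch of the machinery this decorator assumes, for readers unfamiliar with greenlets (all names here are illustrative, and the third-party greenlet package must be installed): a child greenlet asks for an asynchronous result, parks itself with parent.switch(), and is switched back in once the future resolves on the IOLoop.

import greenlet
from tornado.concurrent import Future, is_future
from tornado.ioloop import IOLoop


def async_answer(io_loop):
    # Returns a Future that resolves a moment later on the IOLoop.
    fut = Future()
    io_loop.call_later(0.05, fut.set_result, 42)
    return fut


def blocking_style(io_loop):
    # Runs inside a child greenlet; its parent is the greenlet running the IOLoop.
    result = async_answer(io_loop)
    if is_future(result):
        current = greenlet.getcurrent()

        def callback(future):
            io_loop.add_callback(current.switch, future.result())

        # Register the done-callback from the IOLoop greenlet so we are
        # guaranteed to have switched to the parent before it fires.
        io_loop.add_callback(result.add_done_callback, callback)
        result = current.parent.switch()
    print("got", result)               # got 42
    io_loop.stop()


if __name__ == "__main__":
    loop = IOLoop.current()
    loop.add_callback(greenlet.greenlet(blocking_style).switch, loop)
    loop.start()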
Example 6: handle_request
def handle_request(self, topic, message):
    if topic == self.subtopic:
        # event from proxy received
        try:
            data = etree.fromstring(message, PluginRegistry.getEventParser())
            event_type = stripNs(data.xpath('/g:Event/*', namespaces={'g': "http://www.gonicus.de/Events"})[0].tag)
            if event_type == "ClientLeave":
                proxy_id = str(data.ClientLeave.Id)
                registry = PluginRegistry.getInstance("BackendRegistry")
                registry.unregisterBackend(proxy_id)
        except etree.XMLSyntaxError as e:
            self.log.error("Event parsing error: %s" % e)
    elif topic.startswith(self.subtopic):
        response_topic = "%s/response" % "/".join(topic.split("/")[0:4])
        try:
            id_, res = self.process(topic, message)
            if is_future(res):
                res = yield res
            response = dumps({"result": res, "id": id_})
            self.log.debug("MQTT-RPC response: %s on topic %s" % (response, topic))
        except Exception as e:
            err = str(e)
            self.log.error("MQTT RPC call error: %s" % err)
            response = dumps({'id': topic.split("/")[-2], 'error': err})

        # Get rid of it...
        self.mqtt.send_message(response, topic=response_topic, qos=2)
    else:
        self.log.warning("unhandled topic request received: %s" % topic)
Example 7: on_message
def on_message(self, message):
    try:
        message = json.loads(message)
        dev = message["dev"]
        circuit = message["circuit"]
        try:
            value = message["value"]
        except KeyError:
            value = None
        try:
            cmd = message["cmd"]
        except KeyError:
            cmd = "set"
        try:
            device = Devices.by_name(dev, circuit)
            # result = device.set(value)
            func = getattr(device, cmd)
            if value is not None:
                result = func(value)
            else:
                result = func()
            if is_future(result):
                result = yield result
            print(result)
        except Exception as E:
            print(E)
    except Exception:
        # Skip it since we do not understand this message...
        print("Skipping WS message: %s" % message)
Example 8: convert_yielded
def convert_yielded(yielded):
    if isinstance(yielded, (list, dict)):
        return multi_future(yielded)
    elif is_future(yielded):
        return yielded
    else:
        raise BadYieldError("yielded unknown object %r" % (yielded,))
Example 9: stop
def stop(self, io_loop):
    """
    Asynchronously stop the application.

    :param tornado.ioloop.IOLoop io_loop: loop to run until all
        callbacks, timeouts, and queued calls are complete

    Call this method to start the application shutdown process.
    The IOLoop will be stopped once the application is completely
    shut down.

    """
    running_async = False
    shutdown = _ShutdownHandler(io_loop)
    for callback in self.on_shutdown_callbacks:
        try:
            maybe_future = callback(self.tornado_application)
            if asyncio.iscoroutine(maybe_future):
                maybe_future = asyncio.create_task(maybe_future)
            if concurrent.is_future(maybe_future):
                shutdown.add_future(maybe_future)
                running_async = True
        except Exception as error:
            self.logger.warning('exception raised from shutdown '
                                'callback %r, ignored: %s',
                                callback, error, exc_info=1)
    if not running_async:
        shutdown.on_shutdown_ready()
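The iscoroutine conversion plus the is_future check mean shutdown callbacks registered with such an application can be plain functions, functions returning futures, or native coroutines. Two hypothetical callbacks (illustrative names, assuming Tornado 5+ where Tornado futures are asyncio futures, so an asyncio.Task passes is_future):

import asyncio


def close_statsd(application):
    # Synchronous cleanup: returns None, so is_future() is False and the
    # shutdown handler has nothing to wait on.
    application.settings.pop('statsd', None)


async def drain_queue(application):
    # Native coroutine: the caller turns it into a task with
    # asyncio.create_task(), which is_future() then recognizes, so the
    # shutdown waits until this completes.
    await asyncio.sleep(0.1)    # stand-in for flushing pending work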
Example 10: convert_yielded
def convert_yielded(yielded):
    """Convert a yielded object into a `.Future`.

    The default implementation accepts lists, dictionaries, and Futures.

    If the `~functools.singledispatch` library is available, this function
    may be extended to support additional types. For example::

        @convert_yielded.register(asyncio.Future)
        def _(asyncio_future):
            return tornado.platform.asyncio.to_tornado_future(asyncio_future)

    .. versionadded:: 4.1
    """
    # Lists and dicts containing YieldPoints were handled earlier.
    if yielded is None:
        return moment
    elif isinstance(yielded, (list, dict)):
        return multi(yielded)
    elif is_future(yielded):
        return yielded
    elif isawaitable(yielded):
        return _wrap_awaitable(yielded)
    else:
        raise BadYieldError("yielded unknown object %r" % (yielded,))
Example 11: refresh
def refresh(self):
    """Load dynamic credentials from the AWS Instance Metadata and user
    data HTTP API.

    :raises: tornado_aws.exceptions.NoCredentialsError

    """
    LOGGER.debug('Refreshing EC2 IAM Credentials')
    # 'async' is a reserved word on modern Python, so use a different name
    is_async = isinstance(self._client, httpclient.AsyncHTTPClient)
    future = concurrent.TracebackFuture() if is_async else None
    try:
        result = self._fetch_credentials(is_async)
        if concurrent.is_future(result):

            def on_complete(response):
                exception = response.exception()
                if exception:
                    if isinstance(exception, httpclient.HTTPError) and \
                            exception.code == 599:
                        future.set_exception(
                            exceptions.NoCredentialsError())
                    else:
                        future.set_exception(exception)
                    return
                self._assign_credentials(response.result())
                future.set_result(True)

            self._ioloop.add_future(result, on_complete)
        else:
            self._assign_credentials(result)
    except (httpclient.HTTPError, OSError) as error:
        LOGGER.error('Error Fetching Credentials: %s', error)
        raise exceptions.NoCredentialsError()
    return future
Example 12: convert_yielded
def convert_yielded(yielded: _Yieldable) -> Future:
    """Convert a yielded object into a `.Future`.

    The default implementation accepts lists, dictionaries, and Futures.

    If the `~functools.singledispatch` library is available, this function
    may be extended to support additional types. For example::

        @convert_yielded.register(asyncio.Future)
        def _(asyncio_future):
            return tornado.platform.asyncio.to_tornado_future(asyncio_future)

    .. versionadded:: 4.1
    """
    if yielded is None or yielded is moment:
        return moment
    elif yielded is _null_future:
        return _null_future
    elif isinstance(yielded, (list, dict)):
        return multi(yielded)  # type: ignore
    elif is_future(yielded):
        return typing.cast(Future, yielded)
    elif isawaitable(yielded):
        return _wrap_awaitable(yielded)  # type: ignore
    else:
        raise BadYieldError("yielded unknown object %r" % (yielded,))
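Calling convert_yielded directly shows the mapping that the coroutine runner relies on in the Tornado 5/6 implementations above; a minimal sketch:

from tornado.concurrent import Future
from tornado.gen import BadYieldError, convert_yielded, coroutine, moment
from tornado.ioloop import IOLoop


@coroutine
def demo():
    f = Future()
    print(convert_yielded(f) is f)          # True: futures are passed through
    print(convert_yielded(None) is moment)  # True: a bare yield waits one loop iteration
    print(convert_yielded({"a": f}))        # a single future wrapping the dict of futures
    try:
        convert_yielded(object())
    except BadYieldError as err:
        print(err)                          # yielded unknown object <object object at ...>
    f.set_result("done")
    yield f


IOLoop.current().run_sync(demo)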
Example 13: on_message
def on_message(self, message):
    '''Handle RPC calls.'''
    data = json_decode(message)
    for id_, action, kwargs in data.get("requests"):
        context = self.micro_context(
            self._client_id, id_, action, self._cookies_, self)
        try:
            LOGGER.info(
                "%s %s %s %r", id(self),
                context.action_id,
                context.action,
                kwargs)
            service = self.settings["services"].get(context.action)
            if service is None:
                raise Exception("No such service {}".format(context.action))
            result = service.perform(context, **kwargs)
            if concurrent.is_future(result):
                result.add_done_callback(
                    functools.partial(self.handle_future,
                                      service,
                                      context,
                                      False))
            else:
                self.handle_result(service, context, result)
        except Exception as ex:
            self.write_err(context, ex)
Example 14: _wrapper
def _wrapper(self, *args, **kwargs):
    try:
        # This code runs inside a @gen.coroutine decorator, so if it raises
        # an exception that is not caught with except, write_error cannot be
        # called automatically.
        validate_success = True
        errors = None
        if input_schema is not None:
            v = cerberus.Validator(input_schema)
            # Allow unknown fields in the submitted data
            v.allow_unknown = True
            if not v.validate(self.post_data):
                validate_success = False
                errors = v.errors
        if not validate_success:
            logger.warning(errors)
            # Validation failed; return an error
            self.fail(msg='提交的数据格式不正确')
        else:
            # Call the request_handler method
            ret = func(self, *args, **kwargs)
            if is_future(ret):
                yield ret
                # If rh_method is a coroutine and raises an exception, the
                # exception cannot be caught here (and write_error cannot be
                # called) unless we yield the returned future.
                logger.debug('yield')
    except gen.Return:
        pass
    except Exception as e:
        logger.debug(traceback.format_exc())
        logger.debug(e)
        self.write_error(GATEWAY_ERROR_STATUS_CODE, exc_info=sys.exc_info())
Example 15: maybe_future
def maybe_future(x):
    if is_future(x):
        return x
    else:
        fut = Future()
        fut.set_result(x)
        return fut
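maybe_future is the converse convenience of is_future: it normalizes a return value so the caller can always yield, whether the callee was synchronous or asynchronous. A brief hypothetical usage, with the helper repeated so the sketch is self-contained:

from tornado import gen
from tornado.concurrent import Future, is_future
from tornado.ioloop import IOLoop


def maybe_future(x):
    # Same helper as above: wrap plain values in an already-resolved Future.
    if is_future(x):
        return x
    fut = Future()
    fut.set_result(x)
    return fut


def sync_lookup(key):
    return {"answer": 42}[key]


@gen.coroutine
def async_lookup(key):
    yield gen.moment                      # stand-in for real async work
    raise gen.Return({"answer": 42}[key])


@gen.coroutine
def caller(lookup):
    # The same yield works whether lookup() returned a value or a Future.
    value = yield maybe_future(lookup("answer"))
    raise gen.Return(value)


print(IOLoop.current().run_sync(lambda: caller(sync_lookup)))   # 42
print(IOLoop.current().run_sync(lambda: caller(async_lookup)))  # 42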