This article collects typical usage examples of the eventlet.spawn_n method in Python. If you are wondering what eventlet.spawn_n does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the eventlet module that this method belongs to.
The following shows 7 code examples of eventlet.spawn_n, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
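Before the project-specific examples, here is a minimal, self-contained sketch of calling eventlet.spawn_n directly; the greet function and its argument are made up purely for illustration:

import eventlet

def greet(name):
    # Runs inside a green thread; spawn_n discards the return value and,
    # unlike eventlet.spawn, does not hand back a GreenThread object.
    print('hello, %s' % name)

# Schedule greet() on the hub; positional/keyword arguments are passed through as-is.
eventlet.spawn_n(greet, 'world')

# Yield to the hub so the pending green thread actually gets a chance to run.
eventlet.sleep(0)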
Example 1: spawn_n
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def spawn_n(func, *args, **kwargs):
    """Passthrough method for eventlet.spawn_n.

    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.

    It will also grab the context from the threadlocal store and add it to
    the store on the new thread. This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    _context = common_context.get_current()

    @functools.wraps(func)
    def context_wrapper(*args, **kwargs):
        # NOTE: If update_store is not called after spawn_n it won't be
        # available for the logger to pull from threadlocal storage.
        if _context is not None:
            _context.update_store()
        func(*args, **kwargs)

    eventlet.spawn_n(context_wrapper, *args, **kwargs)
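The docstring notes that this passthrough exists so tests can stub it out. A hedged sketch of how that might look in a unit test follows; the module path myproject.utils and the service.start() call are assumptions for illustration, not part of the example above:

from unittest import mock

# Replacing the passthrough makes the "spawned" work run synchronously,
# so assertions can be made immediately after the call under test.
with mock.patch('myproject.utils.spawn_n',  # hypothetical module path
                side_effect=lambda func, *a, **kw: func(*a, **kw)):
    service.start()  # hypothetical call that internally uses spawn_n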
Example 2: _child_process
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def _child_process(self, service):
    self._child_process_handle_signal()

    # Reopen the eventlet hub to make sure we don't share an epoll
    # fd with parent and/or siblings, which would be bad
    eventlet.hubs.use_hub()

    # Close write to ensure only parent has it open
    os.close(self.writepipe)
    # Create greenthread to watch for parent to close pipe
    eventlet.spawn_n(self._pipe_watcher)

    # Reseed random number generator
    random.seed()

    launcher = Launcher(self.conf, restart_method=self.restart_method)
    launcher.launch_service(service)
    return launcher
Example 3: _run_server
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def _run_server():
    host = cfg.CONF.stream.host
    port = cfg.CONF.stream.port

    LOG.info('(PID=%s) ST2 Stream API is serving on http://%s:%s.', os.getpid(), host, port)

    max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    worker_pool = eventlet.GreenPool(max_pool_size)
    sock = eventlet.listen((host, port))

    def queue_shutdown(signal_number, stack_frame):
        eventlet.spawn_n(shutdown_server_kill_pending_requests, sock=sock,
                         worker_pool=worker_pool,
                         wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME)

    # We register a custom SIGINT handler which allows us to kill long-running
    # active requests. Note: Eventually we will support draining (waiting for
    # short-running requests to finish), but we will still want to kill
    # long-running stream requests.
    register_stream_signal_handlers(handler_func=queue_shutdown)

    wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)
    return 0
Example 4: get_listener
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def get_listener(name):
    global _stream_listener
    global _execution_output_listener

    if name == 'stream':
        if not _stream_listener:
            with transport_utils.get_connection() as conn:
                _stream_listener = StreamListener(conn)
                eventlet.spawn_n(listen, _stream_listener)
        return _stream_listener
    elif name == 'execution_output':
        if not _execution_output_listener:
            with transport_utils.get_connection() as conn:
                _execution_output_listener = ExecutionOutputListener(conn)
                eventlet.spawn_n(listen, _execution_output_listener)
        return _execution_output_listener
    else:
        raise ValueError('Invalid listener name: %s' % (name))
Example 5: do_post
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def do_post(self):
    is_valid, msg, json = self.validate_json(self.request, silent=False)
    if not is_valid:
        logger.error('invalid json: %s' % msg)
        raise RuntimeError('invalid json')
    if json is None:
        raise RuntimeError('no json in request')
    if not isinstance(json, dict):
        raise RuntimeError('need a dict')
    eventlet.spawn_n(self.async_post, dict(json))
Example 6: startloopreport
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def startloopreport(self):
    if self.toxsession.report.tw.hasmarkup:
        eventlet.spawn_n(self.toxsession.report._loopreport)
Example 7: communicate
# Required module: import eventlet [as alias]
# Or alternatively: from eventlet import spawn_n [as alias]
def communicate(self):
    try:
        self._invoke()

        if not self.srequest.has_fd:
            self._wait_for_write_with_timeout(self._input_data_write_fd)

            # We do the writing in a different thread; otherwise we can run
            # into the following deadlock:
            # 1. middleware writes to Storlet
            # 2. Storlet reads and starts to write metadata and then data
            # 3. middleware continues writing
            # 4. Storlet continues writing and gets stuck as middleware
            #    is busy writing, but still not consuming the reader end
            #    of the Storlet writer.
            eventlet.spawn_n(self._write_input_data,
                             self._input_data_write_fd,
                             self.srequest.data_iter)

        for source in self.extra_data_sources:
            # NOTE(kota_): not sure right now if eventlet.spawn_n is the
            #              right choice here. Would a GreenPool be better?
            #              The deadlock described above is not fully
            #              understood for this case.
            self._wait_for_write_with_timeout(source['write_fd'])
            eventlet.spawn_n(self._write_input_data,
                             source['write_fd'],
                             source['data_iter'])

        out_md = self._read_metadata()
        self._wait_for_read_with_timeout(self.data_read_fd)

        return StorletResponse(out_md, data_fd=self.data_read_fd,
                               cancel=self._cancel)
    except Exception:
        self._close_local_side_descriptors()
        if not self.srequest.has_fd:
            self._close_input_data_descriptors()
        raise