本文整理汇总了Python中eventlet.greenpool.GreenPool.spawn方法的典型用法代码示例。如果您正苦于以下问题:Python GreenPool.spawn方法的具体用法?Python GreenPool.spawn怎么用?Python GreenPool.spawn使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类eventlet.greenpool.GreenPool
的用法示例。
在下文中一共展示了GreenPool.spawn方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_high_client_load
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
def test_high_client_load():
    """Thrash one fake service with 100 concurrent fake clients."""
    # Background service; it is never explicitly shut down by this test.
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6805")
    # NOTE(review): the service binds 6805 but clients dial 6804 —
    # confirm the port mismatch is intentional.
    pool = GreenPool()
    pid = os.getpid()
    for index in xrange(0, 100):
        pool.spawn(fake_client, "tcp://127.0.0.1:6804",
                   "%s:%s" % (pid, index))
    pool.waitall()
示例2: test_high_workload
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
def test_high_workload():
    """Round-robin three identified services under 100 client requests."""
    # fire up three services to receive in roundrobin style, giving
    # each an ident so we can make sure they're working that way
    for ident in (1, 2, 3):
        eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", ident)
    # NOTE(review): services bind 6900 but clients dial 6802 — confirm.
    pool = GreenPool()
    pid = os.getpid()
    # fire up a bunch of clients to thrash it at random
    for index in xrange(0, 100):
        pool.spawn(fake_client, "tcp://127.0.0.1:6802", "%s:%s" % (pid, index))
    pool.waitall()
示例3: test_high_client_load
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
def test_high_client_load():
    """One service, ten clients; counters are accumulated in a shared dict."""
    stats = {'clients': 0, 'services': 0}
    pool = GreenPool()
    pool.spawn(fake_service,
               "tcp://127.0.0.1:6801", stats)
    for index in xrange(0, 10):
        pool.spawn(fake_client, "tcp://127.0.0.1:6800",
                   "%s" % index, stats)
    pool.waitall()
    # Ten clients should each have checked in once; the service side is
    # expected to have recorded 100 interactions in total.
    assert_equal(stats['clients'], 10)
    assert_equal(stats['services'], 100)
示例4: _parallel_execute
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
def _parallel_execute(self, operation, *args):
    """
    Perform an operation against all fabrics, consolidate the responses
    into a dictionary keyed on fabric name.

    (In the original this docstring sat mid-body after the nested helper,
    where it was a no-op string statement rather than the docstring.)

    :param operation: callable invoked as operation(fabric_name, conn, *args)
    :param args: extra positional arguments forwarded to the operation
    :returns: dict mapping fabric name -> operation result
    :raises exception.ZoneManagerParallel: wrapping all per-fabric errors
    """
    def _spawn(context, operation, fabric_name, conn, *args):
        # Inherit this thread's context from the parent so child log
        # messages can be correlated with the caller's.
        context.update_store()

        # Serialize operations per fabric via an inter-process lock.
        @lockutils.synchronized(fabric_name, 'fcfabric-', True)
        def _locked_spawn(operation, fabric_name, conn, *args):
            return operation(fabric_name, conn, *args)

        return _locked_spawn(operation, fabric_name, conn, *args)

    pool = GreenPool(size=len(self.fabrics))
    # Obtain our current context so that we can make sure that our child
    # threads have the same context, so that we can correlate log messages
    # that they generate.
    context = getattr(local.store, 'context', None)
    threads = {}
    for fabric_name, conn in self.fabrics.iteritems():
        thread = pool.spawn(_spawn, context, operation, fabric_name, conn,
                            *args)
        threads[fabric_name] = thread
    # Collect the responses. This may raise exceptions when we call wait()
    # If they do, we collect them and raise a collective exception at the
    # end.
    responses = {}
    exceptions = []
    for fabric_name, thread in threads.iteritems():
        try:
            responses[fabric_name] = thread.wait()
        except Exception as e:
            # FabricExceptions can indicate that a backtrace is not
            # required if they contain sufficient debug information
            # themselves.
            if (not isinstance(e, exception.FabricException) or
                    e.backtrace_needed):
                LOG.exception(e)
            exceptions.append(e)
    # If any exceptions were raised, we throw an exception that
    # encapsulates them all.
    if exceptions:
        raise exception.ZoneManagerParallel(exceptions)
    return responses
示例5: GreenPool
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
# NOTE(review): tail of a config-loading function (its `def` lies outside
# this excerpt). It walks a <service> XML DOM and collects per-service
# messaging parameters into services_dict.
for service in services.getElementsByTagName('service'):
# The <name> element keys the resulting dict entry.
service_name = service.getElementsByTagName('name')[0].firstChild.nodeValue
# host='localhost', exchange='metadata', service_type='call', routing_key='meta_queue', on_request_name = 'on_request_metadata'
service_params = {}
service_params['service_type'] = service.attributes['type'].firstChild.nodeValue
service_params['host'] = service.getElementsByTagName('host')[0].firstChild.nodeValue
# Exchange, queue and callback names all live under <params>.
service_params['exchange'] = service.getElementsByTagName('params')[0].getElementsByTagName('exchange')[0].firstChild.nodeValue
service_params['routing_key'] = service.getElementsByTagName('params')[0].getElementsByTagName('queue')[0].firstChild.nodeValue
service_params['on_request_name'] = service.getElementsByTagName('params')[0].getElementsByTagName('callback')[0].firstChild.nodeValue
services_dict[service_name] = service_params
return services_dict
#For Test
if __name__ == '__main__':
import threading
pool = GreenPool()
services = get_services()
service = sys.argv[1]
if service in services.keys():
pool.spawn(Worker().start(**services.get(service, {})))
else: print 'no such service'
示例6: ServiceContainer
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
# Runtime host for a single service class: builds its dependency
# providers, runs workers in a bounded green-thread pool, and owns the
# graceful-stop / semi-graceful-kill lifecycle. (Excerpt truncated below.)
class ServiceContainer(object):
def __init__(self, service_cls, worker_ctx_cls, config):
self.service_cls = service_cls
self.worker_ctx_cls = worker_ctx_cls
self.service_name = get_service_name(service_cls)
self.config = config
# Pool size from config, falling back to the module default.
self.max_workers = config.get(MAX_WORKERS_KEY) or DEFAULT_MAX_WORKERS
self.dependencies = DependencySet()
for dep in prepare_dependencies(self):
self.dependencies.add(dep)
self.started = False
self._worker_pool = GreenPool(size=self.max_workers)
self._active_threads = set()
self._protected_threads = set()
self._being_killed = False
# Fired exactly once when the container dies (by stop() or kill()).
self._died = Event()
@property
def entrypoints(self):
# Providers that accept inbound events and spawn workers.
return filter(is_entrypoint_provider, self.dependencies)
@property
def injections(self):
# Providers injected into the service instance as attributes.
return filter(is_injection_provider, self.dependencies)
def start(self):
""" Start a container by starting all the dependency providers.
"""
_log.debug('starting %s', self)
self.started = True
with log_time(_log.debug, 'started %s in %0.3f sec', self):
self.dependencies.all.prepare()
self.dependencies.all.start()
def stop(self):
""" Stop the container gracefully.
First all entrypoints are asked to ``stop()``.
This ensures that no new worker threads are started.
It is the providers' responsibility to gracefully shut down when
``stop()`` is called on them and only return when they have stopped.
After all entrypoints have stopped the container waits for any
active workers to complete.
After all active workers have stopped the container stops all
injections.
At this point there should be no more managed threads. In case there
are any managed threads, they are killed by the container.
"""
# Idempotent: a second stop() after death is a no-op.
if self._died.ready():
_log.debug('already stopped %s', self)
return
_log.debug('stopping %s', self)
with log_time(_log.debug, 'stopped %s in %0.3f sec', self):
dependencies = self.dependencies
# entrypoint deps have to be stopped before injection deps
# to ensure that running workers can successfully complete
dependencies.entrypoints.all.stop()
# there might still be some running workers, which we have to
# wait for to complete before we can stop injection dependencies
self._worker_pool.waitall()
# it should be safe now to stop any injection as there is no
# active worker which could be using it
dependencies.injections.all.stop()
# finally, stop nested dependencies
dependencies.nested.all.stop()
# just in case there was a provider not taking care of its workers,
# or a dependency not taking care of its protected threads
self._kill_active_threads()
self._kill_protected_threads()
self.started = False
self._died.send(None)
def kill(self, exc):
""" Kill the container in a semi-graceful way.
All non-protected managed threads are killed first. This includes
all active workers generated by :meth:`ServiceContainer.spawn_worker`.
Next, dependencies are killed. Finally, any remaining protected threads
are killed.
#.........这里部分代码省略.........
示例7: ServiceContainer
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
#.........这里部分代码省略.........
# NOTE(review): the lines below are the tail of a kill() implementation;
# the method's signature sits above the excerpt boundary.
# protect against extensions that throw during kill; the container
# is already dying with an exception, so ignore anything else
def safely_kill_extensions(ext_set):
try:
ext_set.kill()
except Exception as exc:
_log.warning('Extension raised `%s` during kill', exc)
safely_kill_extensions(self.entrypoints.all)
self._kill_worker_threads()
safely_kill_extensions(self.extensions.all)
self._kill_managed_threads()
self.started = False
# if `kill` is called after `stop`, they race to send this
if not self._died.ready():
self._died.send(None, exc_info)
def wait(self):
""" Block until the container has been stopped.
If the container was stopped due to an exception, ``wait()`` will
raise it.
Any unhandled exception raised in a managed thread or in the
worker lifecycle (e.g. inside :meth:`DependencyProvider.worker_setup`)
results in the container being ``kill()``ed, and the exception
raised from ``wait()``.
"""
return self._died.wait()
def spawn_worker(self, entrypoint, args, kwargs,
context_data=None, handle_result=None):
""" Spawn a worker thread for running the service method decorated
by `entrypoint`.
``args`` and ``kwargs`` are used as parameters for the service method.
``context_data`` is used to initialize a ``WorkerContext``.
``handle_result`` is an optional function which may be passed
in by the entrypoint. It is called with the result returned
or error raised by the service method. If provided it must return a
value for ``result`` and ``exc_info`` to propagate to dependencies;
these may be different to those returned by the service method.
"""
# Refuse new work while the container is shutting down hard.
if self._being_killed:
_log.info("Worker spawn prevented due to being killed")
raise ContainerBeingKilled()
service = self.service_cls()
worker_ctx = WorkerContext(
self, service, entrypoint, args, kwargs, data=context_data
)
_log.debug('spawning %s', worker_ctx)
# Run the worker in the bounded pool and link a callback so the
# bookkeeping runs when the green thread exits.
gt = self._worker_pool.spawn(
self._run_worker, worker_ctx, handle_result
)
gt.link(self._handle_worker_thread_exited, worker_ctx)
self._worker_threads[worker_ctx] = gt
return worker_ctx
示例8: Service
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
# Kombu ConsumerMixin-based RPC service: consumes topic/fanout queues and
# dispatches each message to the controller inside a green-thread pool.
class Service(ConsumerMixin):
def __init__(self, controllercls,
connection, exchange, topic,
pool=None, poolsize=1000):
self.nodeid = UIDGEN()
# Use the caller-supplied pool when given, otherwise make our own.
if pool is None:
self.procpool = GreenPool(size=poolsize)
else:
self.procpool = pool
self.connection = connection
self.controller = controllercls()
self.topic = topic
self.greenlet = None
self.messagesem = Semaphore()
self.consume_ready = Event()
# Node-specific topic so messages can target this instance directly.
node_topic = "{}.{}".format(self.topic, self.nodeid)
self.queues = [entities.get_topic_queue(exchange, topic),
entities.get_topic_queue(exchange, node_topic),
entities.get_fanout_queue(topic), ]
self._channel = None
self._consumers = None
def start(self):
# self.connection = newrpc.create_connection()
# Guard against double-start while the run loop is still alive.
if self.greenlet is not None and not self.greenlet.dead:
raise RuntimeError()
self.greenlet = eventlet.spawn(self.run)
def get_consumers(self, Consumer, channel):
# A single consumer over all three queues; kombu invokes on_message
# once per delivered message.
return [Consumer(self.queues, callbacks=[self.on_message, ]), ]
def on_consume_ready(self, connection, channel, consumers, **kwargs):
self._consumers = consumers
self._channel = channel
self.consume_ready.send(None)
def on_consume_end(self, connection, channel):
self.consume_ready.reset()
def on_message(self, body, message):
# need a semaphore to stop killing between message ack()
# and spawning process.
with self.messagesem:
self.procpool.spawn(self.handle_request, body)
message.ack()
def handle_request(self, body):
newrpc.process_message(self.connection, self.controller, body)
def wait(self):
# Wait for the consumer loop (tolerating an explicit kill), then
# drain any in-flight handlers before returning.
try:
self.greenlet.wait()
except greenlet.GreenletExit:
pass
return self.procpool.waitall()
def kill(self):
if self.greenlet is not None and not self.greenlet.dead:
# Ask the ConsumerMixin run loop to exit cleanly.
self.should_stop = True
#with self.messagesem:
#self.greenlet.kill()
self.greenlet.wait()
if self._consumers:
for c in self._consumers:
c.cancel()
if self._channel is not None:
self._channel.close()
def link(self, *args, **kwargs):
return self.greenlet.link(*args, **kwargs)
def kill_processes(self):
# Forcefully kill any handlers still running in the pool.
for g in self.procpool.coroutines_running:
g.kill()
示例9: ServiceContainer
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
#.........这里部分代码省略.........
# NOTE(review): the lines below are the tail of a stop() implementation;
# the method's opening lines sit above the excerpt boundary.
_log.debug('stopping %s', self)
with _log_time('stopped %s', self):
dependencies = self.dependencies
# entrypoint deps have to be stopped before injection deps
# to ensure that running workers can successfully complete
dependencies.entrypoints.all.stop()
# there might still be some running workers, which we have to
# wait for to complete before we can stop injection dependencies
self._worker_pool.waitall()
# it should be safe now to stop any injection as there is no
# active worker which could be using it
dependencies.injections.all.stop()
# finally, stop nested dependencies
dependencies.nested.all.stop()
# just in case there was a provider not taking care of its workers,
# or a dependency not taking care of its protected threads
self._kill_active_threads()
self._kill_protected_threads()
self.started = False
self._died.send(None)
def kill(self, exc_info=None):
""" Kill the container in a semi-graceful way.
All non-protected managed threads are killed first. This includes
all active workers generated by :meth:`ServiceContainer.spawn_worker`.
Next, dependencies are killed. Finally, any remaining protected threads
are killed.
If ``exc_info`` is provided, the exception will be raised by
:meth:`~wait``.
"""
if self._being_killed:
# this happens if a managed thread exits with an exception
# while the container is being killed or if multiple errors
# happen simultaneously
_log.debug('already killing %s ... waiting for death', self)
try:
self._died.wait()
except:
pass # don't re-raise if we died with an exception
return
self._being_killed = True
# Idempotent: nothing to do if the container already died.
if self._died.ready():
_log.debug('already stopped %s', self)
return
if exc_info is not None:
_log.info('killing %s due to %s', self, exc_info[1])
else:
_log.info('killing %s', self)
# protect against dependencies that throw during kill; the container
# is already dying with an exception, so ignore anything else
def safely_kill_dependencies(dep_set):
try:
示例10: AsynchronousSection
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
class AsynchronousSection(object):
    """Context manager that runs callables concurrently and joins on exit."""

    # Never create an empty pool, even when size=0 is requested.
    MIN_POOL_SIZE = 1

    def __init__(self, size=0, ignore_errors_num=0):
        """Initialises.

        :param size: the max number of parallel tasks
        :param ignore_errors_num:
            number of errors which does not stop the execution
        """
        pool_size = max(size, self.MIN_POOL_SIZE)
        self.executor = GreenPool(pool_size)
        self.ignore_errors_num = ignore_errors_num
        self.errors = []
        self.tasks = set()

    def __enter__(self):
        del self.errors[:]
        return self

    def __exit__(self, etype, *_):
        # Skip the error summary when the with-body itself raised.
        self.wait(etype is not None)

    def execute(self, func, *args, **kwargs):
        """Calls function asynchronously."""
        over_budget = 0 <= self.ignore_errors_num < len(self.errors)
        if over_budget:
            raise RuntimeError("Too many errors.")
        task = self.executor.spawn(func, *args, **kwargs)
        self.tasks.add(task)
        task.link(self.on_complete)
        return task

    def on_complete(self, gt):
        """Callback to handle task completion."""
        try:
            gt.wait()
        except Exception as e:
            logger.error("Task failed: %s", six.text_type(e))
            self.errors.append(sys.exc_info())
        finally:
            self.tasks.discard(gt)

    def wait(self, ignore_errors=False):
        """Waits until all tasks will be completed.

        Do not use directly, will be called from context manager.
        """
        self.executor.waitall()
        if not self.errors:
            return
        for exc_info in self.errors:
            logger.exception("error details.", exc_info=exc_info)
        del self.errors[:]
        if not ignore_errors:
            raise RuntimeError(
                "Operations completed with errors.\n"
                "See log for more details."
            )
示例11: __init__
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
#.........这里部分代码省略.........
# NOTE(review): interior of a fabric connection-manager __init__; the
# opening lines (including the cfg option definitions that the two
# string fragments below belong to) sit above the excerpt boundary.
'before retrying a failed '
'operation'))
self.conf.append_config_values(fc_fabric_opts)
# If running in a process without fabric CLI options passed, allow
# those options to be given and set via the overrides param, which
# is a list of (key, value) tuples.
if overrides:
for item in overrides:
self.conf.local_conf.set_override(item[0], item[1])
# Now we initialise a connection to the switch for each of the fabrics
# This function is called in a GreenThread for each registered switch.
def _do_connect(context, fabric_name):
# Mark this thread as running the passed-in context, so that log
# messages can be correlated.
context.update_store()
# Serialise connect attempts per fabric with an external lock.
@lockutils.synchronized(fabric_name, 'fcfabric-', True)
def _do_locked_connect(fabric_name):
# Each fabric's settings live under per-fabric-name config keys.
fabric_ip = self.conf.safe_get('fc_fabric_address_' +
fabric_name)
fabric_user = self.conf.safe_get('fc_fabric_user_' +
fabric_name)
fabric_pwd = self.conf.safe_get('fc_fabric_password_' +
fabric_name)
fabric_port = self.conf.safe_get('fc_fabric_port_' +
fabric_name)
fabric_timeout = self.conf.safe_get('fc_fabric_timeout_' +
fabric_name)
fabric_cmd_timeout = \
self.conf.safe_get('fc_fabric_cmd_timeout_' +
fabric_name)
fabric_display_name = \
self.conf.safe_get('fc_fabric_display_name_' +
fabric_name)
fabric_num_retries = \
self.conf.safe_get('fc_fabric_num_attempts_' +
fabric_name)
fabric_min_retry_gap = \
self.conf.safe_get('fc_fabric_min_retry_gap_' +
fabric_name)
fabric_max_retry_gap = \
self.conf.safe_get('fc_fabric_max_retry_gap_' +
fabric_name)
descriptor = exception.FabricDescriptor(fabric_name,
fabric_display_name,
fabric_user,
fabric_ip,
fabric_port)
conn = BrcdFCZoneClientCLI(fabric_ip, fabric_user,
fabric_pwd, fabric_port,
fabric_timeout,
fabric_cmd_timeout,
descriptor,
fabric_num_retries,
fabric_min_retry_gap,
fabric_max_retry_gap)
return conn
return _do_locked_connect(fabric_name)
# Start a GreenThread for each fabric that we will connect to and
# initiate the connection in it.
pool = GreenPool(size=len(fabric_names))
# Obtain our current context so that we can make sure that our child
# threads have the same context, so that we can correlate log messages
# that they generate.
context = getattr(local.store, 'context', None)
threads = {}
for fabric_name in fabric_names:
thread = pool.spawn(_do_connect, context, fabric_name)
threads[fabric_name] = thread
# Collect the resulting connection objects.
# The wait() will raise an exception if something went wrong.
exceptions = []
for fabric_name, thread in threads.iteritems():
try:
self.fabrics[fabric_name] = thread.wait()
LOG.info(_("Connection established to fabric %(f_name)s") %
dict(f_name=fabric_name))
except Exception as e:
exceptions.append(e)
# If any exceptions were raised, we throw an exception that
# encapsulates them all.
if exceptions:
raise exception.ZoneManagerParallel(exceptions)
示例12: LocalNode
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
class LocalNode(Node):
def __init__(self, path=None, id=None, master=None, settings=None, log_level=logging.WARNING, baron=None, address=None):
self.baron = baron
self.path = os.path.abspath(path or '.')
if id is None:
id = platform.node()
self.id = id
if not os.path.exists(self.path):
os.makedirs(path)
self.log_level = log_level
self.address = address
self._socket = None
self._started = False
self._failed = False
self.services = []
self._service_map = {}
self._deployments = []
self._keys = set() # A set of keys allowed to edit things.
self._node_map = {id: self}
self.master = master or self
self.neighbors = [] # Any node we know about.
self.vassals = []
self.rogue = [] # TODO: Put nodes that should be vassals
# but don't recognize us here.
self._pool = GreenPool()
if (self.master != self):
self._node_map[self.master.id] = self.master
self.dispatcher = Dispatcher(self)
self.load_settings(settings=settings)
print "Sovereign node (%s) created at %s" % (self.id, self.path)
print "", "- primary authentication key:", self.key
def serve(self, address=None):
"""
Serves the rest client at *address*.
"""
if self._socket:
self.close()
try:
self._socket = self.build_socket(address or self.address)
self.address = self._socket.getsockname()
self.start()
self._started = True
print "listening on http://%s:%s" % self.address
wsgi.server(self._socket, self, log=FileLikeLogger(logging))
self._socket = None
except Exception:
self._failed = True
raise
logging.exception("Error binding address.")
finally:
self.close()
def nanny(self):
    """Block until the node either starts or fails; True means started.

    Handy in tests that need to wait for serve() to come up.
    """
    while not (self._started or self._failed):
        eventlet.sleep(.01)
    return not self._failed
def spawn_thread(self, func, *args, **kwargs):
    """Run *func* in the node's green pool; yield once so it can start."""
    green_thread = self._pool.spawn(func, *args, **kwargs)
    eventlet.sleep(0)
    return green_thread
def start(self):
    """Deploy every enabled service that is not already running."""
    for service in self.services:
        if service.started or service.disabled:
            continue
        service.deploy()
def stop(self):
    """Stop every service that reports itself as started."""
    for service in self.services:
        if not service.started:
            continue
        service.stop()
def close(self):
    """Close the listening socket, logging (but swallowing) any error."""
    if not self._socket:
        return
    try:
        self._socket.close()
    except Exception:
        # Best-effort close: log and carry on rather than mask the
        # caller's own shutdown path. The original used a bare `except:`
        # (which also traps SystemExit/KeyboardInterrupt) followed by a
        # redundant `pass`.
        logging.exception("Socket will not shutdown.")
#.........这里部分代码省略.........
示例13: entrance
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
def entrance():
    """Kick off ten concurrent fetchers and block until all finish."""
    workers = GreenPool(100)
    for _ in range(10):
        workers.spawn(fetcher)
    workers.waitall()
示例14: Service
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import spawn [as 别名]
# RPC service variant: consumes nova-style topic/fanout queues plus
# per-method consumers, dispatching work into a bounded green pool.
# (Excerpt truncated below, inside handle_rpc_message.)
class Service(ConsumerMixin):
def __init__(
self, controllercls, connection_factory, exchange, topic,
pool=None, poolsize=1000):
self.nodeid = UIDGEN()
self.max_workers = poolsize
# Use the caller-supplied pool when given, otherwise make our own.
if pool is None:
self.procpool = GreenPool(size=poolsize)
else:
self.procpool = pool
self.controller = controllercls()
self.service = self.controller
self.topic = topic
self.greenlet = None
self.consume_ready = Event()
# Node-specific topic so a message can be aimed at this instance.
node_topic = "{}.{}".format(self.topic, self.nodeid)
self.nova_queues = [
entities.get_topic_queue(exchange, topic),
entities.get_topic_queue(exchange, node_topic),
entities.get_fanout_queue(topic), ]
self._channel = None
self._consumers = None
self.connection = connection_factory()
self.connection_factory = connection_factory
inject_dependencies(self.controller, self)
# One pooled AMQP connection per potential concurrent worker.
self._connection_pool = Pool(
max_size=self.procpool.size,
create=connection_factory
)
self.workers = set()
self._pending_ack_messages = []
self._pending_requeue_messages = []
self._do_cancel_consumers = False
self._consumers_cancelled = Event()
self._timers = list(get_timers(self.controller))
def start(self):
self.start_timers()
# greenlet has a magic attribute ``dead`` - pylint: disable=E1101
# Guard against double-start while the run loop is still alive.
if self.greenlet is not None and not self.greenlet.dead:
raise RuntimeError()
self.greenlet = eventlet.spawn(self.run)
def start_timers(self):
for timer in self._timers:
timer.start()
def get_consumers(self, Consumer, channel):
nova_consumer = Consumer(
self.nova_queues, callbacks=[self.on_nova_message, ])
consume_consumers = get_consumers(
Consumer, self, self.on_consume_message)
consumers = [nova_consumer] + list(consume_consumers)
# Cap unacked deliveries at the pool size so we never take on more
# messages than we can run concurrently.
prefetch_count = self.procpool.size
for consumer in consumers:
consumer.qos(prefetch_count=prefetch_count)
return consumers
def on_consume_ready(self, connection, channel, consumers, **kwargs):
self._consumers = consumers
self._channel = channel
self.consume_ready.send(None)
def on_consume_end(self, connection, channel):
self.consume_ready.reset()
def on_nova_message(self, body, message):
_log.debug('spawning RPC worker (%d free)', self.procpool.free())
gt = self.procpool.spawn(self.handle_rpc_message, body)
# Ack/requeue bookkeeping happens in the link callback.
gt.link(self.handle_rpc_message_processed, message)
self.workers.add(gt)
def on_consume_message(self, consumer_method_config, body, message):
_log.debug('spawning consume worker (%d free)', self.procpool.free())
gt = self.procpool.spawn(
self.handle_consume_message, consumer_method_config, body, message)
gt.link(self.handle_consume_message_processed)
self.workers.add(gt)
def handle_rpc_message(self, body):
# item is patched on for python with ``with``, pylint can't find it
# pylint: disable=E1102
with self._connection_pool.item() as connection:
#.........这里部分代码省略.........