This article collects typical usage examples of the Python eventlet.greenpool.GreenPool class. If you have been wondering what GreenPool is for and how it is used in practice, the curated examples below should help.
Fifteen code examples of the GreenPool class are shown, sorted by popularity.
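Before the examples, a minimal self-contained sketch of the core GreenPool API may help orient readers; this snippet is ours, not taken from the projects below. spawn() returns a GreenThread whose result can be collected with wait(), spawn_n() is the fire-and-forget variant, and waitall() blocks until every green thread in the pool has finished.

import eventlet
from eventlet.greenpool import GreenPool

def work(name):
    eventlet.sleep(0.1)  # stand-in for real network I/O
    return 'hello %s' % name

pool = GreenPool(size=10)                  # at most 10 coroutines run at once
threads = [pool.spawn(work, n) for n in ('a', 'b', 'c')]
print([t.wait() for t in threads])         # -> ['hello a', 'hello b', 'hello c']
pool.waitall()                             # block until the pool drains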
Example 1: tests
def tests(status, test):
    pool = GreenPool(size=500)
    for host, s in status['servers'].iteritems():
        for t in test:
            if t.name in s:
                pool.spawn_n(t.test, host, s)
    pool.waitall()
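This first example uses spawn_n rather than spawn: spawn_n does not build a GreenThread to carry the return value, so it is marginally cheaper when, as here, nothing ever inspects the result. A quick illustration of the difference (the lambda is a hypothetical task):

pool = GreenPool()
gt = pool.spawn(lambda: 42)   # spawn returns a waitable GreenThread
print(gt.wait())              # -> 42
pool.spawn_n(lambda: 42)      # spawn_n returns None; result is discarded
pool.waitall()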
Example 2: run_once
def run_once(self, *args, **kwargs):
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.report_containers = 0
    containers_to_delete = []
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.sample_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.sample_account):
            container = c['name']
            try:
                timestamp, account = container.split('_', 1)
                timestamp = float(timestamp)
            except ValueError:
                self.logger.debug('ValueError: %s, '
                                  'need more than 1 value to unpack' %
                                  container)
            else:
                if processes > 0:
                    obj_proc = int(hashlib.md5(container).hexdigest(), 16)
                    if obj_proc % processes != process:
                        continue
                n = (float(time()) // self.sample_rate) * self.sample_rate
                if timestamp <= n:
                    containers_to_delete.append(container)
                    pool.spawn_n(self.aggregate_container, container)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.logger.debug('delete container: %s' % container)
                self.swift.delete_container(self.sample_account, container,
                                            acceptable_statuses=(
                                                2, HTTP_NOT_FOUND,
                                                HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        tenants_to_fillup = list()
        for c in self.swift.iter_containers(self.aggregate_account):
            tenant_id = c['name']
            if processes > 0:
                c_proc = int(hashlib.md5(tenant_id).hexdigest(), 16)
                if c_proc % processes != process:
                    continue
            tenants_to_fillup.append(tenant_id)
        # fill up lost usage data
        self.fillup_lossed_usage_data(tenants_to_fillup)
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
Example 3: run_once
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = []
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.expiring_objects_account):
            container = c['name']
            timestamp = int(container)
            if timestamp > int(time()):
                break
            containers_to_delete.append(container)
            for o in self.swift.iter_objects(self.expiring_objects_account,
                                             container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    obj_process = int(
                        hashlib.md5('%s/%s' % (container, obj)).
                        hexdigest(), 16)
                    if obj_process % processes != process:
                        continue
                timestamp, actual_obj = obj.split('-', 1)
                timestamp = int(timestamp)
                if timestamp > int(time()):
                    break
                pool.spawn_n(
                    self.delete_object, actual_obj, timestamp,
                    container, obj)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
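Examples 2 and 3 both shard work across cooperating daemon processes by hashing each work key and skipping keys that belong to another process. That test, lifted into a standalone sketch (the helper name is ours, not Swift's):

import hashlib

def belongs_to_process(key, process, processes):
    # Deterministically map `key` to one of `processes` workers; a worker
    # only handles keys whose hash lands on its own `process` index.
    # (.encode() is needed on Python 3; the Python 2 examples above hash str.)
    digest = hashlib.md5(key.encode('utf8')).hexdigest()
    return int(digest, 16) % processes == process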
Example 4: test_high_client_load
def test_high_client_load():
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6805")
    clients = GreenPool()
    for i in xrange(0, 100):
        clients.spawn(fake_client, "tcp://127.0.0.1:6804",
                      "%s:%s" % (os.getpid(), i))
    clients.waitall()
Example 5: update_macmap
def update_macmap(configmanager):
    """Interrogate switches to build/update mac table

    Begin a rebuild process.  This process is a generator that will yield
    as each switch interrogation completes, allowing a caller to
    recheck the cache as results become possible, rather
    than having to wait for the process to complete to interrogate.
    """
    global _macmap
    global _nodesbymac
    global _switchportmap
    # Clear all existing entries
    _macmap = {}
    _nodesbymac = {}
    _switchportmap = {}
    if configmanager.tenant is not None:
        raise exc.ForbiddenRequest('Network topology not available to tenants')
    nodelocations = configmanager.get_node_attributes(
        configmanager.list_nodes(), ('hardwaremanagement.switch',
                                     'hardwaremanagement.switchport'))
    switches = set([])
    for node in nodelocations:
        cfg = nodelocations[node]
        if 'hardwaremanagement.switch' in cfg:
            curswitch = cfg['hardwaremanagement.switch']['value']
            switches.add(curswitch)
            if 'hardwaremanagement.switchport' in cfg:
                portname = cfg['hardwaremanagement.switchport']['value']
                if curswitch not in _switchportmap:
                    _switchportmap[curswitch] = {}
                if portname in _switchportmap[curswitch]:
                    log.log({'warning': 'Duplicate switch topology config '
                                        'for {0} and {1}'.format(
                                            node,
                                            _switchportmap[curswitch][
                                                portname])})
                _switchportmap[curswitch][portname] = node
    switchcfg = configmanager.get_node_attributes(
        switches, ('secret.hardwaremanagementuser',
                   'secret.hardwaremanagementpassword'), decrypt=True)
    switchauth = []
    for switch in switches:
        password = 'public'
        user = None
        if (switch in switchcfg and
                'secret.hardwaremanagementpassword' in switchcfg[switch]):
            password = switchcfg[switch][
                'secret.hardwaremanagementpassword']['value']
            if 'secret.hardwaremanagementuser' in switchcfg[switch]:
                user = switchcfg[switch][
                    'secret.hardwaremanagementuser']['value']
        switchauth.append((switch, password, user))
    pool = GreenPool()
    for res in pool.imap(_map_switch, switchauth):
        yield res
    print(repr(_macmap))
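The interesting GreenPool feature here is imap: it runs _map_switch over the switchauth list at the pool's concurrency, preserves input order, and yields each result as soon as it is ready, which is what lets update_macmap itself be a generator reporting per-switch progress. A tiny sketch of the same pattern (interrogate is a hypothetical stand-in for the real switch query):

pool = GreenPool(size=4)

def interrogate(switch):
    eventlet.sleep(0)  # stand-in for SNMP/SSH traffic to the switch
    return switch, 'done'

# Results arrive incrementally, in input order, as workers finish.
for switch, state in pool.imap(interrogate, ['sw1', 'sw2', 'sw3']):
    print(switch, state)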
Example 6: run_once
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = set([])
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; '
                           '%(containers)s possible containers; '
                           '%(objects)s possible objects') % {
                         'containers': containers, 'objects': objects})
        for container, obj in self.iter_cont_objs_to_expire():
            containers_to_delete.add(container)
            if not obj:
                continue
            timestamp, actual_obj = obj.split('-', 1)
            timestamp = int(timestamp)
            if timestamp > int(time()):
                break
            pool.spawn_n(
                self.delete_object, actual_obj, timestamp,
                container, obj)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(container)s '
                      '%(err)s') % {'container': container,
                                    'err': str(err)})
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
Example 7: _parallel_execute
def _parallel_execute(self, operation, *args):
    """
    Perform an operation against all fabrics, consolidating the responses
    into a dictionary keyed on fabric name.
    """
    def _spawn(context, operation, fabric_name, conn, *args):
        # Inherit this thread's context from the parent
        context.update_store()

        @lockutils.synchronized(fabric_name, 'fcfabric-', True)
        def _locked_spawn(operation, fabric_name, conn, *args):
            return operation(fabric_name, conn, *args)

        return _locked_spawn(operation, fabric_name, conn, *args)

    pool = GreenPool(size=len(self.fabrics))
    # Obtain our current context so that we can make sure that our child
    # threads have the same context, so that we can correlate log messages
    # that they generate.
    context = getattr(local.store, 'context', None)
    threads = {}
    for fabric_name, conn in self.fabrics.iteritems():
        thread = pool.spawn(_spawn, context, operation, fabric_name, conn,
                            *args)
        threads[fabric_name] = thread
    # Collect the responses.  This may raise exceptions when we call wait();
    # if it does, we collect them and raise a collective exception at the
    # end.
    responses = {}
    exceptions = []
    for fabric_name, thread in threads.iteritems():
        try:
            responses[fabric_name] = thread.wait()
        except Exception as e:
            # FabricExceptions can indicate that a backtrace is not
            # required if they contain sufficient debug information
            # themselves.
            if (not isinstance(e, exception.FabricException) or
                    e.backtrace_needed):
                LOG.exception(e)
            exceptions.append(e)
    # If any exceptions were raised, we throw an exception that
    # encapsulates them all.
    if exceptions:
        raise exception.ZoneManagerParallel(exceptions)
    return responses
Example 8: test_high_workload
def test_high_workload():
    # fire up three services to receive in roundrobin style, giving
    # each an ident so we can make sure they're working that way
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 1)
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 2)
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", 3)
    clients = GreenPool()
    # fire up a bunch of clients to thrash it at random
    for i in xrange(0, 100):
        clients.spawn(fake_client, "tcp://127.0.0.1:6802",
                      "%s:%s" % (os.getpid(), i))
    clients.waitall()
Example 9: discovery
def discovery(status, test):
    pool = GreenPool(size=500)
    for d in settings.discovery:
        servers = d().get_servers()  # [('ip', 'host')]
        for server in servers:
            ip = server[0]
            host = server[1]
            if host in settings.exclude:
                continue
            if host not in status["servers"]:  # do discovery
                status["servers"][host] = {}
                logging.info("performing discovery on %r", server)
                for t in test:
                    pool.spawn_n(t.discover, ip, status["servers"][host])
            status["servers"][host]["ip"] = ip
    pool.waitall()
Example 10: __init__
def __init__(self, volume_driver=None, service_name=None,
             *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    self._tp = GreenPool()
    if not volume_driver:
        # Get from configuration, which will get the default
        # if it's not using the multi backend
        volume_driver = self.configuration.volume_driver
    if volume_driver in MAPPING:
        LOG.warn(_("Driver path %s is deprecated, update your "
                   "configuration to the new path."), volume_driver)
        volume_driver = MAPPING[volume_driver]
    if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver':
        # Deprecated in Havana
        # Not handled in MAPPING because it requires setting a conf option
        LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure "
                   "LVMISCSIDriver and lvm_type=thin. Continuing with "
                   "those settings."))
        volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'
        self.configuration.lvm_type = 'thin'
    self.driver = importutils.import_object(
        volume_driver,
        configuration=self.configuration,
        db=self.db)
Example 11: imap
def imap(requests, prefetch=True, size=2):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        default is 2
    """
    def send(r):
        r.send(prefetch)
        return r.response

    pool = GreenPool(size)
    for r in pool.imap(send, requests):
        yield r
    pool.waitall()
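A hedged usage sketch for this wrapper, assuming a grequests-style Request object whose send() method performs the HTTP call and stores the result on .response (the request type and URLs are illustrative, not part of the excerpt):

reqs = (AsyncRequest('GET', url) for url in
        ('http://example.com/a', 'http://example.com/b'))
for response in imap(reqs, size=10):
    print(response.status_code)  # responses are yielded in request order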
Example 12: imap
def imap(self, func, *args):
    reqid = request_id()

    def impl(*args):
        set_request_id(reqid)
        return func(*args)

    return GreenPool.imap(self, impl, *args)
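This example is a method on a GreenPool subclass: it wraps the mapped function so that every worker green thread installs the caller's request ID before calling func, letting log lines emitted inside the pool be correlated with the originating request. A minimal sketch of the surrounding machinery, assuming request IDs live in (green-)thread-local storage; the class name and helpers are ours, not the excerpt's:

import threading

_store = threading.local()  # green-thread local once eventlet monkey-patches threading

def request_id():
    return getattr(_store, 'request_id', None)

def set_request_id(reqid):
    _store.request_id = reqid

class RequestIdGreenPool(GreenPool):
    def imap(self, func, *args):
        reqid = request_id()       # capture the caller's request id

        def impl(*args):
            set_request_id(reqid)  # re-install it inside each worker
            return func(*args)

        return GreenPool.imap(self, impl, *args)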
Example 13: map
def map(requests, prefetch=True, size=None):
    """Concurrently converts a list of Requests to Responses.

    :param requests: a collection of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        If None, no throttling occurs.
    """
    requests = list(requests)

    pool = GreenPool(size) if size else None
    jobs = [send(r, pool, prefetch=prefetch) for r in requests]

    if pool is not None:
        pool.waitall()
    else:
        [j.wait() for j in jobs]

    return [r.response for r in requests]
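The send() helper is not part of this excerpt. Judging from how map() uses it, spawning on the shared pool when one exists and waiting on the returned jobs otherwise, a plausible implementation might be the following (an assumption, not the project's actual code):

def send(r, pool=None, prefetch=True):
    # Spawn the request on the shared pool if given, otherwise on a
    # standalone green thread; both calls return a waitable GreenThread.
    if pool is not None:
        return pool.spawn(r.send, prefetch)
    return eventlet.spawn(r.send, prefetch)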
Example 14: __init__
def __init__(self, games_url):
    """
    :param games_url:
    """
    self._data = _WorkerManagerData()
    self.games_url = games_url
    self._pool = GreenPool(size=3)
    super(WorkerManager, self).__init__()
Example 15: imap
def imap(requests, stream=False, size=2):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If True, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
        default is 2
    """
    pool = GreenPool(size)

    def send(r):
        return r.send(stream=stream)

    for r in pool.imap_unordered(send, requests):
        yield r
    pool.waitall()