本文整理汇总了Python中eventlet.greenpool.GreenPool.waitall方法的典型用法代码示例。如果您正苦于以下问题:Python GreenPool.waitall方法的具体用法?Python GreenPool.waitall怎么用?Python GreenPool.waitall使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类eventlet.greenpool.GreenPool
的用法示例。
在下文中一共展示了GreenPool.waitall方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tests
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def tests(status, test):
    """Run every applicable test against each known server, concurrently.

    :param status: dict whose 'servers' key maps hostname -> state dict.
    :param test: iterable of test objects exposing ``name`` and ``test()``.
    """
    pool = GreenPool(size=500)
    for hostname, server_state in status['servers'].iteritems():
        # Only run a test whose name already appears in this server's state.
        applicable = [t for t in test if t.name in server_state]
        for t in applicable:
            pool.spawn_n(t.test, hostname, server_state)
    pool.waitall()
示例2: run_once
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = []
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.expiring_objects_account):
            container = c['name']
            # Container names are integer expiry timestamps; iteration is
            # presumably sorted ascending, so the first future timestamp
            # means nothing later is due yet — TODO confirm ordering.
            timestamp = int(container)
            if timestamp > int(time()):
                break
            containers_to_delete.append(container)
            for o in self.swift.iter_objects(self.expiring_objects_account,
                                             container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    # Shard the work: hash "<container>/<obj>" and skip
                    # entries that belong to a different process slot.
                    obj_process = int(
                        hashlib.md5('%s/%s' % (container, obj)).
                        hexdigest(), 16)
                    if obj_process % processes != process:
                        continue
                # Object names are "<timestamp>-<actual object path>".
                timestamp, actual_obj = obj.split('-', 1)
                timestamp = int(timestamp)
                if timestamp > int(time()):
                    break
                pool.spawn_n(
                    self.delete_object, actual_obj, timestamp,
                    container, obj)
        pool.waitall()
        # Only after all object deletions finish, remove the (now empty)
        # expiry containers; 404/409 are acceptable (already gone / races).
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
示例3: run_once
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def run_once(self, *args, **kwargs):
    """
    Execute a single aggregation pass over the sample account.

    Sample containers named "<timestamp>_<account>" whose timestamp falls
    at or before the current sampling boundary are aggregated and then
    deleted; finally, per-tenant usage gaps are filled in.

    :param args: Extra args to fulfill the Daemon interface.
    :param kwargs: Extra keyword args; may carry ``processes`` and
                   ``process`` to shard work across workers.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.report_containers = 0
    containers_to_delete = []
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.sample_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.sample_account):
            container = c['name']
            try:
                # Container names look like "<timestamp>_<account>".
                timestamp, account = container.split('_', 1)
                timestamp = float(timestamp)
            except ValueError:
                self.logger.debug('ValueError: %s, '
                                  'need more than 1 value to unpack' % \
                                  container)
            else:
                if processes > 0:
                    # Shard containers across processes via md5 hash.
                    obj_proc = int(hashlib.md5(container).hexdigest(), 16)
                    if obj_proc % processes != process:
                        continue
                # Round current time down to the sampling interval; only
                # containers at or before that boundary are complete.
                n = (float(time()) // self.sample_rate) * self.sample_rate
                if timestamp <= n:
                    containers_to_delete.append(container)
                    pool.spawn_n(self.aggregate_container, container)
        pool.waitall()
        # Remove aggregated sample containers; 404/409 are acceptable.
        for container in containers_to_delete:
            try:
                self.logger.debug('delete container: %s' % container)
                self.swift.delete_container(self.sample_account, container,
                                            acceptable_statuses=(
                                                2, HTTP_NOT_FOUND,
                                                HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        # Collect the tenant ids this process is responsible for.
        tenants_to_fillup = list()
        for c in self.swift.iter_containers(self.aggregate_account):
            tenant_id = c['name']
            if processes > 0:
                c_proc = int(hashlib.md5(tenant_id).hexdigest(), 16)
                if c_proc % processes != process:
                    continue
            tenants_to_fillup.append(tenant_id)
        # Fill up lost usage data for those tenants.
        self.fillup_lossed_usage_data(tenants_to_fillup)
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
示例4: test_high_client_load
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def test_high_client_load():
    # Launch the fake service in the background, then hammer it with 100
    # concurrent clients, each carrying a unique "<pid>:<n>" ident.
    # NOTE(review): the service binds port 6805 while clients dial 6804 —
    # presumably a broker sits between them; confirm.
    eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6805")
    client_pool = GreenPool()
    for n in xrange(0, 100):
        ident = "%s:%s" % (os.getpid(), n)
        client_pool.spawn(fake_client, "tcp://127.0.0.1:6804", ident)
    client_pool.waitall()
示例5: run_once
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = set([])
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; '
                           '%(containers)s possible containers; '
                           '%(objects)s possible objects') % {
                           'containers': containers, 'objects': objects})
        for container, obj in self.iter_cont_objs_to_expire():
            containers_to_delete.add(container)
            # A bare container entry (no object) just marks the container
            # for eventual deletion.
            if not obj:
                continue
            # Object names are "<timestamp>-<actual object path>".
            timestamp, actual_obj = obj.split('-', 1)
            timestamp = int(timestamp)
            # Presumably iteration is timestamp-ordered, so the first
            # future timestamp ends the pass — TODO confirm ordering.
            if timestamp > int(time()):
                break
            pool.spawn_n(
                self.delete_object, actual_obj, timestamp,
                container, obj)
        pool.waitall()
        # Delete the expiry containers once their objects are handled;
        # 404/409 are acceptable (already gone / concurrent writers).
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(container)s '
                      '%(err)s') % {'container': container,
                                    'err': str(err)})
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
示例6: test_high_workload
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def test_high_workload():
    # fire up three services to receive in roundrobin style, giving
    # each an ident so we can make sure they're working that way
    for ident in (1, 2, 3):
        eventlet.spawn_n(fake_service, "tcp://127.0.0.1:6900", ident)
    client_pool = GreenPool()
    # fire up a bunch of clients to thrash it at random
    for n in xrange(0, 100):
        client_pool.spawn(fake_client, "tcp://127.0.0.1:6802",
                          "%s:%s" % (os.getpid(), n))
    client_pool.waitall()
示例7: test_high_client_load
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def test_high_client_load():
    # One fake service and ten fake clients share a context dict that
    # tallies how much traffic each side handled.
    test_context = {'clients': 0, 'services': 0}
    pool = GreenPool()
    pool.spawn(fake_service,
               "tcp://127.0.0.1:6801", test_context)
    for n in xrange(0, 10):
        pool.spawn(fake_client, "tcp://127.0.0.1:6800",
                   "%s" % n, test_context)
    pool.waitall()
    # Every client ran; each presumably triggered 10 service messages.
    assert_equal(test_context['clients'], 10)
    assert_equal(test_context['services'], 100)
示例8: run_once
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass over the restoring account: kick off restore
    requests for queued objects, then poll in-progress restores.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        # Phase 1: start restoring every object still in the todo queue
        # (sharded across processes by md5 of "<container>/<obj>").
        for o in self.swift.iter_objects(self.restoring_object_account,
                                         self.todo_container):
            obj = o['name'].encode('utf8')
            if processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (self.todo_container, obj)).
                    hexdigest(), 16)
                if obj_process % processes != process:
                    continue
            pool.spawn_n(self.start_object_restoring, obj)
        pool.waitall()
        # Phase 2: check whether in-progress restores have completed.
        for o in self.swift.iter_objects(self.restoring_object_account,
                                         self.restoring_container):
            obj = o['name'].encode('utf8')
            if processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (self.restoring_container, obj)).
                    hexdigest(), 16)
                if obj_process % processes != process:
                    continue
            pool.spawn_n(self.check_object_restored, obj)
        pool.waitall()
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout) as e:
        # NOTE(review): `e` is unused; report_exception presumably pulls
        # the active exception from sys.exc_info() — confirm.
        report_exception(self.logger, _('Unhandled exception'), self.client)
示例9: discovery
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def discovery(status, test):
    """Query every configured discovery source and run each test's
    discovery step against servers we have not seen before.

    :param status: dict whose 'servers' key maps hostname -> state dict.
    :param test: iterable of test objects exposing ``discover()``.
    """
    pool = GreenPool(size=500)
    for source in settings.discovery:
        for server in source().get_servers():  # [('ip', 'host')]
            ip, host = server
            if host in settings.exclude:
                continue
            if host not in status["servers"]:  # unseen host: do discovery
                status["servers"][host] = {}
                logging.info("performing discovery on %r", server)
                for probe in test:
                    pool.spawn_n(probe.discover, ip, status["servers"][host])
                status["servers"][host]["ip"] = ip
    pool.waitall()
示例10: imap
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def imap(requests, prefetch=True, size=2):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param prefetch: If False, the content will not be downloaded
                     immediately.
    :param size: Specifies the number of requests to make at a time.
                 default is 2
    """
    pool = GreenPool(size)

    def issue(request):
        # Fire the request, then hand back its response object.
        request.send(prefetch)
        return request.response

    for response in pool.imap(issue, requests):
        yield response
    pool.waitall()
示例11: imap
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def imap(requests, stream=False, size=2):
    """Concurrently converts a generator object of Requests to
    a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If True, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time.
                 default is 2
    """
    def dispatch(request):
        return request.send(stream=stream)

    pool = GreenPool(size)
    # imap_unordered yields responses as soon as each request completes.
    for response in pool.imap_unordered(dispatch, requests):
        yield response
    pool.waitall()
示例12: map
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def map(requests, prefetch=True, size=None):
    """Concurrently converts a list of Requests to Responses.

    :param requests: a collection of Request objects.
    :param prefetch: If False, the content will not be downloaded
                     immediately.
    :param size: Specifies the number of requests to make at a time.
                 If None, no throttling occurs.
    """
    requests = list(requests)
    if size:
        pool = GreenPool(size)
    else:
        pool = None
    jobs = [send(r, pool, prefetch=prefetch) for r in requests]
    if pool is None:
        # Unthrottled: each job was spawned directly, wait individually.
        for job in jobs:
            job.wait()
    else:
        pool.waitall()
    return [r.response for r in requests]
示例13: worker
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
If you import httplib2 directly, httplib2 doesn't natively support
cooperative yielding.
Monkey-patching httplib2 (eventlet.import_patched) will make it green.
'''
from eventlet.greenpool import GreenPool
import eventlet
import random
import httplib2
# uncomment this line to green httplib2
# httplib2 = eventlet.import_patched('httplib2')
def worker(url):
    # Fetch one URL and return the response headers/status object.
    # NOTE(review): with the plain (unpatched) httplib2 import above,
    # these greenthreads presumably block each other — confirm.
    print "worker "+str(random.random())
    h = httplib2.Http()
    resp, content = h.request(url, "GET")
    return resp

pool = GreenPool(size=10)
# Lazily map worker over each line of urls.txt (lines keep their '\n').
results = pool.imap(worker, open("urls.txt", 'r'))
for result in results:
    print result
print "done...."
pool.waitall()
示例14: run_once
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    # This if-clause will be removed when general task queue feature is
    # implemented.
    if not self.dequeue_from_legacy:
        self.logger.info('This node is not configured to dequeue tasks '
                         'from the legacy queue. This node will '
                         'not process any expiration tasks. At least '
                         'one node in your cluster must be configured '
                         'with dequeue_from_legacy == true.')
        return

    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        task_account_container_list_to_delete = list()
        for task_account, my_index, divisor in \
                self.iter_task_accounts_to_expire():
            container_count, obj_count = \
                self.swift.get_account_info(task_account)

            # the task account is skipped if there are no task container
            if not container_count:
                continue

            self.logger.info(_(
                'Pass beginning for task account %(account)s; '
                '%(container_count)s possible containers; '
                '%(obj_count)s possible objects') % {
                    'account': task_account,
                    'container_count': container_count,
                    'obj_count': obj_count})

            task_account_container_list = \
                [(task_account, task_container) for task_container in
                 self.iter_task_containers_to_expire(task_account)]

            # Remember every visited container for cleanup after the pass.
            task_account_container_list_to_delete.extend(
                task_account_container_list)

            # delete_task_iter is a generator to yield a dict of
            # task_account, task_container, task_object, delete_timestamp,
            # target_path to handle delete actual object and pop the task
            # from the queue.
            delete_task_iter = \
                self.round_robin_order(self.iter_task_to_expire(
                    task_account_container_list, my_index, divisor))

            for delete_task in delete_task_iter:
                pool.spawn_n(self.delete_object, **delete_task)

        pool.waitall()
        # Delete the task containers once their tasks were processed;
        # 404/409 are acceptable (already gone / concurrent writers).
        for task_account, task_container in \
                task_account_container_list_to_delete:
            try:
                self.swift.delete_container(
                    task_account, task_container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(account)s '
                      '%(container)s %(err)s') % {
                          'account': task_account,
                          'container': task_container, 'err': str(err)})
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
示例15: glob
# 需要导入模块: from eventlet.greenpool import GreenPool [as 别名]
# 或者: from eventlet.greenpool.GreenPool import waitall [as 别名]
# Collect and pair the per-generation clone files from both analysis runs.
infiles = glob('SNAP_ASCII/RUN_ANALYSIS001/gen*.clones')
outfiles = glob('SNAP_ASCII/RUN_ANALYSIS002/gen*.clones')
infiles.sort()
outfiles.sort()
# ifname = file with the "ingroups" (tested for polymorphism)
# ofname = file with the "outgroups" (diff between in/out => divergence)
have_printed_header = False
# Path to the compiled MonKeyTest binary (assumes exactly one match).
MonKeyTestPath = glob('analys*/src/MonKeyTest')[0]
def process_generation(ifname, ofname):
    """Run MonKeyTest on one ingroup/outgroup generation pair and write
    its result rows to stdout, prefixed with the generation number.

    :param ifname: ingroup clones file, name containing "gen<N>".
    :param ofname: outgroup clones file for the same generation.
    Side effects: writes to stdout; prints the header row once (tracked
    via the module-level ``have_printed_header`` flag).
    """
    global have_printed_header
    gen_num = int(re.findall(r'gen(\d+)', ifname)[0])
    gen_num2 = int(re.findall(r'gen(\d+)', ofname)[0])
    # Both files must belong to the same generation.
    assert(gen_num == gen_num2)
    args = [MonKeyTestPath, '-1', ifname, '-2', ofname]
    mkout = subprocess.check_output(args)
    mklines = mkout.splitlines()
    if not have_printed_header:
        sys.stdout.write("generation\t" + mklines[0] + '\n')
        have_printed_header = True
    for line in mklines[1:]:
        if line == '':
            # Bug fix: the original `next` was a no-op expression (it just
            # references the builtin), so blank lines leaked into output.
            continue
        sys.stdout.write("%i\t%s\n" % (gen_num, line))
gp = GreenPool(size=10)
for ifname,ofname in zip(infiles,outfiles):
    # NOTE(review): generations run serially; the GreenPool is created
    # but idle while the spawn call below stays commented out.
    process_generation(ifname,ofname)
    # gp.spawn(process_generation,ifname,ofname)
gp.waitall()