This article collects typical code examples of the Python method concurrent.futures.as_completed. If you have been wondering what futures.as_completed does, how to call it, or where it is useful, the curated examples below may help. You can also explore the concurrent.futures module that the method belongs to.
The following presents 15 code examples of futures.as_completed, sorted by popularity by default.
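Before the collected examples, a minimal, self-contained sketch of the canonical pattern may help; the worker function square and its inputs are illustrative placeholders, not taken from any of the examples below.

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(n):
    # stand-in for any I/O- or CPU-bound task
    return n * n

with ThreadPoolExecutor(max_workers=4) as executor:
    tasks = [executor.submit(square, n) for n in range(10)]
    # as_completed yields each future as soon as it finishes,
    # regardless of the order in which the futures were submitted
    for future in as_completed(tasks):
        print(future.result())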
Example 1: runner
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def runner(k):
    threadpool = thread.ThreadPoolExecutor(max_workers=k.get('threads'))
    if k.get('verbose'):
        info('Set %s threads..' % k.get('threads'))
    futures = (threadpool.submit(requester, domain, k.get("proxy"), k.get("timeout"),
                                 k.get("output"), k.get('process'), k.get('verbose'))
               for domain in k.get("domains"))
    # note: as_completed yields futures in completion order, so the index i
    # does not necessarily correspond to domains[i]
    for i, results in enumerate(thread.as_completed(futures)):
        if k.get('verbose') and k.get('d_list'):
            str_ = "{i}{b:.2f}% Domain: {d}".format(
                i=_info(),
                b=PERCENT(int(i), int(k.get('dict_len'))),
                d=k.get('domains')[i]
            )
            print_(str_)
        else:
            info('Domain: {}'.format(k.get('domains')[i]))
Example 2: count
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def count(self, lines):
    # use the name server's prefix lookup to get all registered wordcounters
    with locate_ns() as ns:
        all_counters = ns.list(prefix="example.dc2.wordcount.")
    # chop the text into chunks that can be distributed across the workers;
    # uses futures so that it runs the counts in parallel
    # counter is selected in a round-robin fashion from the list of all available counters
    with futures.ThreadPoolExecutor() as pool:
        roundrobin_counters = cycle(all_counters.values())
        tasks = []
        for chunk in grouper(200, lines):
            tasks.append(pool.submit(self.count_chunk, next(roundrobin_counters), chunk))
        # gather the results
        print("Collecting %d results (counted in parallel)..." % len(tasks))
        totals = Counter()
        for task in futures.as_completed(tasks):
            try:
                totals.update(task.result())
            except Pyro5.errors.CommunicationError as x:
                raise Pyro5.errors.PyroError("Something went wrong in the server when collecting the responses: " + str(x))
    return totals
Example 3: screen
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def screen(self, start, width):
    dr = width / self.res_x
    di = dr * (self.res_x / self.res_y)
    di *= 0.8  # aspect ratio correction
    self.result = ["?"] * self.res_y
    servers = [BatchProxy(Proxy(uri)) for uri in self.mandels]
    with futures.ThreadPoolExecutor(max_workers=len(servers) * 2) as pool:
        for i in range(self.res_y):
            server = servers[i % len(servers)]
            server.calc_line(start, self.res_x, i * di, dr, i)
        tasks = [pool.submit(server) for server in servers]
        for task in futures.as_completed(tasks):
            lines = task.result()
            for (linenr, line) in lines:
                self.result[linenr] = line
    return "\n".join(self.result)
Example 4: run
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def run(self, concurrent=10):
    """
    Entry point.

    :param concurrent: number of threads to use
    :return: message json
    """
    children = [self.stac_file]
    logger.info(f"Using {concurrent} threads")
    while True:
        with futures.ThreadPoolExecutor(max_workers=int(concurrent)) as executor:
            future_tasks = [executor.submit(self._validate, url) for url in children]
            children = []
            for task in futures.as_completed(future_tasks):
                message, status, new_children = task.result()
                self.status = self._update_status(self.status, status)
                self.message.append(message)
                children.extend(new_children)
        if not children:
            break
    return json.dumps(self.message)
Example 5: download_many
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def download_many(cc_list):
    cc_list = cc_list[:5]  # <1>
    with futures.ThreadPoolExecutor(max_workers=3) as executor:  # <2>
        to_do = []
        for cc in sorted(cc_list):  # <3>
            future = executor.submit(download_one, cc)  # <4>
            to_do.append(future)  # <5>
            msg = 'Scheduled for {}: {}'
            print(msg.format(cc, future))  # <6>

        results = []
        for future in futures.as_completed(to_do):  # <7>
            res = future.result()  # <8>
            msg = '{} result: {!r}'
            print(msg.format(future, res))  # <9>
            results.append(res)

    return len(results)
Example 6: par_crop
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def par_crop(args):
    """
    Dataset curation: crop data and transform the label format
    """
    crop_path = os.path.join(args.download_dir, './crop{:d}'.format(args.instance_size))
    if not os.path.isdir(crop_path):
        makedirs(crop_path)
    VID_base_path = os.path.join(args.download_dir, './ILSVRC')
    ann_base_path = os.path.join(VID_base_path, 'Annotations/DET/train/')
    sub_sets = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i')
    for sub_set in sub_sets:
        sub_set_base_path = os.path.join(ann_base_path, sub_set)
        if 'a' == sub_set:
            xmls = sorted(glob.glob(os.path.join(sub_set_base_path, '*', '*.xml')))
        else:
            xmls = sorted(glob.glob(os.path.join(sub_set_base_path, '*.xml')))
        n_imgs = len(xmls)
        sub_set_crop_path = os.path.join(crop_path, sub_set)
        with futures.ProcessPoolExecutor(max_workers=args.num_threads) as executor:
            fs = [executor.submit(crop_xml, args, xml, sub_set_crop_path, args.instance_size) for xml in xmls]
            for i, f in enumerate(futures.as_completed(fs)):
                printProgress(i, n_imgs, prefix=sub_set, suffix='Done ', barLength=80)
Example 7: par_crop
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def par_crop(args, ann_base_path):
    """
    Dataset curation: crop data and transform the label format

    Parameters
    ----------
    ann_base_path: str, Annotations base path
    """
    crop_path = os.path.join(args.download_dir, './crop{:d}'.format(int(args.instance_size)))
    if not os.path.isdir(crop_path):
        makedirs(crop_path)
    sub_sets = sorted({'a', 'b', 'c', 'd', 'e'})
    for sub_set in sub_sets:
        sub_set_base_path = os.path.join(ann_base_path, sub_set)
        videos = sorted(os.listdir(sub_set_base_path))
        n_videos = len(videos)
        with futures.ProcessPoolExecutor(max_workers=args.num_threads) as executor:
            fs = [executor.submit(crop_video, args, sub_set, video, crop_path, ann_base_path) for video in videos]
            for i, f in enumerate(futures.as_completed(fs)):
                # write progress to error output so that it can be seen
                printProgress(i, n_videos, prefix=sub_set, suffix='Done ', barLength=40)
Example 8: _visual_items_upload_with_operation
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def _visual_items_upload_with_operation(self, sequence, visual_item_upload_operation):
    items_to_upload = []
    for visual_item in sequence.visual_items:
        if str(visual_item.index) not in sequence.progress:
            items_to_upload.append(visual_item)

    with THREAD_LOCK:
        self.manager.progress_bar.update(len(sequence.visual_items) - len(items_to_upload))

    with ThreadPoolExecutor(max_workers=self.workers) as executor:
        future_events = [executor.submit(visual_item_upload_operation.upload,
                                         visual_item) for visual_item in items_to_upload]
        for completed_event in as_completed(future_events):
            uploaded, index = completed_event.result()
            with THREAD_LOCK:
                if uploaded:
                    self.__persist_upload_index(index, sequence.path)
                    sequence.progress.append(index)
                self.manager.progress_bar.update(1)
Example 9: test_temp_table_concurrency
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def test_temp_table_concurrency(con, test_data_dir):
    # we don't install futures on windows in CI and we can't run this test
    # there anyway so we import here
    import concurrent.futures
    from concurrent.futures import as_completed

    def limit_10(i, hdfs_path):
        t = con.parquet_file(hdfs_path)
        return t.sort_by(t.r_regionkey).limit(1, offset=i).execute()

    nthreads = 4
    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')

    with concurrent.futures.ThreadPoolExecutor(max_workers=nthreads) as e:
        futures = [e.submit(limit_10, i, hdfs_path) for i in range(nthreads)]
    assert all(map(len, (future.result() for future in as_completed(futures))))
Example 10: test_multiple
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def test_multiple(ActorClass):
    print('-----------------')
    print('Test multiple for {}'.format(ActorClass))
    # Make multiple actors and send them each multiple jobs
    n_actors = 5
    n_jobs = 10
    actors_exs = [ActorClass.executor(a) for a in range(1, n_actors)]
    fs = []
    for jobid in range(n_jobs):
        n = jobid + 500
        fs += [ex.post({'action': 'prime', 'n': n}) for ex in actors_exs]

    for f in futures.as_completed(fs):
        print('n, a, prime = {}'.format(f.result()))

    actors = [ex.post({'action': 'debug'}).result() for ex in actors_exs]
    for a in actors:
        print(a.state)
    print('Test completed')
    print('L______________')
Example 11: _check_executor
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def _check_executor(self, dt):
    start = time()
    try:
        for future in as_completed(self._futures[:], 0):
            self._futures.remove(future)
            try:
                result = future.result()
            except Exception:
                traceback.print_exc()
                # make an error tile?
                continue
            if result is None:
                continue
            callback, args = result
            callback(*args)

            # cap the executor in time, in order to prevent too much
            # slowness; seems to work quite well with big zoom-in/out
            if time() - start > self.cap_time:
                break
    except TimeoutError:
        pass
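Example 11 passes 0 as the second (timeout) argument, which turns as_completed into a non-blocking poll: futures that are already done are yielded immediately, then TimeoutError is raised for whatever is still running. Below is a standalone sketch of that polling idiom; slow_task and the delay values are illustrative placeholders, not part of the example above.

import time
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError

def slow_task(delay):
    # stand-in for work items that finish at different times
    time.sleep(delay)
    return delay

with ThreadPoolExecutor(max_workers=4) as executor:
    pending = [executor.submit(slow_task, d) for d in (0.01, 0.02, 0.5, 0.5)]
    time.sleep(0.1)  # give the quick tasks time to finish
    try:
        # iterate over a copy (pending[:]) so we can safely remove
        # completed futures from the original list, as Example 11 does
        for future in as_completed(pending[:], timeout=0):
            pending.remove(future)
            print("done:", future.result())
    except TimeoutError:
        print("still pending:", len(pending))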
Example 12: process
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def process(self, start_url, crawled_urls):
    self.output.info("Checking ldap injection...")
    db = self.datastore.open("ldap.txt", "r")
    dbfiles = [x.strip() for x in db]
    for payload in dbfiles:
        with ThreadPoolExecutor(max_workers=None) as executor:
            futures = [
                executor.submit(self.attack, payload, url) for url in crawled_urls
            ]
            try:
                for future in as_completed(futures):
                    future.result()
            except KeyboardInterrupt:
                executor.shutdown(False)
                raise
Example 13: process
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def process(self, start_url, crawled_urls):
    self.output.info("Checking common backup files..")
    db = self.datastore.open("bfile.txt", "r")
    dbfiles = [x.strip() for x in db.readlines()]
    db1 = self.datastore.open("cfile.txt", "r")
    dbfiles1 = [x.strip() for x in db1.readlines()]
    urls = []
    for b in dbfiles:
        for d in dbfiles1:
            bdir = b.replace("[name]", d)
            urls.append(urljoin(str(start_url), str(bdir)))
    # We launch ThreadPoolExecutor with max_workers set to None to get the default optimization
    # https://docs.python.org/3/library/concurrent.futures.html
    with ThreadPoolExecutor(max_workers=None) as executor:
        futures = [executor.submit(self.check_url, url) for url in urls]
        try:
            for future in as_completed(futures):
                future.result()
        except KeyboardInterrupt:
            executor.shutdown(False)
            raise
Example 14: process
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def process(self, start_url, crawled_urls):
    self.output.info("Checking admin interfaces...")
    with self.datastore.open("admin.txt", "r") as db:
        dbfiles = [x.strip() for x in db.readlines()]
        urls = map(
            lambda adminpath: urljoin(str(start_url), str(adminpath)), dbfiles
        )
    # We launch ThreadPoolExecutor with max_workers set to None to get the default optimization
    # https://docs.python.org/3/library/concurrent.futures.html
    with ThreadPoolExecutor(max_workers=None) as executor:
        futures = [executor.submit(self.check_url, url) for url in urls]
        try:
            for future in as_completed(futures):
                future.result()
        except KeyboardInterrupt:
            executor.shutdown(False)
            raise
Example 15: async_get
# Required import: from concurrent import futures [as alias]
# Or: from concurrent.futures import as_completed [as alias]
def async_get(self, name: str, tag: str, parties: list) -> typing.Generator:
    rubbish = Rubbish(name, tag)
    futures = self._check_get_status_async(name, tag, parties)
    for future in as_completed(futures):
        party = futures[future]
        obj, head, frags = future.result()
        if isinstance(obj, _DTable):
            rubbish.add_table(obj)
            yield (party, obj)
        else:
            table, key = head
            rubbish.add_obj(table, key)
            if not is_split_head(obj):
                yield (party, obj)
            else:
                frag_table, frag_keys = frags
                rubbish.add_table(frag_table)
                fragments = [frag_table.get(key) for key in frag_keys]
                yield (party, split_get(fragments))
    yield (None, rubbish)
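Example 15 looks up party = futures[future], which implies futures is a mapping from each future to the party it belongs to. Since as_completed accepts any iterable of futures, passing a dict keyed by future is a common way to recover which input produced each result. A minimal sketch of that mapping pattern; work and items are illustrative placeholders.

from concurrent.futures import ThreadPoolExecutor, as_completed

def work(item):
    # stand-in for the real per-party request
    return item.upper()

items = ["alpha", "beta", "gamma"]
with ThreadPoolExecutor() as pool:
    future_to_item = {pool.submit(work, item): item for item in items}
    for future in as_completed(future_to_item):
        # the dict maps each completed future back to its originating input
        print(future_to_item[future], "->", future.result())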