This article collects typical usage examples of the Python method multiprocessing.dummy.Pool.imap_unordered. If you have been wondering how exactly Pool.imap_unordered is used in practice, the curated code examples below should help. You can also explore further usage examples of the class it belongs to, multiprocessing.dummy.Pool.
The following 15 code examples of Pool.imap_unordered are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
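Before diving into the examples, here is a minimal, self-contained sketch of the method itself (the worker function and pool size are illustrative, not taken from any example below). multiprocessing.dummy.Pool is a thread-based pool with the same API as multiprocessing.Pool, and imap_unordered(func, iterable[, chunksize]) returns a lazy iterator that yields results in completion order rather than input order:

from multiprocessing.dummy import Pool  # thread-based, same API as multiprocessing.Pool

def work(n):
    # Stand-in for real work (I/O, a subprocess call, etc.)
    return n, n * n

pool = Pool(4)
# Results arrive as each task finishes, not in input order,
# so each result carries its input along for matching.
for n, result in pool.imap_unordered(work, range(10)):
    print(n, result)
pool.close()
pool.join()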
Example 1: run_worker
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (`generate_statistics` comes from the source project):
from functools import partial
from multiprocessing.dummy import Pool as ThreadPool

def run_worker(data, versions, channels):
    t_pool = ThreadPool()
    # The results are discarded, but imap_unordered still schedules every
    # task; close()/join() then wait for the workers to finish.
    t_pool.imap_unordered(partial(generate_statistics,
                                  versions=versions,
                                  channels=channels), data)
    t_pool.close()
    t_pool.join()
Example 2: find_process_files
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (load_hashes, save_hashes, process,
# process_generate_pyx, rules and HASH_FILE come from the source project):
import os
import re
from multiprocessing.dummy import Pool, Lock

def find_process_files(root_dir):
    lock = Lock()

    try:
        num_proc = int(os.environ.get('SCIPY_NUM_CYTHONIZE_JOBS', ''))
        pool = Pool(processes=num_proc)
    except ValueError:
        pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed pxi/pxd hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    dep_hashes = {}

    # Run any _generate_pyx.py scripts
    jobs = []
    for cur_dir, dirs, files in os.walk(root_dir):
        generate_pyx = os.path.join(cur_dir, '_generate_pyx.py')
        if os.path.exists(generate_pyx):
            jobs.append(generate_pyx)

    for result in pool.imap_unordered(lambda fn: process_generate_pyx(fn, lock), jobs):
        pass

    # Process pyx files
    jobs = []
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I | re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function,
                                 hash_db, dep_hashes, lock))

    for result in pool.imap_unordered(lambda args: process(*args), jobs):
        pass

    hash_db.update(dep_hashes)
    save_hashes(hash_db, HASH_FILE)
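A detail worth noting in this example: passing lambdas to imap_unordered only works because multiprocessing.dummy.Pool is backed by threads, so the callable is never pickled. With a process-backed multiprocessing.Pool the same calls would fail. A minimal sketch of the contrast (the doubling lambda is illustrative):

from multiprocessing.dummy import Pool as ThreadPool

with ThreadPool(2) as pool:  # thread-based: the lambda is shared, not pickled
    print(sorted(pool.imap_unordered(lambda x: x * 2, [1, 2, 3])))

# With `from multiprocessing import Pool` instead, the same call raises a
# PicklingError, because a process pool must pickle the callable to ship
# it to its worker processes.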
Example 3: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (`log` and `url_name` come from the source project):
import time
from multiprocessing.dummy import Pool

def main():
    urls = [
        'http://www.python.org',
        'https://stackoverflow.com/',
        'https://css-tricks.com/',
        'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference',
        'https://dev.twitter.com/',
        'https://d3js.org/',
        'https://www.heroku.com/',
        'https://docs.pytest.org/en/latest/',
        'https://www.djangoproject.com/',
        'https://pudding.cool/',
        'https://caniuse.com/',
        'http://svgpocketguide.com/book/',
        'https://www.w3.org/TR/SVG/intro.html',
    ]
    pool = Pool()
    start = time.time()
    for x, y in pool.imap_unordered(url_name, urls):
        index = urls.index(y)
        log.info("{}s (sleep: {}) (#{} in array) for {}"
                 .format(int(time.time() - start), x, index, y))
    pool.close()
    pool.join()
Example 4: _download_all
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (`dl` comes from the source project):
import tqdm
from multiprocessing.dummy import Pool as ThreadPool

def _download_all(items):
    """Async download of the files.

    Example: [(url, quality, file_path)]
    """
    global WORKERS
    # Don't start more workers than 1:1
    if WORKERS < len(items):
        WORKERS = len(items)

    pool = ThreadPool(WORKERS)
    chunks = 1  # TODO
    # 1 ffmpeg is normally 10x-20x * 2500 kbit/s ish,
    # so depending on how many items you download and
    # your bandwidth you might need to tweak chunks
    results = pool.imap_unordered(dl, items, chunks)
    try:
        for j in tqdm.tqdm(results, total=len(items)):
            pass
    finally:
        pool.close()
        pool.join()
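The third argument to imap_unordered above is the chunksize: the iterable is split into batches of that size and each batch is dispatched to a worker as a single task. Larger chunks reduce scheduling overhead but make results (and therefore the tqdm bar) arrive in bursts, which is why the comment above suggests tweaking it. A small sketch of the trade-off (the worker and the numbers are illustrative):

from multiprocessing.dummy import Pool as ThreadPool

def job(item):
    return item  # stand-in for one download/transcode task

with ThreadPool(4) as pool:
    # chunksize=8: workers receive items in batches of eight, and a
    # progress bar over this iterator advances eight items at a time.
    for _ in pool.imap_unordered(job, range(100), 8):
        pass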
Example 5: _maybe_convert_set
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
def _maybe_convert_set(extracted_dir, source_csv, target_csv):
    print()
    if path.exists(target_csv):
        print('Found CSV file "%s" - not importing "%s".' % (target_csv, source_csv))
        return
    print('No CSV file "%s" - importing "%s"...' % (target_csv, source_csv))
    samples = []
    with open(source_csv) as source_csv_file:
        reader = csv.DictReader(source_csv_file)
        for row in reader:
            samples.append((row['filename'], row['text']))

    # Mutable counters for the concurrent embedded routine
    counter = {'all': 0, 'too_short': 0, 'too_long': 0}
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        mp3_filename = path.join(*(sample[0].split('/')))
        mp3_filename = path.join(extracted_dir, mp3_filename)
        # Storing wav files next to the mp3 ones - just with a different suffix
        wav_filename = path.splitext(mp3_filename)[0] + ".wav"
        _maybe_convert_wav(mp3_filename, wav_filename)
        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
        file_size = path.getsize(wav_filename)
        with lock:
            if int(frames/SAMPLE_RATE*1000/10/2) < len(str(sample[1])):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames/SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, sample[1]))
            counter['all'] += 1

    print('Importing mp3 files...')
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    print('Writing "%s"...' % target_csv)
    with open(target_csv, 'w') as target_csv_file:
        writer = csv.DictWriter(target_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            writer.writerow({'wav_filename': filename, 'wav_filesize': file_size, 'transcript': transcript})

    print('Imported %d samples.' % (counter['all'] - counter['too_short'] - counter['too_long']))
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
Example 6: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Note: this example is Python 2 code (reader.next(), itertools.izip, xrange).
# Imports inferred from usage:
import csv
import itertools
import os
import subprocess
from functools import partial
from multiprocessing.dummy import Pool

def main(args):
    print(args)
    pool = Pool()
    protein_name = os.path.splitext(os.path.basename(args.file))[0]
    with open(args.settings, 'rb') as f:
        reader = csv.reader(f, delimiter=';')
        header = reader.next()
        col_idx = dict(itertools.izip(header, xrange(len(header))))
        # Now we can get a column index by name: `col_idx['Age']`
        settings_list = [row for row in reader]

    commands = list()
    for row in settings_list:
        dab_shift = int(row[col_idx['DAB shift']])
        hem_shift = int(row[col_idx['HEM shift']])
        fileout = os.path.join(args.out, protein_name + "_d%d-h%d.csv" % (dab_shift, hem_shift))
        shstr = "python2 cli_hpa.py %s %f --dab-shift %d --hem-shift %d --mp-disable --quiet --out %s" % (
            args.file, args.scale, dab_shift, hem_shift, fileout)
        commands.append(shstr)
    print(commands)
    # quit()
    for i, returncode in enumerate(pool.imap_unordered(partial(subprocess.call, shell=True), commands)):
        print("Let's play! %d" % i)
        if returncode != 0:
            print("%d command failed: %d" % (i, returncode))
Example 7: check_vm_connectivity
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
def check_vm_connectivity(env, os_conn, vm_keypair=None, timeout=4 * 60):
    """Check that all vms can ping each other and public ip"""
    ping_plan = {}
    exc = []

    def check(args):
        server, ips_to_ping = args
        try:
            check_ping_from_vm(env, os_conn, server, vm_keypair, ips_to_ping,
                               timeout=timeout)
        except AssertionError as e:
            return e

    servers = os_conn.get_servers()
    for server1 in servers:
        ips_to_ping = [settings.PUBLIC_TEST_IP]
        for server2 in servers:
            if server1 == server2:
                continue
            ips_to_ping += os_conn.get_nova_instance_ips(
                server2).values()
        ping_plan[server1] = ips_to_ping

    p = Pool(len(ping_plan))
    for result in p.imap_unordered(check, ping_plan.items()):
        if result is not None:
            exc.append(result)
    if len(exc) > 0:
        raise MultipleAssertionErrors(exc)
Example 8: load
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# (Method of a DocsLoader class; @classmethod inferred from the `cls` parameter.)
@classmethod
def load(cls, docs, ignore_errors=False):
    """Force load the provided docs to read from file system."""
    if not docs:
        return
    pod = docs[0].pod

    def load_func(doc):
        """Force the doc to read the source file."""
        try:
            # pylint: disable=pointless-statement
            doc.has_serving_path()  # Using doc fields forces file read.
        except document_front_matter.BadFormatError:
            if not ignore_errors:
                raise

    with pod.profile.timer('DocsLoader.load'):
        if ThreadPool is None or len(docs) < cls.MIN_POOL_COUNT:
            for doc in docs:
                load_func(doc)
            return
        pool_size = min(cls.MAX_POOL_SIZE, len(docs) * cls.POOL_RATIO)
        pool_size = int(round(pool_size))
        thread_pool = ThreadPool(pool_size)
        results = thread_pool.imap_unordered(load_func, docs)
        # Loop over the results to make sure all threads have been processed.
        for _ in results:
            pass
        thread_pool.close()
        thread_pool.join()
Example 9: img_rescaler
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage:
import os
from time import sleep

import numpy as np
import tqdm
from multiprocessing.dummy import Pool
from skimage import io
from skimage.util import img_as_ubyte

def img_rescaler(dir_in, extension_in, threads=1):
    """
    Import an image, rescale it to normal UBYTE (0-255, 8 bit) range, and re-save it.
    """
    dir_out = os.path.join(dir_in, "rescaled")

    total_files = 0
    for path, folder, filename in os.walk(dir_in):
        if dir_out not in path:
            for f in filename:
                if f.endswith(extension_in):
                    total_files += 1
    print("\nYou have {} images to analyze".format(total_files))

    for path, folder, filename in os.walk(dir_in):
        if dir_out not in path:  # Don't run in the output directory.
            # Make directory for saving objects
            subpath = path[len(dir_in)+1:]
            if not os.path.exists(os.path.join(dir_out, subpath)):
                os.mkdir(os.path.join(dir_out, subpath))

            # What we'll do:
            global _core_fn  # bad form for Pool.map() compatibility
            def _core_fn(filename):
                if filename.endswith(extension_in):
                    # count progress.
                    path_in = os.path.join(path, filename)
                    subpath_in = os.path.join(subpath, filename)  # for printing purposes
                    path_out = os.path.join(dir_out, subpath, filename)

                    if os.path.exists(path_out):  # skip
                        print("\nALREADY ANALYZED: {}. Skipping...\n".format(subpath_in))
                    else:  # (try to) do it
                        try:
                            img = io.imread(path_in)  # load image
                            img = img_as_ubyte(img / np.max(img))
                            io.imsave(path_out, img)
                        except:
                            print("Couldn't analyze {}".format(subpath_in))
                return()

            # run it
            sleep(1)  # to give everything time to load
            thread_pool = Pool(threads)
            # Work on _core_fn (and give progressbar). Note: the tqdm object is
            # never iterated here, so the bar does not actually advance; the
            # pool still runs every task, and close()/join() wait for them.
            tqdm.tqdm(thread_pool.imap_unordered(_core_fn,
                                                 filename,
                                                 chunksize=1),
                      total=total_files)
            # finish
            thread_pool.close()
            thread_pool.join()
    return()
Example 10: calc_factorials
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (`factorial_calc` comes from the source project):
from multiprocessing import Pool as ProcessPool
from multiprocessing.dummy import Pool as ThreadPool

def calc_factorials(max_int=100, pool_size=8, threads=True, chunk_size=10):
    if threads:
        pool = ThreadPool(pool_size)
    else:
        pool = ProcessPool(pool_size)
    # Lazy: no work happens until the caller consumes the returned iterator.
    results = pool.imap_unordered(factorial_calc, range(max_int), chunk_size)
    return results
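Because imap_unordered is lazy, the iterator returned by calc_factorials has done no work yet: the pool computes results as the caller consumes it, in completion order. A hedged usage sketch (factorial_calc is not shown in the original, so a minimal stand-in is defined here):

from math import factorial

def factorial_calc(n):
    # Return the input alongside the result so unordered results stay matchable.
    return n, factorial(n)

for n, value in calc_factorials(max_int=10, pool_size=4, chunk_size=2):
    print(n, value)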
Example 11: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (parse_args, get_image and download_image
# come from the source project):
import csv
import os
import sys
from functools import partial
from multiprocessing.dummy import Pool as ThreadPool

def main():
    input_filename, output_dir, n_threads = parse_args()

    if not os.path.isdir(output_dir):
        print("Output directory {} does not exist".format(output_dir))
        sys.exit()

    with open(input_filename) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)

    try:
        row_idx_image_id = header_row.index('ImageId')
        row_idx_url = header_row.index('URL')
        row_idx_x1 = header_row.index('x1')
        row_idx_y1 = header_row.index('y1')
        row_idx_x2 = header_row.index('x2')
        row_idx_y2 = header_row.index('y2')
    except ValueError as e:
        print('One of the columns was not found in the source file: ', e)

    rows = [(row[row_idx_image_id], row[row_idx_url], float(row[row_idx_x1]),
             float(row[row_idx_y1]), float(row[row_idx_x2]),
             float(row[row_idx_y2])) for row in rows]

    if n_threads > 1:
        pool = ThreadPool(n_threads)
        partial_get_images = partial(get_image, output_dir=output_dir)
        # enumerate(..., 1) already yields a 1-based count, so report `i`
        # directly (the original printed `i + 1`, overcounting by one).
        for i, _ in enumerate(pool.imap_unordered(partial_get_images, rows), 1):
            sys.stderr.write('\rDownloaded {0} images'.format(i))
        pool.close()
        pool.join()
    else:
        failed_to_download = set()
        for idx in range(len(rows)):
            row = rows[idx]
            if not download_image(image_id=row[0],
                                  url=row[1],
                                  x1=float(row[2]),
                                  y1=float(row[3]),
                                  x2=float(row[4]),
                                  y2=float(row[5]),
                                  output_dir=output_dir):
                # `row` is the re-packed tuple here, so the image id is row[0].
                failed_to_download.add(row[0])
            sys.stdout.write('\rDownloaded {0} images'.format(idx + 1))
            sys.stdout.flush()
        print()
        if failed_to_download:
            print('\nUnable to download images with the following IDs:')
            for image_id in failed_to_download:
                print(image_id)
Example 12: run_task_multi_thread
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (`run_single_task` comes from the source project):
from multiprocessing.dummy import Pool
from tqdm import tqdm

def run_task_multi_thread(action_function, files, action_label, nb_threads=2, offset=0):
    """Run the given action on every file using a thread pool.

    It uses a progress bar instead of the usual verbose log.
    """
    pool = Pool(processes=nb_threads)
    items = [(file, action_function) for file in files[offset:]]
    pool_iterable = pool.imap_unordered(run_single_task, items)
    progress_bar_items = tqdm(total=len(items),
                              iterable=pool_iterable,
                              unit='images',
                              desc='{0: <30}'.format(action_label))
    for item in progress_bar_items:
        pass
Example 13: match
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# (`args`, `perform` and `stat_create` come from the source project.)
def match(bot, opponent):
    # List of match results
    results = []
    # List of matches to perform
    matches = [[bot, opponent]] * args.count
    # Run the matches in a thread pool and collect the results
    pool = Pool(args.threads)
    for match in pool.imap_unordered(perform, matches):
        results.append(match)
    return stat_create(results)
Example 14: getPool
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
class ScannerPool:
    # @classmethod
    # def getPool(cls):
    #     if "pool" not in cls.__dict__ or cls.pool is None:
    #         logger.info("Threads pool created with %d threads" % THREAD_NUMBER)
    #         cls.pool = Pool(THREAD_NUMBER)
    #     return cls.pool

    def __init__(self):
        self.pool = Pool(THREAD_NUMBER)

    def map(self, *args, **kwargs):
        return self.pool.imap_unordered(*args, **kwargs)
Example 15: build_common
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import imap_unordered [as alias]
# Imports inferred from usage (COMMON_FILES, ARGS and INCLUDES come from
# the source project):
import os
import shlex
import subprocess
import sys
from multiprocessing.dummy import Pool

def build_common(out_name='common.a', build_dir='temp_build/temp_build', num_parallel=1):
    compiler = os.environ.get('CXX', 'g++')
    ar = os.environ.get('AR', 'ar')
    libtool = os.environ.get('LIBTOOL', 'libtool')
    cflags = os.environ.get('CFLAGS', '') + os.environ.get('CXXFLAGS', '')

    for file in COMMON_FILES:
        outfile = os.path.join(build_dir, os.path.splitext(file)[0] + '.o')
        outdir = os.path.dirname(outfile)
        if not os.path.exists(outdir):
            print('mkdir', outdir)
            os.makedirs(outdir)

    def build_one(file):
        outfile = os.path.join(build_dir, os.path.splitext(file)[0] + '.o')
        if os.path.exists(outfile):
            # Already built; still return the path so the link step below
            # gets a complete object list (the original returned None here).
            return outfile

        cmd = '{cc} -fPIC -c {cflags} {args} {includes} {infile} -o {outfile}'.format(
            cc=compiler,
            cflags=cflags,
            args=' '.join(ARGS),
            includes=' '.join('-I' + i for i in INCLUDES),
            infile=file,
            outfile=outfile,
        )
        print(cmd)
        subprocess.check_call(shlex.split(cmd))
        return outfile

    pool = Pool(num_parallel)
    obj_files = list(pool.imap_unordered(build_one, COMMON_FILES))

    if sys.platform.startswith('darwin'):
        cmd = '{libtool} -static -o {outfile} {infiles}'.format(
            libtool=libtool,
            outfile=out_name,
            infiles=' '.join(obj_files),
        )
        print(cmd)
        subprocess.check_call(shlex.split(cmd))
    else:
        cmd = '{ar} rcs {outfile} {infiles}'.format(
            ar=ar,
            outfile=out_name,
            infiles=' '.join(obj_files)
        )
        print(cmd)
        subprocess.check_call(shlex.split(cmd))