This article collects typical usage examples of the Python method multiprocessing.dummy.Pool.imap. If you are wondering what Pool.imap does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage of the containing class, multiprocessing.dummy.Pool.
The following section shows 15 code examples of Pool.imap, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
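Before the collected examples, here is a minimal, self-contained sketch of how Pool.imap from multiprocessing.dummy (the thread-based Pool) is typically used; the worker function and input data are illustrative only:

from multiprocessing.dummy import Pool  # thread pool exposing the multiprocessing.Pool API

def work(url):
    # Placeholder worker; real code might download or parse the URL.
    return len(url)

urls = ['https://example.com/a', 'https://example.com/b', 'https://example.com/c']

pool = Pool(4)
# imap returns results lazily and in order as worker threads finish them.
for result in pool.imap(work, urls):
    print(result)
pool.close()
pool.join()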
Example 1: run_query_simulations
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def run_query_simulations(states, engine='hoomd'):
    """Run all query simulations for a single iteration. """
    # Gather hardware info.
    gpus = _get_gpu_info()
    if gpus is None:
        n_procs = cpu_count()
        gpus = []
        logging.info("Launching {n_procs} CPU threads...".format(**locals()))
    else:
        n_procs = len(gpus)
        logging.info("Launching {n_procs} GPU threads...".format(**locals()))

    if engine.lower() == 'hoomd':
        worker = _hoomd_worker
    elif engine.lower() == 'lammps':
        worker = _lammps_worker
    else:
        raise UnsupportedEngine(engine)

    n_states = len(states)
    worker_args = zip(states, range(n_states), itertools.repeat(gpus))
    chunk_size = ceil(n_states / n_procs)

    pool = Pool(n_procs)
    pool.imap(worker, worker_args, chunk_size)
    pool.close()
    pool.join()

    for state in states:
        _post_query(state)
Example 2: run
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def run():
    from multiprocessing.dummy import Pool as ThreadPool
    t = [
        ('users', User),
        ('forums', Forum),
        ('threads', Thread),
        ('posts', Post),
    ]
    for entity, factory in t:
        entities = [True for i in range(int(settings[entity]))]
        ready_factory = factory().create
        pool = ThreadPool(8)
        pool.imap(ready_factory, entities)
        pool.close()
        pool.join()

    a = [
        (int(settings['followers']), User().follow),
        (int(settings['subscribptions']), Thread().subscribe),
    ]
    for it, method in a:
        for i in range(it):
            url, args = method()
            print "Requesting %s with %s" % (url, args)
            try:
                args = json.loads(args)
                tools.Request(url, args, post=True).get_response()
            except:
                pass
Example 3: parallel_bulk
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def parallel_bulk(client, actions, thread_count=4, chunk_size=500,
                  max_chunk_bytes=100 * 1024 * 1024,
                  expand_action_callback=expand_action, **kwargs):
    """
    Parallel version of the bulk helper run in multiple threads at once.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg thread_count: size of the threadpool to use for the bulk requests
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
        from the execution of the last chunk when some occur. By default we raise.
    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
        call to ``bulk`` and just report the items that failed as failed.
    :arg expand_action_callback: callback executed on each action passed in,
        should return a tuple containing the action line and the data line
        (`None` if data line should be omitted).
    """
    # Avoid importing multiprocessing unless parallel_bulk is used
    # to avoid exceptions on restricted environments like App Engine
    from multiprocessing.dummy import Pool
    actions = map(expand_action_callback, actions)

    pool = Pool(thread_count)

    for result in pool.imap(
        lambda chunk: list(_process_bulk_chunk(client, chunk, **kwargs)),
        _chunk_actions(actions, chunk_size, max_chunk_bytes, client.transport.serializer)
    ):
        for item in result:
            yield item

    pool.close()
    pool.join()
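A possible way to call the helper above, as a sketch only: it assumes a reachable Elasticsearch cluster, a hypothetical index name, and that each yielded item is an (ok, info) tuple as in the elasticsearch-py streaming helpers. Because parallel_bulk is a generator, it must be iterated for any indexing to happen.

from elasticsearch import Elasticsearch

es = Elasticsearch()  # assumes a cluster on localhost:9200
docs = ({"_index": "my-index", "_source": {"value": i}} for i in range(1000))

# Iterating the generator drives the thread pool and yields per-item results.
for ok, info in parallel_bulk(es, docs, thread_count=4, chunk_size=250):
    if not ok:
        print(info)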
Example 4: find_process_files
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def find_process_files(root_dir):
    lock = Lock()
    pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed .pxi hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    pxi_hashes = {}

    jobs = []

    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I | re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function, hash_db, pxi_hashes, lock))

    for result in pool.imap(lambda args: process(*args), jobs):
        pass

    hash_db.update(pxi_hashes)
    save_hashes(hash_db, HASH_FILE)
Example 5: download_img
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def download_img(folder, dataset_dir, class_name, images_list, threads):
    '''
    Download the images.
    :param folder: train, validation or test
    :param dataset_dir: self explanatory
    :param class_name: self explanatory
    :param images_list: list of the images to download
    :param threads: number of threads
    :return: None
    '''
    image_dir = folder
    download_dir = os.path.join(dataset_dir, image_dir, class_name)
    downloaded_images_list = [f.split('.')[0] for f in os.listdir(download_dir)]
    images_list = list(set(images_list) - set(downloaded_images_list))

    pool = ThreadPool(threads)

    if len(images_list) > 0:
        print("[INFO] Download of {} images in {}.".format(len(images_list), folder))
        commands = []
        for image in images_list:
            path = image_dir + '/' + str(image) + '.jpg ' + '"' + download_dir + '"'
            command = 'aws s3 --no-sign-request --only-show-errors cp s3://open-images-dataset/' + path
            commands.append(command)

        list(tqdm(pool.imap(os.system, commands), total=len(commands)))

        print('[INFO] Done!')
        pool.close()
        pool.join()
    else:
        print('[INFO] All images already downloaded.')
Example 6: put_from_manifest
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def put_from_manifest(
        s3_bucket, s3_connection_host, s3_ssenc, s3_base_path,
        aws_access_key_id, aws_secret_access_key, manifest,
        bufsize, reduced_redundancy, rate_limit, concurrency=None, incremental_backups=False):
    """
    Uploads files listed in a manifest to amazon S3
    to support larger than 5GB files multipart upload is used (chunks of 60MB)
    files are uploaded compressed with lzop, the .lzo suffix is appended
    """
    exit_code = 0
    bucket = get_bucket(
        s3_bucket, aws_access_key_id,
        aws_secret_access_key, s3_connection_host)
    manifest_fp = open(manifest, 'r')
    buffer_size = int(bufsize * MBFACTOR)
    files = manifest_fp.read().splitlines()
    pool = Pool(concurrency)
    for f in pool.imap(upload_file,
                       ((bucket, f, destination_path(s3_base_path, f), s3_ssenc, buffer_size, reduced_redundancy, rate_limit) for f in files if f)):
        if f is None:
            # Upload failed.
            exit_code = 1
        elif incremental_backups:
            # Delete files that were successfully uploaded.
            os.remove(f)
    pool.terminate()
    exit(exit_code)
Example 7: render_one_category_model_views
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def render_one_category_model_views(shape_list, view_params):
    tmp_dirname = tempfile.mkdtemp(dir=g_data_folder, prefix='tmp_view_')
    if not os.path.exists(tmp_dirname):
        os.mkdir(tmp_dirname)

    print('Generating rendering commands...')
    commands = []
    for shape_synset, shape_md5, shape_file, view_num in shape_list:
        # write tmp view file
        tmp = tempfile.NamedTemporaryFile(dir=tmp_dirname, delete=False)
        for i in range(view_num):
            paramId = random.randint(0, len(view_params) - 1)
            tmp_string = '%f %f %f %f\n' % (view_params[paramId][0], view_params[paramId][1], view_params[paramId][2], max(0.01, view_params[paramId][3]))
            tmp.write(tmp_string)
        tmp.close()

        command = '%s %s --background --python %s -- %s %s %s %s %s > /dev/null 2>&1' % (g_blender_executable_path, g_blank_blend_file_path, os.path.join(BASE_DIR, 'render_model_views.py'), shape_file, shape_synset, shape_md5, tmp.name, os.path.join(g_syn_images_folder, shape_synset, shape_md5))
        commands.append(command)
    print('done (%d commands)!' % (len(commands)))
    print commands[0]

    print('Rendering, it takes long time...')
    report_step = 100
    if not os.path.exists(os.path.join(g_syn_images_folder, shape_synset)):
        os.mkdir(os.path.join(g_syn_images_folder, shape_synset))
    pool = Pool(g_syn_rendering_thread_num)
    for idx, return_code in enumerate(pool.imap(partial(call, shell=True), commands)):
        if idx % report_step == 0:
            print('[%s] Rendering command %d of %d' % (datetime.datetime.now().time(), idx, len(shape_list)))
        if return_code != 0:
            print('Rendering command %d of %d (\"%s\") failed' % (idx, len(shape_list), commands[idx]))
    shutil.rmtree(tmp_dirname)
Example 8: ffmpeg_encode
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def ffmpeg_encode(threads=1):
    cmd = ['ffmpeg', '-y', '-vcodec', 'ppm', '-r', '23.97', '-f', 'image2pipe', '-i', '-']
    cmd.extend(['-vcodec', 'libx264', '-pix_fmt', 'yuv420p', '-profile', 'baseline', '-vb', '15M', '-crf', '16'])
    cmd.extend([os.path.expanduser('~/out.mov')])

    print subprocess.list2cmdline(cmd)

    p = None
    pool = Pool(threads)
    #with ThreadPoolExecutor(max_workers=threads) as e:
    for result in pool.imap(rotate, xrange(360)):
        if p is None:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
        p.stdin.write(result)
        p.stdin.flush()

    p.stdin.close()
    p.wait()
    pool.close()
    pool.join()
Example 9: put_from_manifest
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def put_from_manifest(
    s3_bucket,
    s3_connection_host,
    s3_ssenc,
    s3_base_path,
    aws_access_key_id,
    aws_secret_access_key,
    manifest,
    bufsize,
    concurrency=None,
    incremental_backups=False,
):
    """
    Uploads files listed in a manifest to amazon S3
    to support larger than 5GB files multipart upload is used (chunks of 60MB)
    files are uploaded compressed with lzop, the .lzo suffix is appended
    """
    bucket = get_bucket(s3_bucket, aws_access_key_id, aws_secret_access_key, s3_connection_host)
    manifest_fp = open(manifest, "r")
    buffer_size = int(bufsize * MBFACTOR)
    files = manifest_fp.read().splitlines()
    pool = Pool(concurrency)
    for _ in pool.imap(
        upload_file, ((bucket, f, destination_path(s3_base_path, f), s3_ssenc, buffer_size) for f in files)
    ):
        pass
    pool.terminate()

    if incremental_backups:
        for f in files:
            os.remove(f)
Example 10: run
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def run():
    t = [
        ('users', User().create),
        ('forums', Forum().create),
        ('threads', Thread().create),
        ('posts', Post().create),
        ("followers", User().follow),
        ("subscribptions", Thread().subscribe),
    ]
    for entity, factory in t:
        entities = [True for i in range(int(settings[entity]))]
        num_tasks = len(entities)
        pool = ThreadPool(int(settings['num_threads']))
        try:
            progress = range(5, 105, 5)
            for i, _ in enumerate(pool.imap(factory, entities)):
                perc = i * 100 / num_tasks
                if perc % 5 == 0 and perc in progress:
                    log.print_out('Creating %s: %d%% done' % (entity, perc))
                    progress.remove(perc)
            pool.close()
            pool.join()
        except Exception, e:
            print e
            pool.terminate()
            sys.exit(1)
Example 11: runLocalCommands
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def runLocalCommands(args, outputDir, commands):
    # NOTE: this is going to BREAK meff optimisation if we re-cycle histograms.
    # Needs to be updated to run in successive order if we implement that.
    N = len(commands)
    if N > 50:
        print("")
        print("Are you sure you want to run %d commands locally?" % N)
        if args.dry_run:
            print("[NB: this is a dry run]")
        var = input("Press enter to continue")
        print("")

    cmds = []
    for i, x in enumerate(commands):
        (cuts, name, cmd) = x
        cmd = "cd %s && echo '%d/%d\t%s' && %s 2>&1 >/dev/null" % (outputDir, i + 1, N, cmd, cmd)
        cmds.append(cmd)

    if args.dry_run:
        print("Would run following commands:")
        for cmd in cmds:
            print("  %s" % cmd)
        return

    pool = Pool(10)  # concurrent commands at a time
    for i, returncode in enumerate(pool.imap(partial(subprocess.call, shell=True), cmds)):
        if returncode != 0:
            print(("%d command failed: %d" % (i, returncode)))
Example 12: runPool
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def runPool(fname):
    pool = Pool(8)
    data = open(fname)
    for i in pool.imap(poolWorker, data):
        print i
    #for i in data:
    #    print poolWorker(i)
    return
Example 13: parallel_build
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def parallel_build(jobs, log, verbose=True):
    p = Pool(cpu_count)
    for ok, stdout, stderr in p.imap(run_worker, jobs):
        if verbose or not ok:
            log(stdout)
            if stderr:
                log(stderr)
        if not ok:
            return False
    return True
Example 14: parallel_check_output
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def parallel_check_output(jobs, log):
    p = Pool(cpu_count)
    for ok, stdout, stderr in p.imap(
            partial(run_worker, decorate=False), ((j, '') for j in jobs)):
        if not ok:
            log(stdout)
            if stderr:
                log(stderr)
            raise SystemExit(1)
        yield stdout
Example 15: _install_coreos
# Required import: from multiprocessing.dummy import Pool [possibly under an alias]
# Or: from multiprocessing.dummy.Pool import imap [possibly under an alias]
def _install_coreos(self):
    commands = []
    log.info(self.config_dict)
    for key, value in self.config_dict.iteritems():
        log.info("installing coreos on {}".format(value['disk']))
        commands.append("coreos-install -v -d {} -C {} -c {}".format(value['disk'],
                        cfg.coreos_update_channel, value['tmpfile']))

    pool = Pool(len(self.dns_names))
    for i, retval in enumerate(pool.imap(partial(runcmd), commands)):
        if retval[0]:
            log.error("%s command failed: %s" % (i, retval[2]))