本文整理汇总了Python中multiprocessing.pool.Pool.map方法的典型用法代码示例。如果您正苦于以下问题:Python Pool.map方法的具体用法?Python Pool.map怎么用?Python Pool.map使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.pool.Pool的用法示例。
在下文中一共展示了Pool.map方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main_mh
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main_mh():
    """Extract STFA (streamflow) fields for 1981-2009 from the BC-MH samples,
    processing one year per worker process (3 workers)."""
    samples_dir_p = Path("/RECH2/huziy/BC-MH/bc_mh_044deg/Samples")
    out_dir_root = Path("/RECH2/huziy/MH_streamflows/")

    # Name the output folder after the experiment, not the generic "Samples" dir.
    if samples_dir_p.name.lower() == "samples":
        out_folder_name = samples_dir_p.parent.name
    else:
        out_folder_name = samples_dir_p.name

    varnames = ["STFA", ]

    # ======================================
    out_dir_p = out_dir_root.joinpath(out_folder_name)
    if not out_dir_p.is_dir():
        out_dir_p.mkdir(parents=True)

    inputs = []
    for y in range(1981, 2010):
        inputs.append(dict(year=y, varnames=varnames, samples_dir=samples_dir_p,
                           out_dir=out_dir_p, target_freq_hours=24))

    # Extract the data for each year in parallel.
    # FIX: close/join the pool so worker processes do not linger after map().
    pool = Pool(processes=3)
    try:
        pool.map(extract_data_for_year_in_parallel, inputs)
    finally:
        pool.close()
        pool.join()
示例2: run_parallel
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def run_parallel(num_processes, experiment_names, methods, sparsity_factors, run_ids):
    """
    Run multiple experiments in parallel.

    Parameters
    ----------
    num_processes : int
        The maximum number of processes that can run concurrently.
    experiment_names : list of str
        The names of experiments to run.
    methods : list of str
        The methods to run the experiments under (mix1, mix2, or full).
    sparsity_factors : list of float
        The sparsity of inducing points to run the experiments at.
    run_ids : list of int
        The ids of the configurations under which to run the experiments.
    """
    # Build one config dict per point of the cartesian product of all options
    # (comprehension replaces the original 4-deep append loop).
    experiment_configs = [
        {'experiment_name': experiment,
         'method': method,
         'sparsity_factor': sparsity_factor,
         'run_id': run_id}
        for experiment in experiment_names
        for method in methods
        for sparsity_factor in sparsity_factors
        for run_id in run_ids
    ]

    # Now run the experiments.
    # FIX: close/join the pool so worker processes are reaped after map().
    pool = Pool(num_processes)
    try:
        pool.map(run_config, experiment_configs)
    finally:
        pool.close()
        pool.join()
示例3: Pool
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
class Pool(object):
    '''
    Thin wrapper that prefers an MPI-based pool and falls back to a local
    multiprocessing pool when MPI is unavailable.
    '''

    def __init__(self, **pool_kwargs):
        # Try MPI first; ImportError (no MPI bindings) or ValueError
        # (e.g. a single-process run) means we fall back to MultiPool.
        try:
            kw = KwargsCheck(MPIPool, pool_kwargs)
            self._pool = MPIPool(**kw)
            self.MPI = True
        except (ImportError, ValueError):
            kw = KwargsCheck(MultiPool, pool_kwargs)
            self._pool = MultiPool(**kw)
            self.MPI = False

        # Under MPI, worker ranks block in wait() and exit when released;
        # only the master rank continues past this point.
        if self.MPI:
            if not self._pool.is_master():
                self._pool.wait()
                sys.exit(0)

    def map(self, f, x, args=(), kwargs=None):
        '''
        Map ``f`` over the iterable ``x``; any extra positional/keyword
        arguments are bound to ``f`` (via ``wrap``) before dispatching.

        FIX: the ``kwargs`` default was a mutable ``{}`` shared across all
        calls; ``None`` is now used as the sentinel instead.
        '''
        if kwargs is None:
            kwargs = {}
        if len(args) or len(kwargs):
            w = wrap(f, *args, **kwargs)
            return self._pool.map(w, x)
        else:
            return self._pool.map(f, x)

    def close(self):
        '''Release the underlying pool's worker processes.'''
        self._pool.close()
示例4: main
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main(datadir, convert_dir, crop_size):
    """Resize every image found in ``datadir`` into ``convert_dir``,
    dispatching the work to a process pool in batches of 500."""
    # Best-effort create of the target directory; an existing dir is fine.
    try:
        os.mkdir(convert_dir)
    except OSError:
        pass

    filenames = data_util.get_image_files(datadir)
    print('Resizing images in {} to {}'.format(datadir, convert_dir))

    n = len(filenames)
    batch_size = 500
    # FIX: ceil-divide. The old ``n // batch_size + 1`` dispatched one extra
    # empty batch whenever n was an exact multiple of batch_size.
    batches = (n + batch_size - 1) // batch_size

    p = Pool()

    args = []
    for f in filenames:
        args.append((convert_size, (datadir, convert_dir, f, crop_size)))

    for i in range(batches):
        print('batch {:>2} / {}'.format(i + 1, batches))
        p.map(convert, args[i * batch_size: (i + 1) * batch_size])

    p.close()
    p.join()
    print('Done')
示例5: run_parallel
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def run_parallel(n_process):
    """
    Creates a process for each element in the array returned by ``get_configs()`` and runs
    the experiment corresponding to each element. The maximum number of processes to run
    in parallel is determined by ``n_process``.
    """
    # FIX: close/join the pool so worker processes are reaped after map().
    p = Pool(n_process)
    try:
        p.map(run_config, ExperimentRunner.get_configs())
    finally:
        p.close()
        p.join()
示例6: main
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main():
    """Refresh market price data for every item in region 10000002,
    fanning the per-item updates out to a pool of workers."""
    # update_item_list(SQL_USER, SQL_PASS, SQL_DATABASE)
    # NOTE(review): the host portion of this URL ("[email protected]") looks
    # mangled by web scraping — restore the real "user:pass@host/db" form.
    engine = create_engine('mysql+mysqlconnector://%s:%[email protected]/%s' % (SQL_USER, SQL_PASS, SQL_DATABASE))
    region_id = 10000002
    # Only the index (item_id) is needed; don't materialize unused rows.
    item_id_list = [int(index) for index in pd.read_sql_table('items', engine, index_col='item_id').index]
    data_write = partial(update_price_data, region_id)
    # Each worker opens its own DB connection via init_function.
    p = Pool(initializer=init_function, initargs=(SQL_USER, SQL_PASS, SQL_DATABASE))
    try:
        p.map(data_write, item_id_list)
    finally:
        # FIX: close/join the pool so worker processes are reaped.
        p.close()
        p.join()
示例7: main_crcm5_nemo
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main_crcm5_nemo():
    """Run the monthly lake-effect-snow analysis for the CRCM5_NEMO coupled
    simulation (1980-2015), one month per pool task."""
    label = "CRCM5_NEMO"

    period = Period(
        datetime(1980, 1, 1), datetime(2015, 12, 31)
    )

    # FIX: these mappings do not depend on the month — build them once.
    # The original rebuilt them every iteration and also built a first
    # vname_map copy that was immediately discarded.
    vname_to_level_erai = {
        T_AIR_2M: VerticalLevel(1, level_kinds.HYBRID),
        U_WE: VerticalLevel(1, level_kinds.HYBRID),
        V_SN: VerticalLevel(1, level_kinds.HYBRID),
    }

    vname_map = {}
    vname_map.update(vname_map_CRCM5)
    vname_map.update({
        default_varname_mappings.SNOWFALL_RATE: "SN"
    })

    pool = Pool(processes=10)

    input_params = []
    for month_start in period.range("months"):
        month_end = month_start.add(months=1).subtract(seconds=1)
        current_month_period = Period(month_start, month_end)
        current_month_period.months_of_interest = [month_start.month, ]

        label_to_config = OrderedDict([(
            label, {
                DataManager.SP_BASE_FOLDER: "/snow3/huziy/NEI/GL/erai0.75deg_driven/GL_with_NEMO_dtN_1h_and_30min/Samples",
                DataManager.SP_DATASOURCE_TYPE: data_source_types.SAMPLES_FOLDER_FROM_CRCM_OUTPUT,
                DataManager.SP_INTERNAL_TO_INPUT_VNAME_MAPPING: vname_map,
                DataManager.SP_LEVEL_MAPPING: vname_to_level_erai,
                DataManager.SP_OFFSET_MAPPING: vname_to_offset_CRCM5,
                DataManager.SP_MULTIPLIER_MAPPING: vname_to_multiplier_CRCM5,
                DataManager.SP_VARNAME_TO_FILENAME_PREFIX_MAPPING: default_varname_mappings.vname_to_fname_prefix_CRCM5,
                "out_folder": "lake_effect_analysis_{}_{}-{}_monthly".format(label, period.start.year, period.end.year)
            }
        )])

        kwargs = dict(
            label_to_config=label_to_config,
            period=current_month_period,
            months_of_interest=current_month_period.months_of_interest,
            nprocs_to_use=1
        )

        print(current_month_period.months_of_interest)
        input_params.append(kwargs)

    # execute in parallel, then release the workers
    try:
        pool.map(monthly_func, input_params)
    finally:
        pool.close()
        pool.join()
示例8: main_crcm5_hl
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main_crcm5_hl():
    """Run the monthly lake-effect-snow analysis for the CRCM5_HL (Hostetler)
    simulation (1980-2009), one month per pool task."""
    label = "CRCM5_HL"

    period = Period(
        datetime(1980, 1, 1), datetime(2009, 12, 31)
    )

    # FIX: these mappings do not depend on the month — build them once.
    # The original rebuilt them every iteration and also built a first
    # vname_map copy that was immediately discarded.
    vname_to_level_erai = {
        T_AIR_2M: VerticalLevel(1, level_kinds.HYBRID),
        U_WE: VerticalLevel(1, level_kinds.HYBRID),
        V_SN: VerticalLevel(1, level_kinds.HYBRID),
    }

    vname_map = {}
    vname_map.update(vname_map_CRCM5)
    vname_map.update({
        default_varname_mappings.SNOWFALL_RATE: "U3"
    })

    pool = Pool(processes=12)

    input_params = []
    for month_start in period.range("months"):
        month_end = month_start.add(months=1).subtract(seconds=1)
        current_month_period = Period(month_start, month_end)
        current_month_period.months_of_interest = [month_start.month, ]

        label_to_config = OrderedDict([(
            label, {
                DataManager.SP_BASE_FOLDER: "/RECH2/huziy/coupling/GL_440x260_0.1deg_GL_with_Hostetler/Samples_selected",
                DataManager.SP_DATASOURCE_TYPE: data_source_types.SAMPLES_FOLDER_FROM_CRCM_OUTPUT_VNAME_IN_FNAME,
                DataManager.SP_INTERNAL_TO_INPUT_VNAME_MAPPING: vname_map,
                DataManager.SP_LEVEL_MAPPING: vname_to_level_erai,
                DataManager.SP_OFFSET_MAPPING: vname_to_offset_CRCM5,
                DataManager.SP_MULTIPLIER_MAPPING: vname_to_multiplier_CRCM5,
                "out_folder": "lake_effect_analysis_{}_{}-{}_monthly".format(label, period.start.year, period.end.year)
            }
        )])

        kwargs = dict(
            label_to_config=label_to_config,
            period=current_month_period,
            months_of_interest=current_month_period.months_of_interest,
            nprocs_to_use=1
        )

        print(current_month_period.months_of_interest)
        input_params.append(kwargs)

    # execute in parallel, then release the workers
    try:
        pool.map(monthly_func, input_params)
    finally:
        pool.close()
        pool.join()
示例9: main_future
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main_future(nprocs=20):
    """Run the monthly lake-effect-snow analysis for the CanESM2 RCP8.5
    future scenario (2079-2100), one month per pool task.

    :param nprocs: number of worker processes in the pool.
    """
    period = Period(
        datetime(2079, 1, 1), datetime(2100, 12, 31)
    )

    label = "CRCM5_NEMO_fix_TT_PR_CanESM2_RCP85_{}-{}_monthly".format(period.start.year, period.end.year)

    vname_to_level_erai = {
        T_AIR_2M: VerticalLevel(1, level_kinds.HYBRID),
        U_WE: VerticalLevel(1, level_kinds.HYBRID),
        V_SN: VerticalLevel(1, level_kinds.HYBRID),
    }

    base_folder = "/scratch/huziy/Output/GL_CC_CanESM2_RCP85/coupled-GL-future_CanESM2/Samples"

    vname_map = {}
    vname_map.update(vname_map_CRCM5)
    # vname_map[default_varname_mappings.SNOWFALL_RATE] = "SN"
    # "XXX" deliberately disables the direct snowfall-rate input here
    # (presumably so it is derived instead) — confirm with monthly_func.
    vname_map[default_varname_mappings.SNOWFALL_RATE] = "XXX"

    pool = Pool(processes=nprocs)

    input_params = []
    for month_start in period.range("months"):
        month_end = month_start.add(months=1).subtract(seconds=1)
        current_month_period = Period(month_start, month_end)
        current_month_period.months_of_interest = [month_start.month, ]

        label_to_config = OrderedDict([(
            label, {
                # "base_folder": "/HOME/huziy/skynet3_rech1/CRCM5_outputs/cc_canesm2_rcp85_gl/coupled-GL-future_CanESM2/Samples",
                DataManager.SP_BASE_FOLDER: base_folder,
                DataManager.SP_DATASOURCE_TYPE: data_source_types.SAMPLES_FOLDER_FROM_CRCM_OUTPUT,
                DataManager.SP_INTERNAL_TO_INPUT_VNAME_MAPPING: vname_map,
                DataManager.SP_LEVEL_MAPPING: vname_to_level_erai,
                DataManager.SP_OFFSET_MAPPING: vname_to_offset_CRCM5,
                DataManager.SP_MULTIPLIER_MAPPING: vname_to_multiplier_CRCM5,
                DataManager.SP_VARNAME_TO_FILENAME_PREFIX_MAPPING: vname_to_fname_prefix_CRCM5,
                "out_folder": "lake_effect_analysis_{}_{}-{}".format(label, period.start.year, period.end.year)
            }
        )])

        kwargs = dict(
            label_to_config=label_to_config,
            period=current_month_period,
            months_of_interest=current_month_period.months_of_interest,
            nprocs_to_use=1
        )

        print(current_month_period.months_of_interest)
        input_params.append(kwargs)

    # execute in parallel, then release the workers
    # FIX: close/join the pool so worker processes are reaped after map().
    try:
        pool.map(monthly_func, input_params)
    finally:
        pool.close()
        pool.join()
示例10: launchCMAESForAllTargetSizesMulti
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def launchCMAESForAllTargetSizesMulti():
    '''
    Launch in parallel (on different processors) the cmaes optimization
    for each target size.
    '''
    # initializes setup variables
    rs = ReadSetupFile()
    # initializes a pool of workers, ie multiprocessing
    p = Pool()
    try:
        # BUG FIX: Pool.map's third positional argument is ``chunksize``
        # (an int); passing the string "theta" raised a TypeError at
        # runtime. Use starmap so each worker receives
        # (targetSize, "theta") as two positional arguments.
        # NOTE(review): this assumes launchCMAESForSpecificTargetSize(size,
        # "theta") is the intended call — confirm against its definition.
        p.starmap(launchCMAESForSpecificTargetSize,
                  [(size, "theta") for size in rs.sizeOfTarget])
    finally:
        p.close()
        p.join()
示例11: get_word
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def get_word():
    """Check every candidate domain listed in dic/newwords, using two
    worker processes; errors are reported but not fatal."""
    # FIX: close the word-list file (the original leaked the handle),
    # and use print() (the original used a Python-2 print statement,
    # inconsistent with the rest of the file).
    with open('dic/newwords') as fh:
        domains = fh.readlines()
    try:
        pool = Pool(processes=2)
        pool.map(check_domain, domains)
        pool.close()
        pool.join()
    except Exception as e:
        # best-effort: report and continue
        print(e)
示例12: run
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def run(self, test_name=None, db_adapter=None):
    """
    Run the benchmark: create USERS_COUNT user documents, then update them
    with 1..MAX_PROCESSES concurrent processes, saving the timing of each
    run via the adapter.

    :param test_name: label stored with the results; defaults to
        "<adapter>_<timestamp>".
    :param db_adapter: adapter key; defaults to DEFAULT_DATABASE_ADAPTER.
    """
    # FIX: converted Python-2 print statements to print() calls for
    # consistency with the rest of the file; fixed the "proceses" typo.
    if db_adapter is None:
        db_adapter = DEFAULT_DATABASE_ADAPTER
    if test_name is None:
        test_name = '_'.join([db_adapter, datetime.datetime.now().strftime("%Y-%m-%d %H:%M")])
    print(''.join(['Running "', test_name, '" test']))

    print('Prepare database')
    adapter = adapter_factory(db_adapter)
    adapter.prepare_db()
    test_id = adapter.create_new_test(test_name)
    print('')

    print('Create user documents')
    pool = Pool(processes=10)
    params = [{'user_id': i, 'docs_per_user': DOCS_PER_USER, 'db_adapter': db_adapter}
              for i in range(1, USERS_COUNT + 1)]
    start = time.time()
    try:
        pool.map(create_users, params)
        print('Full time:', time.time() - start)
    finally:
        # terminate (not close): tear the workers down immediately, even on error
        pool.terminate()
        del pool
    print('OK! Users were created!')
    print('')

    for i in range(1, MAX_PROCESSES + 1):
        print('Run test with %d processes' % i)
        pool = Pool(processes=i)
        params = [{'user_id': j, 'db_adapter': db_adapter} for j in range(1, USERS_COUNT + 1)]
        start = time.time()
        try:
            res = pool.map(update_users, params)
            full_time = time.time() - start
        finally:
            pool.terminate()
            del pool
        print('Test is finished! Save results')
        print('')
        adapter.save_results(test_id, res, i)
        print('Full time:', full_time)
        print('')
    print('Finish!')
示例13: main
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main():
    """Download every .jpg link from the Imgur gallery concurrently using
    8 worker processes, timing the whole run."""
    ts = time()
    client_id = os.getenv('IMGUR_CLIENT_ID')
    if not client_id:
        raise Exception("Couldn't find IMGUR_CLIENT_ID environment variable!")
    download_dir = setup_download_dir()
    links = [l for l in get_links(client_id) if l.endswith('.jpg')]
    # Bind the target directory so workers only receive the URL.
    download = partial(download_link, download_dir)
    # FIX: close/join the pool so worker processes are reaped after map().
    p = Pool(8)
    try:
        p.map(download, links)
    finally:
        p.close()
        p.join()
    print('Took {}s'.format(time() - ts))
示例14: validate_series
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def validate_series(yaml_file, sequence_dictionary):
    """
    :param yaml_file: The mdl yaml file.
    :param sequence_dictionary: Dictionary of sequences
    :return: Runs a large number of sequence tests on the series to make sure
        the sequences for each protein match the given sequence and the series
        itself.
    """
    yaml_file = load_yaml_file(yaml_file)
    # One (yaml, protein, sequences) job tuple per protein in the series.
    jobs = [(yaml_file, protein, sequence_dictionary) for protein in yaml_file["protein_list"]]
    # FIX: close/join the pool so worker processes are reaped after map().
    p = Pool(cpu_count())
    try:
        p.map(_validate_protein, jobs)
    finally:
        p.close()
        p.join()
    return
示例15: main_obs
# 需要导入模块: from multiprocessing.pool import Pool [as 别名]
# 或者: from multiprocessing.pool.Pool import map [as 别名]
def main_obs():
    """Run the daily lake-effect-snow analysis on the interpolated
    observation dataset (1980-2010), one month per pool task."""
    label = "Obs_monthly_icefix_test2_1proc_speedtest_3"
    period = Period(
        datetime(1980, 1, 1), datetime(2010, 12, 31)
    )

    # FIX: these mappings do not depend on the month — build them once
    # instead of rebuilding them on every loop iteration.
    vname_to_level_erai = {
        T_AIR_2M: VerticalLevel(1, level_kinds.HYBRID),
        U_WE: VerticalLevel(1, level_kinds.HYBRID),
        V_SN: VerticalLevel(1, level_kinds.HYBRID),
    }

    vname_map = {}
    vname_map.update(vname_map_CRCM5)

    pool = Pool(processes=20)

    input_params = []
    for month_start in period.range("months"):
        month_end = month_start.add(months=1).subtract(seconds=1)
        current_month_period = Period(month_start, month_end)
        current_month_period.months_of_interest = [month_start.month, ]

        label_to_config = OrderedDict([(
            label, {
                DataManager.SP_BASE_FOLDER: "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix",
                DataManager.SP_DATASOURCE_TYPE: data_source_types.ALL_VARS_IN_A_FOLDER_IN_NETCDF_FILES_OPEN_EACH_FILE_SEPARATELY,
                DataManager.SP_INTERNAL_TO_INPUT_VNAME_MAPPING: vname_map,
                DataManager.SP_LEVEL_MAPPING: vname_to_level_erai,
                DataManager.SP_OFFSET_MAPPING: vname_to_offset_CRCM5,
                DataManager.SP_MULTIPLIER_MAPPING: vname_to_multiplier_CRCM5,
                DataManager.SP_VARNAME_TO_FILENAME_PREFIX_MAPPING: vname_to_fname_prefix_CRCM5,
                "out_folder": "lake_effect_analysis_daily_{}_{}-{}".format(label, period.start.year, period.end.year)
            }
        )])

        kwargs = dict(
            label_to_config=label_to_config,
            period=current_month_period,
            months_of_interest=current_month_period.months_of_interest,
            nprocs_to_use=1
        )

        print(current_month_period.months_of_interest)
        input_params.append(kwargs)

    # execute in parallel, then release the workers
    try:
        pool.map(monthly_func, input_params)
    finally:
        pool.close()
        pool.join()