This article collects typical usage examples of Python's multiprocessing.pool.apply_async method. If you have been wondering exactly how to use pool.apply_async, the curated code examples here may help. You can also explore further usage examples from the containing module, multiprocessing.pool.
The following presents 12 code examples of pool.apply_async, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
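For context before the examples: apply_async submits a single call to the pool and immediately returns an AsyncResult, without blocking the caller. A minimal self-contained sketch (the square function is illustrative, not taken from any example below):

import multiprocessing

def square(x):
    return x * x

if __name__ == '__main__':
    with multiprocessing.Pool(processes=2) as pool:
        result = pool.apply_async(square, (3,))  # returns an AsyncResult at once
        print(result.get(timeout=5))             # blocks for the value -> prints 9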
Example 1: _queue_job
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
def _queue_job(self, pool, key, data_file, data_file_size):
    pool.apply_async(
        _fetch_and_process_chunk,
        [],
        {
            "app_config": self.config,
            "debug": self.debug,
            "data_file": data_file,
            "data_file_size": data_file_size,
            "download_progress_per_file": self.download_progress_per_file,
            "site": self.site,
            "pgdata": self.pgdata,
            "tablespaces": self.tablespaces,
        },
        lambda *args: self.job_completed(key),
        lambda exception: self.job_failed(key, exception),
    )
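For reference, the five positional arguments above map onto apply_async(func, args, kwds, callback, error_callback); error_callback receives the exception if the worker raises. A generic sketch of the same shape with explicit keywords (work is an illustrative function):

from multiprocessing import Pool

def work(x):
    return x + 1

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        res = pool.apply_async(
            work,
            args=(41,),                                    # positional args for work
            kwds={},                                       # keyword args for work
            callback=lambda r: print('done:', r),          # success, runs in parent
            error_callback=lambda e: print('failed:', e),  # failure, runs in parent
        )
        res.wait()  # let the callback fire before the pool is torn down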
Example 2: get_container_id_mapping
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import subprocess, time
def get_container_id_mapping(pool, compose_cmd):
    service_names = subprocess.check_output(
        compose_cmd + ["config", "--services"]
    )
    service_names = service_names.strip().decode("utf-8").split("\n")
    id_mapping = {
        name: pool.apply_async(pool_container_id, (name, compose_cmd))
        for name in service_names
    }
    while not all(future.ready() for future in id_mapping.values()):
        time.sleep(0.1)
    for name, future in list(id_mapping.items()):
        if not future.successful():
            raise RuntimeError("Cannot get ID of service {0}".format(name))
        id_mapping[name] = future.get()
    return id_mapping
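Since AsyncResult.get() blocks until the result is ready and re-raises the worker's exception, the explicit ready()/successful() polling above can be collapsed when a bare worker exception (rather than the wrapped RuntimeError) is acceptable; a simpler variant under the same assumptions:

id_mapping = {
    name: pool.apply_async(pool_container_id, (name, compose_cmd))
    for name in service_names
}
return {name: future.get() for name, future in id_mapping.items()}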
Example 3: show_progress
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import sys
def show_progress(a):
    """
    Callback for the run_calls function: print the number of calls that are done.

    Args:
        a: unused argument; since this function is used as a callback by
            apply_async, it has to accept one argument.
    """
    show_progress.counter += 1
    status = '{:{fill}{width}} / {}'.format(show_progress.counter,
                                            show_progress.total,
                                            fill='',
                                            width=len(str(show_progress.total)))
    if show_progress.counter < show_progress.total:
        status += chr(8) * len(status)  # backspaces: rewrite the counter in place
    else:
        status += '\n'
    sys.stdout.write(status)
    sys.stdout.flush()
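Because show_progress keeps its state in function attributes, callers must initialize counter and total before submitting work; a hypothetical driver (process and jobs are illustrative names):

show_progress.counter = 0
show_progress.total = len(jobs)
for job in jobs:
    pool.apply_async(process, (job,), callback=show_progress)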
Example 4: get_ec2_offerings
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import collections, itertools, multiprocessing.pool
def get_ec2_offerings(instances, region, profiles):
    with multiprocessing.pool.ThreadPool(processes=4) as pool:
        offerings = collections.defaultdict(int)
        tasks = []
        print('[global - {}] Getting offerings for all instances...'.format(region))
        for instance, count in instances.items():
            ec2 = boto_session_getter(instance.profile, region)
            tasks.append({
                'profile': [instance.profile],
                'remaining_profiles': [p for p in profiles if p != instance.profile],
                'instance_type': instance.instance_type,
                'instance_count': count,
                'task': pool.apply_async(get_ec2_type_offerings,
                                         [ec2, instance.instance_type]),
            })
        for i, task in zip(itertools.count(1), tasks):
            if len(task['profile']) == 1:
                print('[{} - {}] Getting offerings for instance {}/{}...'.format(
                    task['profile'][0], region, i, len(instances)))
            offering = task['task'].get()
            if offering:
                offerings[offering] += task['instance_count']
            elif len(task['remaining_profiles']):
                ec2 = boto_session_getter(task['remaining_profiles'][0], region)
                new_task = task.copy()
                new_task['task'] = pool.apply_async(get_ec2_type_offerings,
                                                    [ec2, new_task['instance_type']])
                new_task['profile'].append(new_task['remaining_profiles'][0])
                new_task['remaining_profiles'].pop(0)
                tasks.append(new_task)
    return offerings
Example 5: scrape_with_timeout
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: from multiprocessing import TimeoutError
def scrape_with_timeout(page):
    pool = NDPool(processes=1)
    async_result = pool.apply_async(scrape_page, (page,))
    result = None
    try:
        result = async_result.get(timeout=600)
        pool.close()
    except TimeoutError:
        logger.info(u'page scrape timed out: {}'.format(page))
        pool.terminate()
    pool.join()
    return result
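NDPool here appears to be a project-specific non-daemonic pool class; a worker in a plain multiprocessing.Pool is daemonic and cannot spawn child processes of its own, which the scraping code presumably needs. With the standard pool the same close-on-success, terminate-on-timeout pattern looks like this (a sketch, assuming scrape_page is importable):

from multiprocessing import Pool, TimeoutError

def scrape_with_timeout(page, seconds=600):
    pool = Pool(processes=1)
    async_result = pool.apply_async(scrape_page, (page,))
    result = None
    try:
        result = async_result.get(timeout=seconds)
        pool.close()       # no more work; let the worker exit cleanly
    except TimeoutError:
        pool.terminate()   # kill the stuck worker outright
    pool.join()
    return result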
Example 6: timeout
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import functools, multiprocessing.pool
def timeout(timeout):
    """Timeout decorator, parameter in seconds."""
    def timeout_decorator(func):
        """Wrap the original function."""
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            """Closure for function."""
            pool = multiprocessing.pool.ThreadPool(processes=1)
            async_result = pool.apply_async(func, args, kwargs)
            # raises a TimeoutError if execution exceeds timeout
            return async_result.get(timeout)
        return func_wrapper
    return timeout_decorator
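Hypothetical usage of this decorator; note that when get() raises multiprocessing.TimeoutError, the worker thread is not killed and keeps running in the background (threads cannot be terminated):

import time
import multiprocessing.pool

@timeout(2)
def slow():
    time.sleep(10)
    return 'done'

try:
    slow()
except multiprocessing.TimeoutError:
    print('timed out')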
Example 7: fit_pixel_multiprocess_nonlinear
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import multiprocessing
def fit_pixel_multiprocess_nonlinear(data, x, param, reg_mat, use_snip=False):
    """
    Multiprocess fit of experiment data.

    Parameters
    ----------
    data : array
        3D data of experiment spectrum
    param : dict
        fitting parameters

    Returns
    -------
    dict :
        fitting values for all the elements
    """
    num_processors_to_use = multiprocessing.cpu_count()
    logger.info('cpu count: {}'.format(num_processors_to_use))
    pool = multiprocessing.Pool(num_processors_to_use)
    # fit_params = lmfit.Parameters()
    # for i in range(reg_mat.shape[1]):
    #     fit_params.add('a'+str(i), value=1.0, min=0, vary=True)
    result_pool = [pool.apply_async(fit_pixel_nonlinear_per_line,
                                    (n, data[n, :, :], x,
                                     param, reg_mat, use_snip))
                   for n in range(data.shape[0])]
    results = []
    for r in result_pool:
        results.append(r.get())
    pool.terminate()
    pool.join()
    return results
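The AsyncResult objects come back in submission order, so results[n] lines up with row n of the data regardless of which worker finished first. The same fan-out/collect shape in miniature (fit_row and n_rows are illustrative names):

with multiprocessing.Pool() as pool:
    futures = [pool.apply_async(fit_row, (n,)) for n in range(n_rows)]
    results = [f.get() for f in futures]  # ordered by submission, not completion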
Example 8: roi_sum_multi_files
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import multiprocessing
def roi_sum_multi_files(dir_path, file_prefix,
                        start_i, end_i, element_dict,
                        interpath='entry/instrument/detector/data'):
    """
    Fitting for multiple files with multiprocessing.

    Parameters
    ----------
    dir_path : str
    file_prefix : str
    start_i : int
        start id of given file
    end_i : int
        end id of given file
    element_dict : dict
        dict of element with [low, high] bounds as values
    interpath : str
        path inside hdf5 file to fetch the data

    Returns
    -------
    result : list
        fitting result as list of dict
    """
    num_processors_to_use = multiprocessing.cpu_count()
    logger.info('cpu count: {}'.format(num_processors_to_use))
    pool = multiprocessing.Pool(num_processors_to_use)
    result_pool = [pool.apply_async(roi_sum_calculation,
                                    (dir_path, file_prefix,
                                     m, element_dict, interpath))
                   for m in range(start_i, end_i + 1)]
    results = []
    for r in result_pool:
        results.append(r.get())
    pool.terminate()
    pool.join()
    return results
Example 9: timeout
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import functools, multiprocessing.pool
def timeout(max_timeout):
    """Timeout decorator, parameter in seconds."""
    def timeout_decorator(f):
        """Wrap the original function."""
        @functools.wraps(f)
        def func_wrapper(self, *args, **kwargs):
            """Closure for function."""
            pool = multiprocessing.pool.ThreadPool(processes=1)
            # pop the control keyword first so it is not forwarded to f
            timeout = kwargs.pop('timeout_max_timeout', max_timeout) or max_timeout
            async_result = pool.apply_async(f, (self,) + args, kwargs)
            # raises a TimeoutError if execution exceeds max_timeout
            return async_result.get(timeout)
        return func_wrapper
    return timeout_decorator
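Hypothetical usage; the timeout_max_timeout keyword overrides the decorator's default on a per-call basis and is stripped before the wrapped method runs:

class Client:
    @timeout(30)
    def fetch(self, url):
        ...

client = Client()
client.fetch('https://example.com')                          # 30-second limit
client.fetch('https://example.com', timeout_max_timeout=5)   # 5-second limit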
Example 10: process_main_files
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
def process_main_files(pool, snapshot_dir, compose_cmd, container_ids):
    pool.apply_async(collect_backup, [snapshot_dir, compose_cmd])
    pool.apply_async(collect_docker_info, [snapshot_dir])
    pool.apply_async(collect_docker_version, [snapshot_dir])
    pool.apply_async(
        collect_docker_compose_config, [snapshot_dir, compose_cmd])
    pool.apply_async(collect_all_logs, [snapshot_dir, compose_cmd])
    pool.apply_async(collect_monitoring_results,
                     [snapshot_dir, container_ids["admin"]])
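Each call here discards the returned AsyncResult, so an exception in a collector disappears silently; if that matters, an error_callback can surface it (log_failure is a hypothetical handler, shown for one of the calls):

def log_failure(exc):
    logger.error('snapshot task failed: %s', exc)

pool.apply_async(collect_backup, [snapshot_dir, compose_cmd],
                 error_callback=log_failure)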
Example 11: process_service_files
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import os
def process_service_files(pool, name, container_id, snapshot_dir, compose_cmd):
    service_snapshot_dir = os.path.join(snapshot_dir, name)
    pool.apply_async(collect_service_log,
                     [service_snapshot_dir, name, compose_cmd])
    pool.apply_async(collect_service_date,
                     [service_snapshot_dir, name, compose_cmd])
    pool.apply_async(collect_service_unix_timestamp,
                     [service_snapshot_dir, name, compose_cmd])
    pool.apply_async(collect_service_packages_os,
                     [service_snapshot_dir, name, compose_cmd])
    pool.apply_async(collect_service_ps,
                     [service_snapshot_dir, name, compose_cmd])
    pool.apply_async(collect_service_docker_inspect,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_docker_stats,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_config,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_git_release,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_decapod_release,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_packages_npm,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_packages_python2,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_packages_python3,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_ansible_config,
                     [service_snapshot_dir, name, container_id])
    pool.apply_async(collect_service_private_key_sha1sum,
                     [service_snapshot_dir, name, compose_cmd])
Example 12: run_calls
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import apply_async [as alias]
# Also requires: import sys, multiprocessing, multiprocessing.pool
def run_calls(fun, list_of_args, extra_args=(), pool_type='processes',
              nb_workers=multiprocessing.cpu_count(), timeout=60, verbose=True,
              initializer=None, initargs=None):
    """
    Run a function several times in parallel with different inputs.

    Args:
        fun: function to be called several times in parallel.
        list_of_args: list of (first positional) arguments passed to fun, one
            per call
        extra_args: tuple containing extra arguments to be passed to fun
            (same value for all calls)
        pool_type: either 'processes' or 'threads'
        nb_workers: number of calls run simultaneously
        timeout: number of seconds allowed per function call
        verbose: either True (show the amount of computed calls) or False
        initializer, initargs (optional): if initializer is not None then each
            worker process will call initializer(*initargs) when it starts

    Return:
        list of outputs
    """
    if pool_type == 'processes':
        pool = multiprocessing.Pool(nb_workers, initializer, initargs)
    elif pool_type == 'threads':
        pool = multiprocessing.pool.ThreadPool(nb_workers)
    else:
        raise ValueError('unknown pool_type "{}"'.format(pool_type))
    results = []
    outputs = []
    if verbose:
        show_progress.counter = 0
        show_progress.total = len(list_of_args)
    for x in list_of_args:
        if isinstance(x, tuple):
            args = x + extra_args
        else:
            args = (x,) + extra_args
        results.append(pool.apply_async(fun, args=args,
                                        callback=show_progress if verbose else None))
    for r in results:
        try:
            outputs.append(r.get(timeout))
        except KeyboardInterrupt:
            pool.terminate()
            sys.exit(1)
    pool.close()
    pool.join()
    return outputs
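Hypothetical usage, squaring a list of integers across four worker processes (with pool_type='processes' the function must be picklable, i.e. defined at module level):

def square(x):
    return x * x

outputs = run_calls(square, [1, 2, 3, 4], pool_type='processes',
                    nb_workers=4, timeout=10, verbose=False)
# outputs == [1, 4, 9, 16]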