This article collects typical usage examples of the Python method multiprocessing.Pool.starmap_async. If you are wondering how to use Pool.starmap_async, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples of the class this method belongs to, multiprocessing.Pool.
Four code examples of Pool.starmap_async are shown below, sorted by popularity by default.
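Before the curated examples, here is a minimal sketch of the basic pattern: starmap_async submits an iterable of argument tuples to the pool, unpacks each tuple into the worker's arguments, and returns an AsyncResult whose get() yields the list of return values. The add worker and the sample tuples below are illustrative placeholders, not taken from the examples.

from multiprocessing import Pool

def add(x, y):
    # Each tuple from the iterable is unpacked into the worker's arguments.
    return x + y

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # starmap_async returns immediately with an AsyncResult.
        result = pool.starmap_async(add, [(1, 2), (3, 4), (5, 6)])
        print(result.get())  # [3, 7, 11]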
Example 1: Parallelism
# Required module import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap_async [as alias]
from multiprocessing import Pool, cpu_count


class Parallelism(object):
    """Multiprocessing map helper class.

    pl = Parallelism()
    pl.add(yourFunc, yourIter)
    pl.run()
    data = pl.get_results()
    data = list(data)
    print(data)
    """

    def __init__(self, processes=cpu_count()):
        '''
        :param processes: number of worker processes, defaults to the CPU count
        '''
        self.pool = Pool(processes=processes)
        self.total_processes = 0
        self.completed_processes = 0
        self.results = []
        self.data = None
        self.cores = processes  # number of CPU cores

    def add(self, func, iter):
        # Split large list inputs into one chunk per core so progress can be reported per chunk.
        if isinstance(iter, list) and self.cores > 1 and len(iter) > self.cores:
            pLen = int(len(iter) / self.cores) + 1
            for i in range(self.cores):
                self.data = self.pool.starmap_async(func, iter[int(i * pLen):int((i + 1) * pLen)],
                                                    callback=self.complete,
                                                    error_callback=self.exception)
                self.total_processes += 1
        else:
            self.data = self.pool.starmap_async(func=func, iterable=iter, callback=self.complete,
                                                error_callback=self.exception)
            self.total_processes += 1
        # self.data.get()

    def complete(self, result):
        self.results.extend(result)
        self.completed_processes += 1
        print('Progress: {:.2f}%'.format((self.completed_processes / self.total_processes) * 100))

    def exception(self, exception=None):
        print(exception)

    def run(self):
        self.data.get()
        self.pool.close()
        self.pool.join()

    def get_results(self):
        return self.results
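A brief usage sketch of the class above; the multiply worker and the sample argument tuples are made-up placeholders. Because starmap_async unpacks each element of the iterable into the worker's arguments, the data passed to add() is a list of tuples.

def multiply(x, y):
    return x * y

if __name__ == '__main__':
    pl = Parallelism(processes=4)
    # add() splits the list into per-core chunks internally and submits each with starmap_async.
    pl.add(multiply, [(i, i + 1) for i in range(100)])
    pl.run()  # wait for all submitted work to finish
    print(pl.get_results()[:5])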
Example 2: build_trees
# Required module import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap_async [as alias]
import os
from glob import glob
from itertools import product
from multiprocessing import Pool

# distribute_tasks and single_tree are project-specific helpers defined elsewhere in the repository.

def build_trees(topo, real, num_rep, part):
    prefix = 'nantes/{}_{}*.pack'.format(topo, 'yes' if real else 'no')

    def is_packed_tree(name):
        return 'gtx' in name or 'bfs' in name or 'rst' in name

    # Collect the non-tree graph files, sorted by file size.
    graphs = sorted(((f, os.stat(f).st_size) for f in glob(prefix) if not is_packed_tree(f)),
                    key=lambda x: x[1])
    tasks = []
    for filename, size_b in graphs:
        prefix = os.path.splitext(filename)[0]
        for i, treekind in product(range(num_rep), ['bfs', 'gtx', 'rst']):
            tid = part * num_rep + i
            tree_filename = '{}_{}_{}.pack'.format(prefix, treekind, tid)
            if not os.path.exists(tree_filename):
                tasks.append((size_b, (filename, treekind, tid)))
    num_threads = 8
    tasks = distribute_tasks(tasks, num_threads)
    pool = Pool(num_threads)
    pool.starmap_async(single_tree, tasks, chunksize=max(1, len(tasks) // num_threads))
    pool.close()
    pool.join()
Example 3: _speckleDisplacementMulticore
# Required module import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap_async [as alias]
import itertools
from multiprocessing import Pool, cpu_count

import numpy as np

# wpu and the _func_4_starmap_async_method* worker functions are defined elsewhere in the package.

def _speckleDisplacementMulticore(image, image_ref, stride,
                                  halfsubwidth, halfTemplateSize,
                                  subpixelResolution,
                                  ncores, taskPerCore, verbose):
    print('MESSAGE: _speckleDisplacementMulticore:')
    print("MESSAGE: %d cpu's available" % cpu_count())
    nprocesses = int(cpu_count() * ncores)
    p = Pool(processes=nprocesses)
    print("MESSAGE: Using %d cpu's" % p._processes)

    irange = np.arange(halfsubwidth, image.shape[0] - halfsubwidth + 1, stride)
    jrange = np.arange(halfsubwidth, image.shape[1] - halfsubwidth + 1, stride)
    ntasks = np.size(irange) * np.size(jrange)
    chunksize = ntasks // p._processes // taskPerCore + 1

    if subpixelResolution is not None:
        if verbose:
            print('MESSAGE: register_translation method.')
        parList = [image, image_ref, halfsubwidth, subpixelResolution]
        func_4_starmap_async = _func_4_starmap_async_method1
    elif halfTemplateSize is not None:
        if verbose:
            print('MESSAGE: match_template method.')
        parList = [image, image_ref, halfsubwidth, halfTemplateSize]
        func_4_starmap_async = _func_4_starmap_async_method2

    # Each task is ((i, j), parList); the worker computes the displacement at pixel (i, j).
    res = p.starmap_async(func_4_starmap_async,
                          zip(itertools.product(irange, jrange),
                              itertools.repeat(parList)),
                          chunksize=chunksize)
    p.close()  # No more work
    wpu.progress_bar4pmap(res)  # Holds the program in a loop waiting for starmap_async to finish

    sx = np.array(res.get())[:, 0].reshape(len(irange), len(jrange))
    sy = np.array(res.get())[:, 1].reshape(len(irange), len(jrange))
    error = np.array(res.get())[:, 2].reshape(len(irange), len(jrange))
    return (sx, sy, error, stride)
Example 4: int
# Required module import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap_async [as alias]
import time
import multiprocessing
from itertools import repeat
from multiprocessing import Manager, Pool

# data_catcher, graphing_api, load_to_memory, delay and NewWindow are
# application-specific objects defined elsewhere in this project.

received_data = {}
active_processes = {}
while True:
    data = data_catcher.get()
    file = data
    file = file.split("/")
    file_name = file[-1]
    file_path = file[:-1]
    if file_name not in active_processes.keys():
        api = graphing_api.GraphingApplication()
        api.open_file(data)
        number_of_trials = int(api.number_trials) + 1
        manager = Manager()
        queue = manager.Queue()
        pool_count = multiprocessing.cpu_count() * 2
        processes = Pool(processes=pool_count, maxtasksperchild=2)
        list_of_trials = [str(x) for x in range(1, number_of_trials)]
        print("starting file analysis")
        start_time = time.time()
        # starmap_async returns an AsyncResult; .get() blocks until every (data, trial) pair is processed.
        for image, trial in processes.starmap_async(load_to_memory, zip(repeat(data), list_of_trials)).get():
            received_data[trial] = image
        print("Took " + str(delay(start_time)) + " to finish analysis of file")
        parsed = True
        active_processes[file_name] = NewWindow(number_of_trials, file_name, received_data)
        active_processes[file_name].start()