This article collects typical usage examples of the Python method joblib.pool.MemmapingPool.map_async. If you have been wondering how exactly MemmapingPool.map_async works and how to use it, the curated code examples below may help. You can also follow up on the enclosing class, joblib.pool.MemmapingPool, for more usage examples.
The following shows 1 code example of the MemmapingPool.map_async method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
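Before diving into the example, a minimal sketch of map_async on its own may be helpful. It assumes an older joblib release that still exposes joblib.pool.MemmapingPool (newer versions spell it MemmappingPool and have relocated it); the square_sum helper and the pool parameters are illustrative only:

import numpy as np
from joblib.pool import MemmapingPool  # older joblib spelling; assumed available here

def square_sum(x):
    # Large numpy arrays sent to workers are memory-mapped instead of being pickled wholesale.
    return float(np.sum(x ** 2))

if __name__ == "__main__":
    data = [np.random.rand(1000) for _ in range(4)]
    pool = MemmapingPool(2, temp_folder="/tmp")    # 2 workers, spill memmaps under /tmp
    try:
        result = pool.map_async(square_sum, data)  # returns an AsyncResult immediately
        print(result.get(timeout=30))              # block until every worker has finished
    finally:
        pool.terminate()

As with multiprocessing.Pool, map_async returns immediately with an AsyncResult, which is what lets StatefulPool in the example below keep servicing its own queues while the workers are busy.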
Example 1: StatefulPool
# Required import: from joblib.pool import MemmapingPool [as alias]
# Or: from joblib.pool.MemmapingPool import map_async [as alias]
class StatefulPool(object):
    def __init__(self):
        self.n_parallel = 1
        self.pool = None
        self.queue = None
        self.worker_queue = None
        self.G = SharedGlobal()

    def initialize(self, n_parallel):
        self.n_parallel = n_parallel
        if self.pool is not None:
            print("Warning: terminating existing pool")
            self.pool.terminate()
            self.queue.close()
            self.worker_queue.close()
            self.G = SharedGlobal()
        if n_parallel > 1:
            self.queue = mp.Queue()
            self.worker_queue = mp.Queue()
            self.pool = MemmapingPool(
                self.n_parallel,
                temp_folder="/tmp",
            )
    def run_each(self, runner, args_list=None):
        """
        Run the method on each worker process, and collect the results of execution.
        The runner method will receive 'G' as its first argument, followed by the arguments
        in args_list, if any.
        :return:
        """
        if args_list is None:
            args_list = [tuple()] * self.n_parallel
        assert len(args_list) == self.n_parallel
        if self.n_parallel > 1:
            # return [runner(self.G, *args_list[i]) for i in range(self.n_parallel)]
            results = self.pool.map_async(
                _worker_run_each, [(runner, args) for args in args_list]
            )
            for i in range(self.n_parallel):
                self.worker_queue.get()
            for i in range(self.n_parallel):
                self.queue.put(None)
            return results.get()
        return [runner(self.G, *args_list[0])]
    def run_map(self, runner, args_list):
        if self.n_parallel > 1:
            return self.pool.map(_worker_run_map, [(runner, args) for args in args_list])
        else:
            ret = []
            for args in args_list:
                ret.append(runner(self.G, *args))
            return ret

    def run_imap_unordered(self, runner, args_list):
        if self.n_parallel > 1:
            for x in self.pool.imap_unordered(_worker_run_map, [(runner, args) for args in args_list]):
                yield x
        else:
            for args in args_list:
                yield runner(self.G, *args)
    def run_collect(self, collect_once, threshold, args=None, show_prog_bar=True, multi_task=False):
        """
        Run the collector method using the worker pool. The collect_once method will receive 'G' as
        its first argument, followed by the provided args, if any. The method should return a pair of
        values: the first is the object to be collected, and the second is the increment to be added.
        Collection continues until the total increment reaches or exceeds the given threshold.

        Sample script:

        def collect_once(G):
            return 'a', 1

        stateful_pool.run_collect(collect_once, threshold=3)  # => ['a', 'a', 'a']

        :param collect_once:
        :param threshold:
        :return:
        """
        if args is None:
            args = tuple()
        if self.pool and multi_task:
            manager = mp.Manager()
            counter = manager.Value('i', 0)
            lock = manager.RLock()
            inputs = [(collect_once, counter, lock, threshold, arg) for arg in args]
            results = self.pool.map_async(
                _worker_run_collect,
                inputs,
            )
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            last_value = 0
            while True:
                time.sleep(0.1)
                with lock:
                    if counter.value >= threshold:
# ......... rest of the code omitted .........
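To round out the truncated example, here is a hypothetical usage sketch. SharedGlobal, the _worker_* helpers and ProgBarCounter live in the omitted portion of the module, and add_offset is an invented runner, so this only illustrates the (G, *args) calling convention that the excerpt establishes:

def add_offset(G, x, offset):
    # Every runner receives the shared global container G first, then its own arguments.
    return x + offset

pool = StatefulPool()
pool.initialize(n_parallel=1)   # with n_parallel=1 everything runs in-process, no MemmapingPool

results = pool.run_map(add_offset, [(i, 10) for i in range(5)])
print(results)  # [10, 11, 12, 13, 14]

With n_parallel greater than 1, initialize would instead create the MemmapingPool shown above, and run_each/run_collect would dispatch their work through map_async.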