This page collects typical usage examples of Python's concurrent.futures.ProcessPoolExecutor class. If you have been wondering what ProcessPoolExecutor is for, how to use it, or what real code that uses it looks like, the curated class examples below should help.
The following 15 code examples of the ProcessPoolExecutor class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
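Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what ProcessPoolExecutor does: it runs picklable callables in a pool of worker processes and hands back concurrent.futures.Future objects.

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":                           # guard needed where workers are spawned (Windows/macOS)
    with ProcessPoolExecutor(max_workers=4) as executor:
        future = executor.submit(square, 7)          # one task -> one Future
        print(future.result())                       # 49
        print(list(executor.map(square, range(5))))  # [0, 1, 4, 9, 16]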
Example 1: build_from_path
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    '''Preprocesses the LJ Speech dataset from a given input path into a given output directory.

    Args:
        in_dir: The directory where you have downloaded the LJ Speech dataset
        out_dir: The directory to write the output into
        num_workers: Optional number of worker processes to parallelize across
        tqdm: You can optionally pass tqdm to get a nice progress bar

    Returns:
        A list of tuples describing the training examples. This should be written to train.txt
    '''

    # We use ProcessPoolExecutor to parallelize across processes. This is just an optimization and you
    # can omit it and just call _process_utterance on each input if you want.
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []
    index = 1
    with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('|')
            wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
            text = parts[2]
            futures.append(executor.submit(partial(_process_utterance, out_dir, index, wav_path, text)))
            index += 1
    return [future.result() for future in tqdm(futures)]
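A sketch of how this entry point might be driven, assuming the standard LJ Speech layout described in the docstring; the directory names and the metadata write-out are illustrative, not part of the original example:

import os

if __name__ == '__main__':   # guard needed because ProcessPoolExecutor spawns worker processes
    metadata = build_from_path('LJSpeech-1.1', 'training_data', num_workers=4)
    # Each returned tuple describes one training example; write them out as train.txt.
    with open(os.path.join('training_data', 'train.txt'), 'w', encoding='utf-8') as f:
        for row in metadata:
            f.write('|'.join(str(x) for x in row) + '\n')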
Example 2: main
def main():
    """
    Makes banner requests with a ThreadPoolExecutor or a ProcessPoolExecutor,
    depending on the --pool argument.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--ip", help="IP address", required=True)
    arg_parser.add_argument("--pool", help="Executor pool type", choices=("thread", "process"), required=True)
    arg_parser.add_argument(
        "--workers", help="Number of executor workers", type=int, choices=range(1, 9), required=True
    )
    args = arg_parser.parse_args()
    ip = args.ip
    pool = args.pool
    workers = args.workers

    if pool == "process":
        executor = ProcessPoolExecutor(max_workers=workers)
    elif pool == "thread":
        executor = ThreadPoolExecutor(max_workers=workers)

    for i in range(1, 256):
        for port in get_ports():
            executor.submit(banner_request, "{0}.{1}".format(ip, i), port)

    print("[!] Finished spawning banner requests")
Example 3: ThreadPool
class ThreadPool(object):
    '''Thread pool implementation'''

    def __init__(self, thread_num=1, process_num=1, q_size=2000, daemon=True):
        self.thread_pool = _ThreadPoolExecutor(thread_num, daemon)
        self.process_pool = ProcessPoolExecutor(process_num)
        self.result_queue = Queue(q_size)

    def wait(self, threads=[]):
        thread_wait(threads)

    def add_thread(self, target, args=()):
        result = self.thread_pool.submit(target, *args)
        return result

    def add_process(self, target, args=()):
        result = self.process_pool.submit(target, *args)
        return result

    def thread_map(self, target, args=[]):
        return [self.thread_pool.submit(target, arg) for arg in args]

    def process_map(self, target, args=[]):
        return self.process_pool.map(target, args)

    def map(self, target, args=[]):
        return self.process_map(target, args)
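Note the asymmetry in the class above: thread_map returns a list of Future objects whose results must be fetched explicitly, while process_map and map delegate to Executor.map, which yields results directly. A hedged usage sketch (run under a main guard so the process pool can spawn its workers safely):

if __name__ == '__main__':
    pool = ThreadPool(thread_num=4, process_num=2)

    # thread_map returns futures; fetch results explicitly.
    futures = pool.thread_map(len, ["abc", "de", "f"])
    print([f.result() for f in futures])     # [3, 2, 1]

    # map/process_map wrap ProcessPoolExecutor.map and yield results directly.
    print(list(pool.map(abs, [-3, -2, 1])))  # [3, 2, 1]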
Example 4: __call__
def __call__(self, workflow, input_artifact_filepaths,
             parameter_references, output_artifact_filepaths):
    input_artifact_abs_filepaths = \
        {k: os.path.abspath(v)
         for k, v in input_artifact_filepaths.items()}
    output_artifact_abs_filepaths = \
        {k: os.path.abspath(v)
         for k, v in output_artifact_filepaths.items()}

    job = workflow.to_script(input_artifact_abs_filepaths,
                             parameter_references,
                             output_artifact_abs_filepaths)

    temp_dir = tempfile.mkdtemp()
    pool = ProcessPoolExecutor(max_workers=1)

    py_filename = os.path.join(temp_dir, 'job.py')
    with open(py_filename, 'w') as py_file:
        py_file.write(job.code)

    # TODO: handle subprocess exceptions
    future = pool.submit(subprocess.run,
                         [self._python_executable, py_filename],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # TODO: handle callback exceptions
    # TODO: make sure that tempdir is cleaned up even if there is an
    # exception in pool.submit or the callback
    future.add_done_callback(lambda _: shutil.rmtree(temp_dir))

    return future
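The caller receives a plain concurrent.futures.Future wrapping the subprocess.run call, so it can decide whether to block. A minimal sketch of consuming it (runner stands for an instance of the class above; the file paths are placeholders):

future = runner(workflow, {'in': 'input.csv'}, {}, {'out': 'output.csv'})
completed = future.result()              # blocks until the job subprocess exits
print(completed.returncode)
print(completed.stdout.decode())         # captured because stdout=subprocess.PIPE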
Example 5: build_from_path
def build_from_path(hparams, input_dirs, mel_dir, linear_dir, wav_dir, n_jobs=12, tqdm=lambda x: x):
    """
    Preprocesses the speech dataset from a given input path to given output directories

    Args:
        - hparams: hyper parameters
        - input_dirs: input directories that contain the files to preprocess
        - mel_dir: output directory of the preprocessed speech mel-spectrogram dataset
        - linear_dir: output directory of the preprocessed speech linear-spectrogram dataset
        - wav_dir: output directory of the preprocessed speech audio dataset
        - n_jobs: Optional, number of worker processes to parallelize across
        - tqdm: Optional, provides a nice progress bar

    Returns:
        - A list of tuples describing the training examples. This should be written to train.txt
    """

    # We use ProcessPoolExecutor to parallelize across processes, this is just for
    # optimization purposes and it can be omitted
    executor = ProcessPoolExecutor(max_workers=n_jobs)
    futures = []
    index = 1
    for input_dir in input_dirs:
        with open(os.path.join(input_dir, 'metadata.csv'), encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split('|')
                wav_path = os.path.join(input_dir, 'wavs', '{}.wav'.format(parts[0]))
                text = parts[2]
                futures.append(executor.submit(partial(_process_utterance, mel_dir, linear_dir, wav_dir, index, wav_path, text, hparams)))
                index += 1

    return [future.result() for future in tqdm(futures) if future.result() is not None]
Example 6: parallel
def parallel(self, parallel):
    # shutdown any previous executor if we are managing it
    if getattr(self, '_managing_executor', False):
        self._executor.shutdown()

    self._parallel = parallel
    self._managing_executor = False

    if parallel is False:
        self._executor = None
        return

    if parallel is True:
        from concurrent.futures import ProcessPoolExecutor
        self._executor = ProcessPoolExecutor()
        self._managing_executor = True
        return

    if isinstance(parallel, numbers.Number):
        from concurrent.futures import ProcessPoolExecutor
        self._executor = ProcessPoolExecutor(parallel)
        self._managing_executor = True
        return

    # assume a pool-executor has been supplied
    self._executor = parallel
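The setter accepts four kinds of value, keeping pool configuration in one place. A hedged usage sketch (obj stands for whatever object exposes the parallel property; my_executor is any pre-built executor):

obj.parallel = False          # run serially, no executor
obj.parallel = True           # internally managed ProcessPoolExecutor()
obj.parallel = 4              # internally managed ProcessPoolExecutor(4)
obj.parallel = my_executor    # caller-supplied executor; obj will not shut it down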
Example 7: _Worker
class _Worker(object):

    def __init__(self, protocol=None):
        self.protocol = protocol
        self.pool = ProcessPoolExecutor(max_workers=1)
        self.pool.submit(id, 42).result()  # start the worker process

    def run(self, func, *args, **kwargs):
        """Synchronous remote function call"""
        input_payload = dumps((func, args, kwargs), protocol=self.protocol)
        result_payload = self.pool.submit(
            call_func, input_payload, self.protocol).result()
        result = loads(result_payload)

        if isinstance(result, BaseException):
            raise result
        return result

    def memsize(self):
        workers_pids = [p.pid if hasattr(p, "pid") else p
                        for p in list(self.pool._processes)]
        num_workers = len(workers_pids)
        if num_workers == 0:
            return 0
        elif num_workers > 1:
            raise RuntimeError("Unexpected number of workers: %d"
                               % num_workers)
        return psutil.Process(workers_pids[0]).memory_info().rss

    def close(self):
        self.pool.shutdown(wait=True)
def __init__(self, apiurl, apiversion, charmworldurl=None, io_loop=None):
"""Initialize the deployer.
The apiurl argument is the URL of the juju-core WebSocket server.
The apiversion argument is the Juju API version (e.g. "go").
"""
self._apiurl = apiurl
self._apiversion = apiversion
if charmworldurl is not None and not charmworldurl.endswith('/'):
charmworldurl = charmworldurl + '/'
self._charmworldurl = charmworldurl
if io_loop is None:
io_loop = IOLoop.current()
self._io_loop = io_loop
# Deployment validation and importing executors.
self._validate_executor = ProcessPoolExecutor(1)
self._run_executor = ProcessPoolExecutor(1)
# An observer instance is used to watch the deployments progress.
self._observer = utils.Observer()
# Queue stores the deployment identifiers corresponding to the
# currently started/queued jobs.
self._queue = []
# The futures attribute maps deployment identifiers to Futures.
self._futures = {}
Example 9: on_message
def on_message(self, message):
    print(len(message))
    result = yield tornado.gen.Task(self.process_message, message)
    return
    # NOTE: everything below the bare `return` above is unreachable as written;
    # it appears to be a leftover variant that offloads the work to a
    # ProcessPoolExecutor instead of processing the message in-line.
    pool = ProcessPoolExecutor()
    fut = pool.submit(call_process, message)
    ret = yield fut
    pool.shutdown()
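For reference, the unreachable branch hints at a common Tornado pattern: concurrent.futures futures can be yielded from a coroutine, so CPU-bound work runs in a worker process without blocking the IOLoop. A hedged sketch of that pattern (call_process is the helper assumed above; the handler class is hypothetical):

from concurrent.futures import ProcessPoolExecutor
from tornado import gen, web

pool = ProcessPoolExecutor(max_workers=2)   # module-level pool, reused across requests

class MessageHandler(web.RequestHandler):
    @gen.coroutine
    def post(self):
        message = self.request.body
        # pool.submit returns a concurrent.futures.Future, which a Tornado coroutine can yield
        result = yield pool.submit(call_process, message)
        self.write(str(result))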
Example 10: run_simulation
def run_simulation(datasets, workers_num):
    # Note: the original snippet used Python 2's xrange here; range works on both.
    workers = [TroiaWebDemoUser(get_troia_client(),
                                "TES_TROJ_JID_" + str(i)) for i in range(workers_num)]
    for worker in workers:
        worker.set_datasets(datasets)
    executor = ProcessPoolExecutor(workers_num)
    # maap = map
    maap = lambda *args, **kwargs: list(executor.map(*args, **kwargs))
    maap(exec_fun, workers, repeat(ITERATIONS, workers_num))
Example 11: splice_gmaps
def splice_gmaps(threadpool, tilefolder, tempfiles, name):
    processpool = ProcessPoolExecutor()
    caption = "Rendering Zoom Layers {}".format(name)
    loadingbar = Bar(caption=caption)
    loadingbar.set_progress(0, caption)
    pygame.display.update()

    side = 1600
    zoom_levels = 4
    factor = 2 ** (zoom_levels - 1)
    masterside = side * factor

    plates = generate_plate_coords(factor, tempfiles)
    master_surface = pygame.Surface((masterside, masterside))

    done = 0
    total = len(tempfiles) + len(plates) * sum((4 ** x for x in range(zoom_levels)))
    fraction = 100 / total

    def render_base_to_master(task):
        imgdata, size, location = task.result()
        tempsurf = pygame.image.frombuffer(imgdata, size, "RGB")
        master_surface.blit(tempsurf, location)

    tasks = []
    for masterpos, pieces in plates.items():
        master_surface.fill((132, 170, 248))
        for x, y in pieces:
            task = processpool.submit(unpack, tempfiles, x, y, ((x % factor) * side, (y % factor) * side))
            tasks.append(threadpool.submit(render_base_to_master, task))
            tasks.append(task)

        current_area = masterside

        for task in tasks:
            task.result()
            done += 0.5
            loadingbar.set_progress(done * fraction, caption + " %4d of %4d" % (done, total))

        for z in range(zoom_levels):
            tasks = []
            pieces = masterside // current_area
            x_off = masterpos[0] * pieces
            y_off = masterpos[1] * pieces
            for xp in range(pieces):
                for yp in range(pieces):
                    temp = pygame.Surface.subsurface(master_surface,
                                                     (xp * current_area, yp * current_area, current_area, current_area))
                    filename = "screen_{}_{}_{}.png".format(z + 1, x_off + xp, y_off + yp)
                    data = pygame.image.tostring(temp, "RGB")
                    tasks.append(processpool.submit(render_plate, data, tilefolder, temp.get_size(), side, filename))
            for task in tasks:
                task.result()
                done += 1
                loadingbar.set_progress(done * fraction, caption + " %4d of %4d" % (done, total))
            current_area //= 2

    processpool.shutdown()
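The snippet above chains two pools: CPU-heavy work (unpack, render_plate) goes to the ProcessPoolExecutor, while blitting onto the shared master_surface, which cannot be sent across process boundaries, happens on the thread pool. A stripped-down sketch of that hand-off pattern, with hypothetical decode/assemble helpers standing in for the real ones:

from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor

def decode(raw):
    # Stand-in for unpack/render_plate: CPU-bound work with picklable input and output.
    return raw.upper()

def main():
    results = []

    def assemble(task):
        # Stand-in for render_base_to_master: runs in a thread, so it may touch
        # shared, unpicklable state (here just a list, in the example a Surface).
        results.append(task.result())

    with ProcessPoolExecutor() as procs, ThreadPoolExecutor() as threads:
        waiters = [threads.submit(assemble, procs.submit(decode, raw))
                   for raw in (b"a", b"b", b"c")]
        for w in waiters:
            w.result()          # wait until every assemble step has finished

    print(results)

if __name__ == "__main__":
    main()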
Example 12: main
def main(chunk):
    nums = range(1, 1000)
    pool = ProcessPoolExecutor()
    count = 0
    returned_iterator = pool.map(is_prime, nums, timeout=None, chunksize=chunk)
    for result in returned_iterator:
        if result:
            count += 1
    return count
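is_prime is assumed from the surrounding script; a minimal version and an invocation make the snippet self-contained (the chunksize value is only illustrative, larger chunks amortize inter-process messaging when individual tasks are tiny):

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

if __name__ == "__main__":
    print(main(chunk=64))   # 168, the number of primes below 1000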
Example 13: _run
def _run(self, instance_id: str, service_id: str, plan_id: str, accepts_incomplete: bool, func: Callable, *func_args) -> Any:
    # The _match_synchronicity call must come first because it may raise an exception
    sync = self._match_synchronicity(service_id, plan_id, accepts_incomplete)
    executor = ProcessPoolExecutor(max_workers=1)
    future = executor.submit(func, *func_args)

    if sync:
        return future.result(timeout=59)
    else:
        self.async_ops[instance_id] = future
        raise ProvisioningAsynchronously
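In the asynchronous branch the Future stays alive in self.async_ops, presumably so a later last-operation request can inspect it. A hedged sketch of such a poll method (the method name and status strings are hypothetical, not part of the original class):

def _poll(self, instance_id: str) -> str:
    future = self.async_ops.get(instance_id)
    if future is None:
        return "gone"
    if not future.done():
        return "in progress"
    # Future.exception() returns None on success, or the raised exception otherwise.
    return "failed" if future.exception() else "succeeded"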
Example 14: compute_pi
def compute_pi(nr_tries=10000, pool_size=None, constructor=None):
    # Default the pool size before it is used to build the argument list below.
    if not pool_size:
        pool_size = multiprocessing.cpu_count()
    if not constructor:
        executor = ProcessPoolExecutor(max_workers=pool_size)
    else:
        executor = constructor(max_workers=pool_size)
    args = [(nr_tries//pool_size, )
            for _ in range(pool_size)]
    results = executor.map(partial_pi, args)
    return sum(results)/pool_size
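partial_pi is assumed from the surrounding module; a minimal Monte Carlo version consistent with the call above (it receives a 1-tuple because args is a list of 1-tuples):

import random

def partial_pi(arg):
    nr_tries, = arg                    # unpack the 1-tuple produced by compute_pi
    hits = 0
    for _ in range(nr_tries):
        x, y = random.random(), random.random()
        if x * x + y * y <= 1.0:
            hits += 1
    return 4.0 * hits / nr_tries       # per-chunk estimate; compute_pi averages them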
Example 15: post
def post(self):
    file = self.request.files['file'][0]
    hark.client.login()
    hark.client.createSession(default_hark_config)

    log.info("Uploading asynchronously")
    pool = ProcessPoolExecutor(max_workers=2)
    future = pool.submit(async_upload, file)
    yield future
    pool.shutdown()

    log.info("Rendering visualization page")
    self.render('visualize.html')