This article collects and summarizes typical usage examples of the ProcessPoolExecutor.submit method from Python's concurrent.futures module. If you have been wondering what ProcessPoolExecutor.submit does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also read further into the class that provides the method, concurrent.futures.ProcessPoolExecutor.
The sections below present 15 code examples of ProcessPoolExecutor.submit, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code samples.
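Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: submit schedules a callable and immediately returns a concurrent.futures.Future, and result() (or as_completed) collects the values later.

from concurrent.futures import ProcessPoolExecutor, as_completed

def cube(x):
    return x ** 3

if __name__ == '__main__':  # required on platforms that spawn worker processes
    with ProcessPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(cube, n) for n in range(8)]
        for future in as_completed(futures):  # yields futures as they finish
            print(future.result())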
Example 1: _Worker
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
class _Worker(object):
    def __init__(self, protocol=None):
        self.protocol = protocol
        self.pool = ProcessPoolExecutor(max_workers=1)
        self.pool.submit(id, 42).result()  # start the worker process

    def run(self, func, *args, **kwargs):
        """Synchronous remote function call"""
        input_payload = dumps((func, args, kwargs), protocol=self.protocol)
        result_payload = self.pool.submit(
            call_func, input_payload, self.protocol).result()
        result = loads(result_payload)
        if isinstance(result, BaseException):
            raise result
        return result

    def memsize(self):
        workers_pids = [p.pid if hasattr(p, "pid") else p
                        for p in list(self.pool._processes)]
        num_workers = len(workers_pids)
        if num_workers == 0:
            return 0
        elif num_workers > 1:
            raise RuntimeError("Unexpected number of workers: %d"
                               % num_workers)
        return psutil.Process(workers_pids[0]).memory_info().rss

    def close(self):
        self.pool.shutdown(wait=True)
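As a rough usage sketch, assuming dumps/loads are pickle-compatible serializers and call_func is a module-level helper that unpickles (func, args, kwargs), invokes it, and pickles the outcome back, the worker round-trips like this:

# Hypothetical driver for _Worker; sum is picklable, so it survives the
# dumps -> submit -> loads round trip into the single worker process.
worker = _Worker()
assert worker.run(sum, [1, 2, 3]) == 6  # runs in the child process
print(worker.memsize())                 # RSS of the lone worker, in bytes
worker.close()                          # shutdown(wait=True) under the hood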
Example 2: main
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def main():
    """
    Makes banner requests with a thread or process pool executor.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--ip", help="IP address", required=True)
    arg_parser.add_argument("--pool", help="Executor pool type", choices=("thread", "process"), required=True)
    arg_parser.add_argument(
        "--workers", help="Number of executor workers", type=int, choices=range(1, 9), required=True
    )
    args = arg_parser.parse_args()
    ip = args.ip
    pool = args.pool
    workers = args.workers
    if pool == "process":
        executor = ProcessPoolExecutor(max_workers=workers)
    elif pool == "thread":
        executor = ThreadPoolExecutor(max_workers=workers)
    for i in range(1, 256):
        for port in get_ports():
            executor.submit(banner_request, "{0}.{1}".format(ip, i), port)
    print("[!] Finished spawning banner requests")
Example 3: splice_gmaps
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def splice_gmaps(threadpool, tilefolder, tempfiles, name):
    processpool = ProcessPoolExecutor()
    caption = "Rendering Zoom Layers {}".format(name)
    loadingbar = Bar(caption=caption)
    loadingbar.set_progress(0, caption)
    pygame.display.update()

    side = 1600
    zoom_levels = 4
    factor = 2 ** (zoom_levels - 1)
    masterside = side * factor
    plates = generate_plate_coords(factor, tempfiles)
    master_surface = pygame.Surface((masterside, masterside))

    done = 0
    total = len(tempfiles) + len(plates) * sum((4 ** x for x in range(zoom_levels)))
    fraction = 100 / total

    def render_base_to_master(task):
        imgdata, size, location = task.result()
        tempsurf = pygame.image.frombuffer(imgdata, size, "RGB")
        master_surface.blit(tempsurf, location)

    tasks = []
    for masterpos, pieces in plates.items():
        master_surface.fill((132, 170, 248))
        for x, y in pieces:
            task = processpool.submit(unpack, tempfiles, x, y, ((x % factor) * side, (y % factor) * side))
            tasks.append(threadpool.submit(render_base_to_master, task))
            tasks.append(task)

        current_area = masterside
        for task in tasks:
            task.result()
            done += 0.5
            loadingbar.set_progress(done * fraction, caption + " %4d of %4d" % (done, total))

        for z in range(zoom_levels):
            tasks = []
            pieces = masterside // current_area
            x_off = masterpos[0] * pieces
            y_off = masterpos[1] * pieces
            for xp in range(pieces):
                for yp in range(pieces):
                    temp = pygame.Surface.subsurface(master_surface,
                                                     (xp * current_area, yp * current_area, current_area, current_area))
                    filename = "screen_{}_{}_{}.png".format(z + 1, x_off + xp, y_off + yp)
                    data = pygame.image.tostring(temp, "RGB")
                    tasks.append(processpool.submit(render_plate, data, tilefolder, temp.get_size(), side, filename))
            for task in tasks:
                task.result()
                done += 1
                loadingbar.set_progress(done * fraction, caption + " %4d of %4d" % (done, total))
            current_area //= 2

    processpool.shutdown()
Example 4: build_from_path
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def build_from_path(hparams, input_dirs, mel_dir, linear_dir, wav_dir, n_jobs=12, tqdm=lambda x: x):
    """
    Preprocesses the speech dataset from a given input path to given output directories

    Args:
        - hparams: hyper parameters
        - input_dirs: input directories that contain the files to preprocess
        - mel_dir: output directory of the preprocessed speech mel-spectrogram dataset
        - linear_dir: output directory of the preprocessed speech linear-spectrogram dataset
        - wav_dir: output directory of the preprocessed speech audio dataset
        - n_jobs: Optional, number of worker processes to parallelize across
        - tqdm: Optional, provides a nice progress bar

    Returns:
        - A list of tuples describing the train examples. This should be written to train.txt
    """
    # We use ProcessPoolExecutor to parallelize across processes; this is just an
    # optimization and it can be omitted
    executor = ProcessPoolExecutor(max_workers=n_jobs)
    futures = []
    index = 1
    for input_dir in input_dirs:
        with open(os.path.join(input_dir, 'metadata.csv'), encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split('|')
                wav_path = os.path.join(input_dir, 'wavs', '{}.wav'.format(parts[0]))
                text = parts[2]
                futures.append(executor.submit(partial(_process_utterance, mel_dir, linear_dir, wav_dir, index, wav_path, text, hparams)))
                index += 1
    return [future.result() for future in tqdm(futures) if future.result() is not None]
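A side note on the submit call above: wrapping the target in functools.partial is equivalent to passing the arguments to submit directly, since submit(fn, *args, **kwargs) already forwards them to the callable. A minimal sketch:

from concurrent.futures import ProcessPoolExecutor
from functools import partial

def square(x):
    return x * x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as executor:
        f1 = executor.submit(partial(square, 4))  # partial-wrapped, as above
        f2 = executor.submit(square, 4)           # equivalent direct form
        assert f1.result() == f2.result() == 16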
Example 5: __call__
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def __call__(self, workflow, input_artifact_filepaths,
             parameter_references, output_artifact_filepaths):
    input_artifact_abs_filepaths = \
        {k: os.path.abspath(v)
         for k, v in input_artifact_filepaths.items()}
    output_artifact_abs_filepaths = \
        {k: os.path.abspath(v)
         for k, v in output_artifact_filepaths.items()}

    job = workflow.to_script(input_artifact_abs_filepaths,
                             parameter_references,
                             output_artifact_abs_filepaths)
    temp_dir = tempfile.mkdtemp()
    pool = ProcessPoolExecutor(max_workers=1)
    py_filename = os.path.join(temp_dir, 'job.py')
    with open(py_filename, 'w') as py_file:
        py_file.write(job.code)
    # TODO: handle subprocess exceptions
    future = pool.submit(subprocess.run,
                         [self._python_executable, py_filename],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # TODO: handle callback exceptions
    # TODO: make sure that temp_dir is cleaned up even if there is an
    # exception in pool.submit or the callback
    future.add_done_callback(lambda _: shutil.rmtree(temp_dir))
    return future
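Because the submitted callable here is subprocess.run itself, the future resolves to a subprocess.CompletedProcess whose stdout/stderr the caller can inspect, and add_done_callback fires once the run finishes, successfully or not. A self-contained sketch of consuming such a future:

import os
import shutil
import subprocess
import sys
import tempfile
from concurrent.futures import ProcessPoolExecutor

if __name__ == '__main__':
    pool = ProcessPoolExecutor(max_workers=1)
    temp_dir = tempfile.mkdtemp()
    script = os.path.join(temp_dir, 'job.py')
    with open(script, 'w') as f:
        f.write("print('hello from the job')")
    future = pool.submit(subprocess.run, [sys.executable, script],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    future.add_done_callback(lambda _: shutil.rmtree(temp_dir))
    completed = future.result()       # a subprocess.CompletedProcess
    print(completed.stdout.decode())  # -> hello from the job
    pool.shutdown()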
Example 6: build_from_path
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    '''Preprocesses the LJ Speech dataset from a given input path into a given output directory.

    Args:
        in_dir: The directory where you have downloaded the LJ Speech dataset
        out_dir: The directory to write the output into
        num_workers: Optional number of worker processes to parallelize across
        tqdm: You can optionally pass tqdm to get a nice progress bar

    Returns:
        A list of tuples describing the training examples. This should be written to train.txt
    '''
    # We use ProcessPoolExecutor to parallelize across processes. This is just an optimization and you
    # can omit it and just call _process_utterance on each input if you want.
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []
    index = 1
    with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('|')
            wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
            text = parts[2]
            futures.append(executor.submit(partial(_process_utterance, out_dir, index, wav_path, text)))
            index += 1
    return [future.result() for future in tqdm(futures)]
Example 7: ThreadPool
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
class ThreadPool(object):
    '''Thread pool implementation (wraps both a thread pool and a process pool)'''

    def __init__(self, thread_num=1, process_num=1, q_size=2000, daemon=True):
        self.thread_pool = _ThreadPoolExecutor(thread_num, daemon)
        self.process_pool = ProcessPoolExecutor(process_num)
        self.result_queue = Queue(q_size)

    def wait(self, threads=()):
        thread_wait(threads)

    def add_thread(self, target, args=()):
        result = self.thread_pool.submit(target, *args)
        return result

    def add_process(self, target, args=()):
        result = self.process_pool.submit(target, *args)
        return result

    def thread_map(self, target, args=()):
        return [self.thread_pool.submit(target, arg) for arg in args]

    def process_map(self, target, args=()):
        return self.process_pool.map(target, args)

    def map(self, target, args=()):
        return self.process_map(target, args)
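A rough sketch of driving this wrapper; note that _ThreadPoolExecutor and thread_wait are project-specific helpers not shown here, so this assumes they mirror the stdlib's ThreadPoolExecutor and concurrent.futures.wait:

# Hypothetical usage of the ThreadPool wrapper above.
pool = ThreadPool(thread_num=2, process_num=2)
fut = pool.add_process(pow, (2, 10))       # args tuple is unpacked into submit
print(fut.result())                        # -> 1024
print(list(pool.map(abs, [-1, -2, -3])))   # delegates to process_pool.map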
Example 8: main
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def main():
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--filter", action="store_true", help="act as a filter")
    group.add_argument("--transform", metavar="MAPPING", type=argparse.FileType("r"), help="transform all files given in the mapping file")
    parser.add_argument("--srcprefix", metavar="PREFIX", default="", help="when transforming data files prepend this PREFIX to source paths")
    parser.add_argument("--dstprefix", metavar="PREFIX", default="", help="when transforming data files prepend this PREFIX to destination paths")
    args = parser.parse_args()

    if args.filter:
        check_stream(sys.stdin, sys.stdout)
    else:
        exe = Executor()
        res = []
        for lineno, line in enumerate(args.transform):
            line = line.split('#', 1)[0]  # comment
            line = line.rstrip()          # trailing space or newline
            match = re.match(r'^(\S+):\s*(\S+)$', line)
            if not match:
                raise ValueError("syntax error on line %d" % (lineno + 1))
            destination, source = match.groups()
            source = os.path.join(args.srcprefix, source)
            destination = os.path.join(args.dstprefix, destination)
            res.append(exe.submit(transform, source, destination))
        while res:
            res.pop(0).result()  # propagate exceptions
Example 9: on_message
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def on_message(self, message):
    print(len(message))
    result = yield tornado.gen.Task(self.process_message, message)
    return
    # Unreachable as written (kept from the original source):
    pool = ProcessPoolExecutor()
    fut = pool.submit(call_process, message)
    ret = yield fut
    pool.shutdown()
Example 10: _run
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def _run(self, instance_id: str, service_id: str, plan_id: str, accepts_incomplete: bool, func: Callable, *func_args) -> Any:
    # The _match_synchronicity call must come first because it may raise an exception
    sync = self._match_synchronicity(service_id, plan_id, accepts_incomplete)
    executor = ProcessPoolExecutor(max_workers=1)
    future = executor.submit(func, *func_args)
    if sync:
        return future.result(timeout=59)
    else:
        self.async_ops[instance_id] = future
        raise ProvisioningAsynchronously
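The timeout=59 above means result() raises concurrent.futures.TimeoutError if the job is still running after 59 seconds, while the future itself stays valid and can be stored for later, as the asynchronous branch does. A minimal sketch of that behavior:

import time
from concurrent.futures import ProcessPoolExecutor, TimeoutError

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=1) as executor:
        future = executor.submit(time.sleep, 5)
        try:
            future.result(timeout=1)  # stop waiting after one second
        except TimeoutError:
            print("still running; keep the future and poll it later")
        print(future.result())        # blocks until the sleep finishes -> None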
Example 11: probe
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def probe(moduleName, args, torCtrl):
    logger.info("Running module '%s'." % moduleName)
    module = __import__("modules.%s" % moduleName, fromlist=[moduleName])

    # Obtain the list of exit relays to scan.
    if args.exit:
        exitRelays = [args.exit]
    else:
        hosts = [(socket.gethostbyname(host), port) for
                 (host, port) in module.targets]
        exitRelays = exitselector.getExits(args.consensus,
                                           countryCode=args.country,
                                           hosts=hosts)

    count = len(exitRelays)
    if count < 1:
        raise error.ExitSelectionError("Exit selection yielded %d exits "
                                       "but need at least one." % count)
    logger.info("About to probe %d exit relays." % count)

    # Create circuit pool and set up stream attacher.
    circuitPool = circuitpool.new(torCtrl, list(exitRelays))
    eventHandler = streamattacher.new(circuitPool, torCtrl)
    torCtrl.add_event_listener(eventHandler.newEvent, EventType.STREAM)

    circuits = torCtrl.get_circuits()
    logger.debug("Open circuits:")
    for circuit in circuits:
        logger.debug(circuit)

    executor = ProcessPoolExecutor(max_workers=const.CIRCUIT_POOL_SIZE)
    logger.debug("Beginning to populate process pool with %d jobs." % count)

    # Invoke a module instance for every exit relay.
    for _ in range(count, 0, -1):
        cmd = command.new(None)
        executor.submit(module.probe, cmd, count)
        count -= 1

    logger.info("Submitted jobs. Terminating main scanner.")
Example 12: post
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def post(self):
    file = self.request.files['file'][0]
    hark.client.login()
    hark.client.createSession(default_hark_config)

    log.info("Uploading asynchronously")
    pool = ProcessPoolExecutor(max_workers=2)
    future = pool.submit(async_upload, file)
    yield future
    pool.shutdown()

    log.info("Rendering visualization page")
    self.render('visualize.html')
Example 13: generate_stocks
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def generate_stocks(freq=pd.Timedelta(seconds=60), directory=None):
    from concurrent.futures import ProcessPoolExecutor, wait
    e = ProcessPoolExecutor()
    if os.path.exists(os.path.join('data', 'daily')):
        glob_path = os.path.join('data', 'daily', '*')
    else:
        glob_path = os.path.join(daily_dir, '*')
    filenames = sorted(glob(glob_path))

    futures = [e.submit(generate_stock, fn, directory=directory, freq=freq)
               for fn in filenames]
    wait(futures)
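Two details worth noting above: submit forwards keyword arguments to the callable unchanged, and concurrent.futures.wait blocks until every future in the collection has finished. A minimal sketch:

from concurrent.futures import ProcessPoolExecutor, wait

def scale(value, factor=1):
    return value * factor

if __name__ == '__main__':
    executor = ProcessPoolExecutor()
    futures = [executor.submit(scale, n, factor=10) for n in range(5)]
    done, not_done = wait(futures)           # blocks until all have completed
    print(sorted(f.result() for f in done))  # -> [0, 10, 20, 30, 40]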
Example 14: build_from_path
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def build_from_path(in_dir, out_dir, num_workers=1):
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []
    index = 1
    with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('|')
            wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
            text = parts[2]
            futures.append(executor.submit(
                partial(_process_utterance, out_dir, index, wav_path, text)))
            index += 1
    return [future.result() for future in futures]
Example 15: main
# Required import: from concurrent.futures import ProcessPoolExecutor
# Method used: ProcessPoolExecutor.submit
def main(argv=None):
    usage = """REDCap Data Model Generator

    Usage:
        redcap dball <version> [--dir=DIR] [--db=DB] [--host=HOST] [--port=PORT] [--user=USER] [--pass=PASS]

    Options:
        -h --help        Show this screen.
        --dir=DIR        Name of the directory to output the files [default: .].
        --db=DB          Name of the REDCap database [default: redcap].
        --host=HOST      Host of the database server [default: localhost].
        --port=PORT      Port of the database server [default: 3306].
        --user=USER      Username to connect with.
        --pass=PASS      Password to connect with. If set to *, a prompt will be provided.
        --procs=PROCS    Number of processes to spawn [default: 24].
    """  # noqa

    from docopt import docopt

    args = docopt(usage, argv=argv, version='0.1')

    if args['--pass'] == '*':
        args['--pass'] = getpass('password: ')

    conn = db_connect(args['--db'],
                      args['--host'],
                      args['--port'],
                      args['--user'],
                      args['--pass'])

    project_names = db_projects(conn)

    pool = ProcessPoolExecutor(max_workers=int(args['--procs']))

    for name in project_names:
        pool.submit(worker, name, args)

    pool.shutdown()