This article collects typical usage examples of the ProcessPoolExecutor.map method from Python's concurrent.futures module. If you are wondering what exactly ProcessPoolExecutor.map does and how to use it, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, concurrent.futures.ProcessPoolExecutor.
The following presents 15 code examples of ProcessPoolExecutor.map, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: ThreadPool
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
class ThreadPool(object):
    '''A combined thread/process pool wrapper.'''
    def __init__(self, thread_num=1, process_num=1, q_size=2000, daemon=True):
        # _ThreadPoolExecutor, thread_wait and Queue are project-local helpers
        # imported elsewhere in the original module.
        self.thread_pool = _ThreadPoolExecutor(thread_num, daemon)
        self.process_pool = ProcessPoolExecutor(process_num)
        self.result_queue = Queue(q_size)

    def wait(self, threads=()):  # immutable default instead of a shared list
        thread_wait(threads)

    def add_thread(self, target, args=()):
        return self.thread_pool.submit(target, *args)

    def add_process(self, target, args=()):
        return self.process_pool.submit(target, *args)

    def thread_map(self, target, args=()):
        return [self.thread_pool.submit(target, arg) for arg in args]

    def process_map(self, target, args=()):
        return self.process_pool.map(target, args)

    def map(self, target, args=()):
        return self.process_map(target, args)
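A minimal usage sketch for the pool above (the `square` function is illustrative, not part of the original; worker functions must be defined at module level so ProcessPoolExecutor can pickle them):

def square(x):
    return x * x

if __name__ == '__main__':
    pool = ThreadPool(thread_num=2, process_num=2)
    print(list(pool.map(square, [1, 2, 3, 4])))  # -> [1, 4, 9, 16]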
Example 2: run_simulation
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def run_simulation(datasets, workers_num):
    workers = [TroiaWebDemoUser(get_troia_client(),
                                "TES_TROJ_JID_" + str(i)) for i in range(workers_num)]
    for worker in workers:
        worker.set_datasets(datasets)
    executor = ProcessPoolExecutor(workers_num)
    # maap = map  # serial alternative for debugging
    # Wrap executor.map in list() so the lazy result iterator is fully consumed.
    maap = lambda *args, **kwargs: list(executor.map(*args, **kwargs))
    maap(exec_fun, workers, repeat(ITERATIONS, workers_num))
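The `list(...)` wrapper matters because `executor.map` returns a lazy iterator; results are only awaited as the iterator is consumed. A self-contained sketch of the same idiom, with a hypothetical `work` function standing in for `exec_fun`:

from concurrent.futures import ProcessPoolExecutor
from itertools import repeat

def work(name, iterations):  # hypothetical stand-in for exec_fun
    return '%s ran %d iterations' % (name, iterations)

if __name__ == '__main__':
    executor = ProcessPoolExecutor(max_workers=2)
    # list() consumes the lazy iterator, blocking until all tasks finish
    print(list(executor.map(work, ['a', 'b'], repeat(10, 2))))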
Example 3: main
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def main(chunk):
    nums = range(1, 1000)
    pool = ProcessPoolExecutor()
    count = 0
    # chunksize batches the inputs to reduce inter-process overhead
    returned_iterator = pool.map(is_prime, nums, timeout=None, chunksize=chunk)
    for result in returned_iterator:
        if result:
            count += 1
    return count
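`is_prime` is not shown on this page; a minimal trial-division sketch that fits the call above:

def is_prime(n):
    # assumed helper: returns True when n is prime
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True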
Example 4: __init__
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
class GeneticSearcher:
    def __init__(self, pop_size, problem):
        self.problem = problem
        self.pop = [Network.random_network() for i in range(pop_size)]
        self.fitness_cache = {}
        self.best = None
        self.nt = NetTester(problem)
        self.pp = ProcessPoolExecutor(max_workers=4)
        self.ntf = NetworkTesterFactory(problem)
        self.pop_size = pop_size

    def recalculate_fitness(self):
        nets_to_rate = [net for net in self.pop if net not in self.fitness_cache]
        # rate_network is expected to return (network, fitness) pairs
        for net, res in self.pp.map(self.ntf.rate_network, nets_to_rate):
            self.fitness_cache[net] = res

    def selection(self):
        population_fitness = [(net, self.fitness_cache[net]) for net in self.pop]
        population_fitness = sorted(population_fitness, reverse=True, key=lambda x: x[1])
        self.best = population_fitness[0]
        # keep the fittest third of the population as parents
        return list(map(lambda x: x[0], population_fitness[:int(self.pop_size / 3)]))

    def crossing(self, parents):
        children = []
        while len(children) < self.pop_size / 3:
            # sample into a separate variable: the original reassigned `parents`,
            # shrinking the parent pool after the first pass; also sample from a
            # list, since random.sample on a set is an error in Python 3.11+
            pair = random.sample(list(set(parents)), 2)
            children.append(self.problem.crossing(pair[0], pair[1]))
        return children

    def mutation(self, population):
        mutants = []
        while len(mutants) < 0.3 * self.pop_size:
            mutants.append(self.problem.mutate(random.choice(population)))
        return mutants

    def iteration(self):
        self.recalculate_fitness()
        old_survivors = self.selection()
        children = self.crossing(old_survivors)
        mutants = self.mutation(old_survivors)
        new_generation = old_survivors + children + mutants
        while len(new_generation) < self.pop_size:
            new_generation.append(Network.random_network())
        self.pop = new_generation
        return self.best[1]

    def show_best(self):
        self.nt.test(self.best[0], render=True)
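`recalculate_fitness` unpacks `(net, res)` pairs from `pp.map`, so `NetworkTesterFactory.rate_network` must return a `(network, fitness)` tuple. A hypothetical sketch of that contract (the `evaluate` hook is an assumption, not part of the original):

class NetworkTesterFactory:
    def __init__(self, problem):
        self.problem = problem

    def rate_network(self, net):
        fitness = self.problem.evaluate(net)  # assumed evaluation hook
        return net, fitness  # the (network, fitness) pair the caller unpacks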
Example 5: compute_pi
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def compute_pi(nr_tries=10000, pool_size=None, constructor=None):
    # resolve the default up front: the original only defaulted pool_size
    # after it had already been used in the nr_tries//pool_size split below
    if not pool_size:
        pool_size = multiprocessing.cpu_count()
    if not constructor:
        executor = ProcessPoolExecutor(max_workers=pool_size)
    else:
        executor = constructor(max_workers=pool_size)
    args = [(nr_tries//pool_size, )
            for _ in range(pool_size)]
    results = executor.map(partial_pi, args)
    return sum(results)/pool_size
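`partial_pi` is assumed to be a module-level Monte Carlo worker; note that it receives a one-element tuple, because `args` is a list of tuples. A sketch under that assumption:

import random

def partial_pi(args):
    # args is a 1-tuple: (number of samples for this worker,)
    nr_tries, = args
    inside = 0
    for _ in range(nr_tries):
        x, y = random.random(), random.random()
        if x * x + y * y <= 1.0:
            inside += 1
    return 4.0 * inside / nr_tries  # per-worker estimate; the caller averages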
Example 6: main
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def main():
    # each work item is a pair; gcd receives the whole tuple as one argument
    numbers = [
        (1963309, 2265973),
        (2030677, 3814172),
        (1551645, 2229620),
        (2039045, 2020802)
    ]
    start = time()
    pool = ProcessPoolExecutor(max_workers=2)
    results = list(pool.map(gcd, numbers))
    end = time()
    print('Took %.3f seconds' % (end - start))
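Since each work item is a tuple, the `gcd` used here (and again in Examples 11 and 15) must accept the pair as a single argument; a sketch of such a helper using Euclid's algorithm:

def gcd(pair):
    # assumed helper: greatest common divisor of an (a, b) pair
    a, b = pair
    while b:
        a, b = b, a % b
    return a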
Example 7: make_arch_db
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def make_arch_db():
    executor = ProcessPoolExecutor(max_workers=8)
    by = 10000
    m = 60000000
    #by = 2000
    #m = 10000
    # zip(...) yields (start, end) windows: (0, by), (by, 2*by), ...
    e = executor.map(process_range, zip(range(0, m, by), range(by, m + by, by)))
    executor.shutdown()
    print('done calculating architectures')
    pfam_sets = merge(e)
    print(len(pfam_sets))
    gsave(pfam_sets, 'pfam_sets.pkl.gz')
    # mongodb
    db = MongoClient('wl-cmadmin', 27017).ArchDB_Pfam_071414.ArchDB_Pfam_071414
    # list() so pymongo receives a concrete sequence rather than a map object
    db.insert(list(map(lambda item: {'_id': min(item[1]), 'pID': list(item[1]), 'Pfam': item[0]}, pfam_sets.items())))
    db.ensure_index('pID')
    db.ensure_index('Pfam')
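The `zip(range(...), range(...))` expression above turns a large index space into fixed-size `(start, end)` windows, one per task; a standalone illustration:

by, m = 10000, 60000000
windows = list(zip(range(0, m, by), range(by, m + by, by)))
print(windows[0], windows[-1])  # (0, 10000) (59990000, 60000000)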
Example 8: main
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def main():
    cases = {}
    meta_cases = {}
    meta_meta_cases = {}
    if len(sys.argv) != 2:
        print("Usage: %s <path to binary file>" % sys.argv[0])
        return 0
    if not os.path.exists(sys.argv[1]):
        print("No such file %s" % sys.argv[1])
        return 1
    fsize = os.stat(sys.argv[1]).st_size
    if fsize < MAX_OPLEN:
        print("muy pequeño: %s" % sys.argv[1])
        return 1
    with open(sys.argv[1], "rb") as f:
        input_data = f.read()
    pool = ProcessPoolExecutor(CONCURRENCY)
    for offset in range(0, fsize - 20, CONCURRENCY):
        inputs = [hexlify(input_data[o:o + MAX_OPLEN])
                  for o in range(offset, offset + CONCURRENCY)]
        tasks = pool.map(check_hexpairs, inputs)
        for res in tasks:
            if not res:
                continue
            inskey = res['case']
            insmkey = res['metacase']
            insmmkey = res['metametacase']
            meta_meta_cases[insmmkey] = meta_meta_cases.get(insmmkey, 0) + 1
            meta_cases[insmkey] = meta_cases.get(insmkey, 0) + 1
            if (meta_cases[insmkey] > MAX_METACASE_EXAMPLES or
                    meta_meta_cases[insmmkey] > MAX_META_META_CASE_EXAMPLES):
                pass
            elif inskey not in cases:
                cases[inskey] = cases.get(inskey, 0) + 1
                print("%s\n" % json.dumps(res, indent=4))
Example 9: run
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def run():
    args = get_config()
    dargs = vars(args)
    # launch a bunch of processes to look at all systematics
    if args.systematic == 'all':
        dargs['quiet'] = True  # multiprocessing makes a mess of the outputs
        systs = get_all_systematics(args.files)
        syst_args = {x: dargs.copy() for x in systs}
        for syst in systs:
            syst_args[syst]['systematic'] = syst
        executor = Executor()  # presumably ProcessPoolExecutor imported as Executor
        counts_list = executor.map(run_systematic, syst_args.values())
        counts_dict = {}
        for subdict in counts_list:
            counts_dict.update(subdict)
    # or just do one...
    else:
        counts_dict = run_systematic(dargs)
    with open(dargs['output'], 'w') as out_yml:
        translated = fitinputs.translate_to_fit_inputs(counts_dict)
        out_yml.write(yaml.dump(translated))
Example 10: is_odd_number
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
from concurrent.futures import ProcessPoolExecutor

def is_odd_number(number):
    return number % 2

if __name__ == '__main__':  # guard is required on spawn-based platforms
    executor = ProcessPoolExecutor()
    it = executor.map(is_odd_number, [1, 2], timeout=1)
    print(next(it))  # 1
    print(next(it))  # 0
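The `timeout=1` argument bounds how long each `next()` on the result iterator may wait; if a result is not ready in time, `concurrent.futures.TimeoutError` is raised. A sketch of that failure path with a deliberately slow worker (`slow` is illustrative, not from the original):

import time
from concurrent.futures import ProcessPoolExecutor, TimeoutError

def slow(x):
    time.sleep(2)  # longer than the 1-second timeout below
    return x

if __name__ == '__main__':
    executor = ProcessPoolExecutor()
    it = executor.map(slow, [1], timeout=1)
    try:
        next(it)
    except TimeoutError:
        print('result not ready within 1 second')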
Example 11: do_multi_process
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def do_multi_process():
    # relies on a module-level `numbers` list of pairs and the gcd helper (see Example 6)
    start = time()
    pool = ProcessPoolExecutor(max_workers=2)
    result = list(pool.map(gcd, numbers))
    end = time()
    print('Took %.3f seconds' % (end - start))
Example 12: main
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def main():
    arguments = create_parser()
    if arguments.get('show_version'):
        print(INTRO)
        return

    if 'settings_path' in arguments:
        sp = arguments['settings_path']
        arguments['settings_path'] = os.path.abspath(sp) if os.path.isdir(sp) else os.path.dirname(os.path.abspath(sp))
        if not os.path.isdir(arguments['settings_path']):
            print("WARNING: settings_path dir does not exist: {0}".format(arguments['settings_path']))

    if 'virtual_env' in arguments:
        venv = arguments['virtual_env']
        arguments['virtual_env'] = os.path.abspath(venv)
        if not os.path.isdir(arguments['virtual_env']):
            print("WARNING: virtual_env dir does not exist: {0}".format(arguments['virtual_env']))

    file_names = arguments.pop('files', [])
    if file_names == ['-']:
        SortImports(file_contents=sys.stdin.read(), write_to_stdout=True, **arguments)
    else:
        if not file_names:
            file_names = ['.']
            arguments['recursive'] = True
            if not arguments.get('apply', False):
                arguments['ask_to_apply'] = True
        config = from_path(os.path.abspath(file_names[0]) or os.getcwd()).copy()
        config.update(arguments)
        wrong_sorted_files = False
        skipped = []
        if arguments.get('recursive', False):
            file_names = iter_source_code(file_names, config, skipped)
        num_skipped = 0
        if config['verbose'] or config.get('show_logo', False):
            print(INTRO)
        jobs = arguments.get('jobs')
        if jobs:
            executor = ProcessPoolExecutor(max_workers=jobs)
            for sort_attempt in executor.map(functools.partial(sort_imports, **arguments), file_names):
                if not sort_attempt:
                    continue
                incorrectly_sorted = sort_attempt.incorrectly_sorted
                if arguments.get('check', False) and incorrectly_sorted:
                    wrong_sorted_files = True
                if sort_attempt.skipped:
                    num_skipped += 1
        else:
            for file_name in file_names:
                try:
                    sort_attempt = SortImports(file_name, **arguments)
                    incorrectly_sorted = sort_attempt.incorrectly_sorted
                    if arguments.get('check', False) and incorrectly_sorted:
                        wrong_sorted_files = True
                    if sort_attempt.skipped:
                        num_skipped += 1
                except IOError as e:
                    print("WARNING: Unable to parse file {0} due to {1}".format(file_name, e))
        if wrong_sorted_files:
            exit(1)
        num_skipped += len(skipped)
        if num_skipped and not arguments.get('quiet', False):
            if config['verbose']:
                for was_skipped in skipped:
                    print("WARNING: {0} was skipped as it's listed in 'skip' setting"
                          " or matches a glob in 'skip_glob' setting".format(was_skipped))
            print("Skipped {0} files".format(num_skipped))
Example 13: __init__
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
class ScoreProcessor:
    IO_WORKER_MULTIPLIER = 0.25
    MIN_IO_WORKERS = 2
    MAX_IO_WORKERS = 10

    def __init__(self, scoring_model, extractor, cpu_workers=None,
                 io_workers=None, batch_size=50):
        self.scoring_model = scoring_model
        self.extractor = extractor
        self.cpu_workers = \
            int(cpu_workers) if cpu_workers is not None else cpu_count()
        self.batch_size = int(batch_size)

        if io_workers is not None:
            self.io_workers = int(io_workers)
        else:
            self.io_workers = max(self.MIN_IO_WORKERS,
                                  min(self.MAX_IO_WORKERS,
                                      int(self.cpu_workers *
                                          self.IO_WORKER_MULTIPLIER)))

        logger.info("Starting up IO thread pool with {0} workers"
                    .format(self.io_workers))
        self.scores_ex = ThreadPoolExecutor(max_workers=self.io_workers)

        logger.info("Starting up CPU thread pool with {0} workers"
                    .format(self.cpu_workers))
        self.process_ex = ProcessPoolExecutor(max_workers=self.cpu_workers)

        roots = dependencies.dig(self.scoring_model.features)
        self.root_datasources = [d for d in roots if isinstance(d, Datasource)]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # fixed: the original used a bare __exit__(self) and referred to
        # non-existent scores_executor/process_executor attributes
        self.scores_ex.shutdown()
        self.process_ex.shutdown()

    def score(self, rev_ids, caches=None, cache=None):
        if isinstance(rev_ids, int):
            rev_ids = [rev_ids]

        batches = batch_rev_caches(chunked(rev_ids, self.batch_size), caches,
                                   cache)

        for batch_scores in self.scores_ex.map(self._score_batch, batches):
            for score in batch_scores:
                yield score

    def _score_batch(self, batch_rev_cache):
        id_batch, caches, cache = batch_rev_cache
        logger.debug("running _score_batch() on {0} rev_ids"
                     .format(len(id_batch)))
        error_values = self.extractor.extract(
            id_batch, self.root_datasources, caches=caches, cache=cache)
        e_r_caches = self._group_error_root_caches(
            id_batch, error_values, caches, cache)

        rev_scores = self.process_ex.map(self._process_score, e_r_caches)
        return list(rev_scores)

    def _group_error_root_caches(self, id_batch, error_values, caches, cache):
        for rev_id, (error, vals) in zip(id_batch, error_values):
            if error:
                score_cache = {}
                scoring_model = None
                extractor = None
            else:
                score_cache = {}
                score_cache.update(cache or {})
                score_cache.update((caches or {}).get(rev_id, {}))
                score_cache.update({rd: rv for rd, rv in
                                    zip(self.root_datasources, vals)})
                scoring_model = self.scoring_model
                extractor = self.extractor

            yield (rev_id, scoring_model, extractor, score_cache, error)

    @classmethod
    def _process_score(cls, e_r_caches):
        rev_id, scoring_model, extractor, cache, error = e_r_caches
        logger.debug("running _process_score() on {0}".format(rev_id))

        if error is None:
            try:
                feature_values = list(extractor.solve(
                    scoring_model.features, cache=cache))
            except Exception as error:
                logger.debug("An error occurred during feature extraction")
                # fixed: the original raised and then had an unreachable return
                return rev_id, error_score(error)
        else:
            return rev_id, error_score(error)

        try:
            score = scoring_model.score(feature_values)
            return rev_id, score
        except Exception as error:
            logger.debug("An error occurred during scoring")
            return rev_id, error_score(error)
#......... part of the code omitted here .........
Example 14: ProcessPoolExecutor
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
# NOTE: this example is truncated on both ends (the benj_fields list begins
# mid-definition) and is Python 2 code: print statement, itertools.imap,
# types.StringType.
               'AdjPval',
               'Group1Name',
               'AI',
               'AI-pval',
               'AI-null']
fname = 'more_phylip_BenjRes.tsv'
benj_writer = csv.DictWriter(open(fname, 'w'), benj_fields, delimiter='\t')
benj_writer.writeheader()

multi = True
print 'Starting multiprocessing!'
if multi:
    pool = ProcessPoolExecutor(max_workers=30)
    results = pool.map(calculate_region, yield_regions(trop_dict))
else:
    results = imap(calculate_region, islice(yield_regions(trop_dict), 0, 35))

for gname, sub, prot, win, start, benj_res in results:
    #print prot, start, win
    tdict = {
        'Prot': prot,
        'Start': start,
        'WinSize': win,
        'GroupName': gname,
        'Subtype': sub,
    }
    if type(benj_res) is StringType:
        if (benj_res == 'Already Processed') or benj_res.startswith('Too few unique sequences'):
Example 15: main_3
# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import map [as alias]
def main_3():
    # times the same gcd-over-pairs workload as Examples 6 and 11
    start = time.time()
    pool = ProcessPoolExecutor(max_workers=8)
    list(pool.map(gcd, numbers))
    end = time.time()
    return 'Took %.3f seconds' % (end - start)