This article collects and summarizes typical usage examples of the multiprocessing.Pool.imap method in Python. If you have been wondering what exactly Pool.imap does, how Pool.imap is used, or what real Pool.imap code looks like, the curated examples below may help. You can also explore further usage examples of its containing class, multiprocessing.Pool.
Below are 15 code examples of the Pool.imap method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
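Before the examples, a minimal self-contained sketch of the method itself may help (the worker square is purely illustrative): unlike Pool.map, which builds the entire result list before returning, Pool.imap returns a lazy iterator that yields results one at a time, in input order.

from multiprocessing import Pool

def square(x):  # illustrative worker: must be defined at module top level so it can be pickled
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # imap returns a lazy iterator; results arrive in input order.
        for result in pool.imap(square, range(10), chunksize=2):
            print(result)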
Example 1: update_vertex_positions_mt
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def update_vertex_positions_mt(self):
    num_procs = 16
    pool = Pool(processes=num_procs)
    self.blobj = self.get_blender_object().data.vertices
    # The third positional argument to imap is chunksize.
    pool.imap(update_one_vertex_no_matrix, self._vertices, len(self._vertices) // 8)
    pool.close()
    pool.join()
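The worker update_one_vertex_no_matrix is defined elsewhere in that project; a hypothetical sketch of the one-argument shape imap requires is below. One caveat worth knowing: each worker process receives a pickled copy of its vertex, so mutations made there do not propagate back to the parent unless the results of imap are consumed and applied.

# Hypothetical sketch; the real worker lives elsewhere in the project.
def update_one_vertex_no_matrix(vertex):
    # Receives one element of self._vertices (a copy, in the worker process).
    vertex.position = compute_new_position(vertex)  # both names are assumptions
    return vertex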
Example 2: multi_validate_rows
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def multi_validate_rows(rows, col_size):
    n_cores = 4
    print('N_CORES', n_cores)
    pool = Pool(n_cores)
    # Each worker gets an interleaved slice of the rows plus the column count.
    chunks = ((rows[i::n_cores], col_size) for i in range(n_cores))
    pool.imap(validate_rows, chunks)
    pool.close()
    pool.join()
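Each item of chunks is a (rows_slice, col_size) tuple, and imap passes each tuple to the worker as a single argument, so validate_rows must unpack it itself. A hedged sketch of the shape that implies:

# Hypothetical sketch of the worker this example assumes.
def validate_rows(args):
    rows_slice, col_size = args  # imap passes each tuple as one argument
    for row in rows_slice:
        if len(row) != col_size:
            raise ValueError('row has %d columns, expected %d' % (len(row), col_size))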
Example 3: repackage_revisions
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def repackage_revisions(revisions, revision_map, verify_run, staging_dir,
                        context, quit_event=None, progress_event=None):
    """Repackages all Chrome builds listed in revisions.

    This function calls 'repackage_single_revision' through a multiprocessing pool.
    """
    p = Pool(3)
    func = partial(repackage_single_revision, revision_map, verify_run,
                   staging_dir, context)
    p.imap(func, revisions)
    p.close()
    p.join()
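functools.partial freezes the leading arguments, so imap supplies only the varying revision on each call. A minimal standalone illustration of the same pattern (the worker work and its arguments are invented):

from functools import partial
from multiprocessing import Pool

def work(config, item):  # illustrative two-argument worker
    return (config, item)

if __name__ == '__main__':
    func = partial(work, 'fixed-config')  # freeze the first argument
    with Pool(3) as p:
        print(list(p.imap(func, [1, 2, 3])))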
Example 4: send
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def send(accounts, message):
    num = 0
    pool = Pool(processes=cpu_count() * 2)
    for data in accounts:
        email, password = data['email'], data['pass']
        proxy, num = proxies[num], num + 1
        cookie = "cookies/" + str(data['email']) + "_cookie"
        pool.imap(do_send, [(email, password, message, proxy, cookie)])
    pool.close()
    pool.join()  # wait for the queued sends to finish before returning
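Calling imap once per account with a one-element list does work, but it submits each task through a separate iterator and gains nothing from batching. A sketch of the more idiomatic form, building the argument list first and making a single imap call (same assumed do_send signature):

# Sketch: one imap call over all accounts instead of one call per account.
args = []
for num, data in enumerate(accounts):
    args.append((data['email'], data['pass'], message, proxies[num],
                 "cookies/" + str(data['email']) + "_cookie"))
for _ in pool.imap(do_send, args):  # consuming the iterator waits for completion
    pass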
Example 5: build_condensed_matrix
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def build_condensed_matrix(seqs, mode=2):
    result = np.array([], dtype=default_dtype)
    p = Pool(processes=cpu_count())
    if mode == 1:
        n = len(seqs)
        # Split the n*(n-1)/2 pairwise scores into two chunks per core;
        # chunksize must be at least 1 or imap raises ValueError.
        chunksize = max(1, n * (n - 1) // 2 // cpu_count() // 2)
        result_one = p.imap(get_score, make_iter(seqs, mode=1), chunksize=chunksize)
        result = np.array(list(result_one), dtype=default_dtype)
    else:
        result_one_row = p.imap(get_scores_one_row, make_iter(seqs, mode=2), chunksize=100)
        result = np.concatenate(list(result_one_row))
    p.close()
    p.join()
    return result
Example 6: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def main():
    starttime = datetime.now()
    parser = argparse.ArgumentParser(
        description="This program will run KaKs_Calculator on a directory.")
    parser.add_argument("-i", help="Path to input directory.")
    parser.add_argument("-o", help="Path to output directory.")
    parser.add_argument("-m", default="NG", help="Method for calculating Ka/Ks.")
    parser.add_argument("-t", type=int, default=1, help="Number of threads.")
    # Parse arguments and assign to variables.
    args = parser.parse_args()
    indir = args.i
    if indir[-1] != "/":
        indir += "/"
    outdir = args.o
    if outdir[-1] != "/":
        outdir += "/"
    method = args.m
    cpu = args.t
    if cpu > MAXCPU:
        cpu = MAXCPU
    # Call KaKs_Calculator in parallel.
    genes = glob(indir + "*.axt")
    pool = Pool(processes=cpu)
    func = partial(calculateKaKs, indir, outdir, method)
    print("\tRunning KaKs_Calculator with", str(cpu), "threads....")
    pool.imap(func, genes)
    pool.close()
    pool.join()
    # Compile output.
    compileKsKs(outdir)
    print("\tKaKs_Calculator runtime: ", datetime.now() - starttime)
Example 7: translate_concurrent
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def translate_concurrent(input_object, url, weights=None, num_processes=8):
    pool = Pool(processes=num_processes)
    text_args = [(line, weights, url) for line in input_object]
    for translated_line in pool.imap(translate_single_line, text_args):
        print(translated_line)
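Because imap yields results in input order, the translated lines print in the same order as the input even though they are computed concurrently. The worker must unpack its tuple itself; a heavily hedged sketch (the request and response shapes are invented):

import requests

# Hypothetical sketch of the worker this example assumes.
def translate_single_line(args):
    line, weights, url = args
    payload = {'q': line.strip(), 'weights': weights}  # assumed request shape
    response = requests.post(url, json=payload)
    return response.json().get('translation', '')      # assumed response field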
Example 8: extract_new_dataframes
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def extract_new_dataframes(dirs):
    pool = Pool(8)
    pbar = tqdm.tqdm(total=len(dirs))
    for job in pool.imap(extract_dataframe_subdir, dirs):
        pbar.update(1)
    pbar.close()
    pool.close()
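The manual pbar.update(1) loop is fine; tqdm can also wrap the imap iterator directly, which closes the bar automatically. A sketch of the equivalent form:

# Equivalent sketch: let tqdm drive the progress bar.
pool = Pool(8)
for _ in tqdm.tqdm(pool.imap(extract_dataframe_subdir, dirs), total=len(dirs)):
    pass
pool.close()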
Example 9: calc_mv_classifier
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def calc_mv_classifier(clf, scorer, regions=None, processes=7, method='sequential'):
    import os.path as path
    from tempfile import mkdtemp
    n_regions = clf.data.shape[0]
    if regions is None:
        regions = range(0, n_regions)
    if processes != 1:
        from multiprocessing import Pool
        pool = Pool(processes=processes)
        imap = pool.imap
    else:
        imap = map  # serial fallback with the same calling convention
    pb = tools.ProgressBar(len(regions), start=True)
    # Memory-map the data so worker processes can load it from disk
    # instead of pickling it with every task.
    filename = path.join(mkdtemp(), 'data.dat')
    data = np.memmap(filename, dtype='object', mode='w+', shape=clf.comp_dims)
    data[:] = clf.data[:]
    shared_args = (filename, clf.classifier, scorer, clf.comp_dims,
                   clf.feature_importances, np.array(clf.feature_names), method)
    overall_results = []
    for result in imap(calc_mv_parallel_classifier,
                       zip(itertools.repeat(shared_args), regions)):
        pb.next()
        for row in result:
            overall_results.append(row)
    overall_results = pd.DataFrame(
        overall_results, columns=['score', 'num_features', 'region', 'feature'])
    overall_results.region += 1
    return overall_results
Example 10: newest_snapshot
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def newest_snapshot(project_id, hosts=None, timeout=20):
    """
    Return the most recent snapshot, or the empty string if there is none.

    If hosts is a single IP address, return the newest snapshot on that host.
    If hosts is a list of IP addresses (or hostnames), return a dictionary
    whose keys are the entries of hosts and whose values are the names of the
    newest snapshots. Hosts that don't respond within timeout seconds are
    ignored.
    """
    if not isinstance(hosts, list):
        return _newest_snapshot(project_id, hosts)
    pool = Pool(processes=len(hosts))
    start = time.time()
    x = pool.imap(mp_newest_snapshot, [(project_id, dest) for dest in hosts])
    result = []
    while True:
        try:
            t = timeout - (time.time() - start)  # time left on the shared deadline
            if t > 0:
                result.append(x.next(t))
            else:
                raise TimeoutError
        except TimeoutError as mesg:
            log.info("timed out connecting to some destination -- %s", mesg)
            pool.terminate()
            break
        except StopIteration:
            break
    # Assumes mp_newest_snapshot returns (host, snapshot) pairs.
    return dict(result)
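The iterator returned by imap supports next(timeout), which is what makes this shared-deadline drain possible. A minimal standalone sketch of the same pattern (the slow worker is invented):

import time
from multiprocessing import Pool, TimeoutError

def slow(x):  # illustrative worker
    time.sleep(x)
    return x

if __name__ == '__main__':
    with Pool(4) as pool:
        it = pool.imap(slow, [0.1, 0.2, 5.0])
        deadline = time.time() + 1.0
        results = []
        while True:
            try:
                results.append(it.next(timeout=deadline - time.time()))
            except TimeoutError:
                pool.terminate()  # abandon the stragglers
                break
            except StopIteration:
                break
        print(results)  # [0.1, 0.2] -- the 5-second task missed the deadline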
Example 11: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def main(out):
    out.write(('P4\n%d %d\n' % (size, size)).encode('ASCII'))
    pool = Pool()
    step = 2.0j / size
    # imap preserves input order, so rows are written top to bottom.
    for row in pool.imap(do_row, (step * y - (1.5 + 1j) for y in range(size))):
        out.write(row)
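This is the classic parallel Mandelbrot-to-PBM program; imap matters here precisely because the rows must be written in file order. The worker do_row and the global size live elsewhere in that program; a hypothetical sketch of a compatible worker:

# Hypothetical sketch: compute one row of the P4 (packed-bit) bitmap.
def do_row(fy):
    row = bytearray()
    byte = 0
    for x in range(size):
        c = 2.0 * x / size + fy   # fy carries the row's imaginary offset
        z = 0j
        bit = 1
        for _ in range(50):       # the iteration cap is an assumption
            z = z * z + c
            if abs(z) > 2.0:
                bit = 0           # escaped: point is outside the set
                break
        byte = (byte << 1) | bit
        if x % 8 == 7:            # P4 packs 8 pixels per byte, MSB first
            row.append(byte)
            byte = 0
    if size % 8:
        row.append(byte << (8 - size % 8))
    return bytes(row)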
Example 12: find_words
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def find_words(self):
    """ Run all words through find_word using Pool.imap """
    if not all((self.number, self.wordlist, self.combos)):
        raise ValueError('Must have a number, a wordlist, and combos!')

    # TODO: Reduce memory footprint and waste on this whole operation.
    def format_results(resultsets):
        """ Format final results. """
        if resultsets:
            resultsfmt = {}
            for resultset in resultsets:
                if resultset:
                    resultsfmt.update(resultset)
            return resultsfmt
        return {}

    # Set up a pool of processes/workers.
    pool = Pool(processes=self.processes)
    # Map find_word over the wordlist, and format the final results.
    rawresult = pool.imap(
        self.find_word,
        self.wordlist,
        chunksize=self.chunksize)
    results = format_results(rawresult)
    return results, self.totallen
Example 13: get_valid_fragments
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def get_valid_fragments(G, stoich_rank):
    complexes, reactions = bipartite.sets(G)
    complexes = list(complexes)
    reactions = list(reactions)
    if 'w1' not in complexes and 'w1' not in reactions:
        raise Exception("my hack to resolve this unexpected behavior shown by "
                        "bipartite.sets assumes that reaction nodes are named "
                        "'w1', 'w2', ...")
    if 'w1' in complexes:
        complexes, reactions = reactions, complexes
    if not ('w1' in reactions and 's1' in complexes):
        raise Exception('Something went wrong generating the lists of complexes and reactions.')
    complex_perms = list(it.combinations(complexes, stoich_rank))
    reaction_perms = list(it.combinations_with_replacement(reactions, stoich_rank))
    fragments = list(it.product(complex_perms, reaction_perms))
    pool = Pool()
    chunksize = 100
    myval = functools.partial(validate_fragments, G, stoich_rank)
    # The third positional argument to imap is chunksize.
    fragment_list = pool.imap(myval, fragments, chunksize)
    valid_fragments = [f for f in fragment_list if f is not None]
    return get_unique_fragments(valid_fragments)
Example 14: subsample
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def subsample(cache_dir, image_sets, ipython_profile):
    parameters = [(cache_dir, images) for images in image_sets]
    if ipython_profile:
        from IPython.parallel import Client, LoadBalancedView
        client = Client(profile='lsf')
        lview = client.load_balanced_view()
        generator = lview.imap(_compute_group_subsample, parameters)
    elif ipython_profile == False:
        # An explicit False means: compute serially in this process.
        generator = (_compute_group_subsample(p) for p in parameters)
    else:
        from multiprocessing import Pool
        lview = Pool()
        generator = lview.imap(_compute_group_subsample, parameters)
    progress = progressbar.ProgressBar(widgets=['Subsampling:',
                                                progressbar.Percentage(), ' ',
                                                progressbar.Bar(), ' ',
                                                progressbar.Counter(), '/',
                                                str(len(parameters)), ' ',
                                                progressbar.ETA()],
                                       maxval=len(parameters))
    results = list(progress(generator))
    subsample = []
    for i, (p, r) in enumerate(zip(parameters, results)):
        if r is None:
            print('#### There was an error, recomputing locally: %s' % parameters[i][1],
                  file=sys.stderr)
            # Recompute serially, both to surface the exception and to fill the gap.
            r = results[i] = _compute_group_subsample(p)
        subsample.extend(r)
    print("the subsampling set contains %d items" % len(subsample))
    return subsample
Example 15: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap [as alias]
def main(data, total):
    global all_headlines
    global inv
    with open(INV_DOC_COUNTS) as inf:
        inv = json.load(inf)
    all_headlines = get_headlines(data)
    pool = Pool(1)
    counter = 0
    out_data = []
    # Note: data[:3] limits this run to the first three articles.
    for article, possible_headlines in pool.imap(assign_headline_tfidf_total, data[:3]):
        print(counter, article["headline"], possible_headlines[0])
        counter += 1
        article["top_tfidf"] = possible_headlines
        out_data += [article]
    with open(RESULT_FILE, 'w') as outf:
        json.dump(out_data, outf)
    num_correct = 0
    num_incorrect = 0
    incrt = []
    for article in out_data:
        if article["headline"] == article["tf_idf_prediction"][0][0]:
            num_correct += 1
        else:
            num_incorrect += 1
            incrt += [article]
    print("Num correct: %i" % num_correct)
    print("Num incorrect: %i" % num_incorrect)
    with open(OUT_INCORRECT, 'w') as outf:
        json.dump(incrt, outf)