This article collects typical usage examples of the Python method multiprocessing.Pool.starmap. If you are unsure what Pool.starmap does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also read further about the containing class, multiprocessing.Pool.
Below are 15 code examples of Pool.starmap, sorted by popularity.
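Before the examples, a minimal self-contained sketch of what starmap does may be useful: it takes an iterable of argument tuples and unpacks each tuple into the positional arguments of the worker function, blocking until every result is ready. The worker add below is hypothetical and not taken from any of the examples.
# A minimal sketch of Pool.starmap; add is a hypothetical worker.
from multiprocessing import Pool

def add(x, y):
    return x + y

if __name__ == '__main__':
    with Pool(2) as pool:
        results = pool.starmap(add, [(1, 2), (3, 4)])
    print(results)  # [3, 7]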
Example 1: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def main():
    pool = Pool()
    a_args = [1, 2, 3]
    second_arg = 1
    # zip pairs every value of a_args with the constant second_arg (requires: import itertools)
    argzip = zip(a_args, itertools.repeat(second_arg))
    # pool.starmap(func, argzip)  # the original had pool.map here, which would pass each tuple as one argument
    pool.starmap(func, [(1, 2), (2, 3)])
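The snippet calls an undefined func; any two-argument callable works. A hypothetical worker compatible with both the argzip pairs and the literal tuples above:
# Hypothetical worker for Example 1, not part of the original code.
def func(a, b):
    print(a + b)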
Example 2: call_func_parallel
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def call_func_parallel(func, args_iterator, workers=-1):
    from multiprocessing import Pool
    if workers == -1:
        workers = get_core_count()  # project helper; os.cpu_count() is the stdlib equivalent
    pool = Pool(workers)
    pool.starmap(func, args_iterator)
    pool.terminate()
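Note that terminate() stops the workers abruptly rather than letting them finish; it is safe here only because starmap blocks until all tasks complete. A sketch of the more idiomatic cleanup, assuming os.cpu_count() is an acceptable stand-in for the project's get_core_count helper:
import os
from multiprocessing import Pool

def call_func_parallel(func, args_iterator, workers=-1):
    if workers == -1:
        workers = os.cpu_count()  # stand-in for get_core_count()
    with Pool(workers) as pool:  # the context manager calls terminate() on exit
        pool.starmap(func, args_iterator)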
Example 3: process_image_list
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def process_image_list(l):
    db.close_old_connections()
    pool = Pool()
    pool.starmap(migrate_image_resize, l)
    pool.close()
    pool.join()
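Closing stale connections before creating the pool is presumably deliberate: forked workers would otherwise inherit the parent's open database sockets, and concurrent use of one socket corrupts the connection. After django.db.close_old_connections(), each worker opens a fresh connection on first use.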
Example 4: run_sub_folders2
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def run_sub_folders2(MotherFolder, DaisyFileName, DaisyExecutabl, NumberOfProcesses=6, recursive=False):
    """
    Runs all the Daisy simulations found below the MotherFolder.
    """
    pp = Pool(NumberOfProcesses)
    input = []
    for i in range(NumberOfProcesses):
        input.append((MotherFolder, DaisyFileName, DaisyExecutabl, i, recursive, None))
    pp.starmap(run_single2, input)
    pp.terminate()
Example 5: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def main():
    num_process = 4
    pool = Pool(num_process)
    num_post = post_detail_infos.find({'segmented': {'$ne': 1}}).count()
    print('Number to segment: ' + str(num_post))
    batch_size = int(num_post / num_process)
    print('Batch size: ' + str(batch_size))
    args = [(idx * batch_size, (idx + 1) * batch_size) for idx in range(num_process)]
    pool.starmap(segmentation, args)
    pool.close()
    pool.join()
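One caveat: with batch_size = int(num_post / num_process), the final num_post % num_process documents fall outside every (start, end) range. A sketch of batch bounds that cover the whole collection, assuming segmentation(start, end) processes the half-open range [start, end):
def make_batches(total, workers):
    # Spread the remainder across the first `extra` batches.
    base, extra = divmod(total, workers)
    bounds, start = [], 0
    for i in range(workers):
        end = start + base + (1 if i < extra else 0)
        bounds.append((start, end))
        start = end
    return bounds

# make_batches(10, 4) -> [(0, 3), (3, 6), (6, 8), (8, 10)]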
Example 6: multi_main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def multi_main(proc, FILENAME, FUN, **kargs):
    pool = Pool(proc)
    multiargs = []
    # for FPGrowth_LERS
    if FUN == updateConfidenceSupport:
        min_sup_range = kargs['min_sup_range']
        for iter1, iter2, min_sup in product(range(1, 2), range(1, 11), min_sup_range):
            multiargs.append((FILENAME, iter1, iter2, min_sup))
        print(multiargs)
        pool.starmap(FUN, multiargs)
    else:
        print("I don't know the function.")
Example 7: write_cost_matrix
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def write_cost_matrix(self):
    begin = timeit.default_timer()
    pool = Pool(processes=cpu_count())
    iterable = []
    for i in range(self.n):
        for j in range(i + 1, self.n):
            iterable.append((i, j))
    pool.starmap(self.set_total_costs_matrix, iterable)
    self.total_costs_matrix.dump(os.getcwd() + '/memoria/outputs/cost_matrix')
    end = timeit.default_timer()
    print(end - begin)
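Two caveats when starmapping a bound method: the instance must be picklable, since each worker operates on its own copy of self, and any writes set_total_costs_matrix makes to self.total_costs_matrix land in that copy, not in the parent that later calls dump(). One hypothetical reworking is to have the method return its result and collect in the parent:
# Hypothetical: assumes the method is changed to return an (i, j, cost) triple.
for i, j, cost in pool.starmap(self.compute_pair_cost, iterable):
    self.total_costs_matrix[i, j] = cost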
Example 8: parse_wsj
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def parse_wsj(processes=8):
    ptb = LazyCorpusLoader(  # Penn Treebank v3: WSJ portions
        'ptb', CategorizedBracketParseCorpusReader, r'wsj/\d\d/wsj_\d\d\d\d.mrg',
        cat_file='allcats.txt', tagset='wsj')
    fileids = ptb.fileids()
    params = []
    for f in fileids:
        corpus = zip(ptb.parsed_sents(f), ptb.tagged_sents(f))
        for i, (parsed, tagged) in enumerate(corpus):
            params.append((f, i, parsed, tagged))
    p = Pool(processes)
    p.starmap(get_best_parse, sorted(params, key=lambda x: (x[0], x[1])))
Example 9: repeat_databases
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def repeat_databases(source_path, database_count, exploration_count=0, exploitation_count=0, random_count=0,
                     processes=7, add_new=False, exploit_method='furthest', record_errors=True, start_number=0):
    # Check source path
    if not os.path.isdir(source_path):
        print('Unable to find source: ' + source_path)
        return
    # Generate threading lists
    if add_new:
        items = os.listdir(source_path)
        databases = []
        for item in items:
            try:
                databases.append(int(item))
            except ValueError:
                continue
        if len(databases) != 0:
            start_number = max(databases) + 1
    end_number = start_number + database_count
    paths = PathNaming(os.name, database_path=source_path)
    database_paths = [source_path + str(i) + paths.slash for i in range(start_number, end_number)]
    explorations = [exploration_count for i in range(database_count)]
    exploitations = [exploitation_count for i in range(database_count)]
    randoms = [random_count for i in range(database_count)]
    exploit_method = [exploit_method for i in range(database_count)]
    record_errors = [record_errors for i in range(database_count)]
    # Make a new folder for each database and place the input files in it
    for i in range(database_count):
        if not os.path.isdir(database_paths[i]):
            os.mkdir(database_paths[i])
        shutil.copy(source_path + paths.base_input, database_paths[i])
        shutil.copy(source_path + paths.dbase_input, database_paths[i])
    # If there are existing libraries to begin with, copy them as well
    if os.path.isdir(source_path + paths.slash + paths.FR_Input_folder + paths.slash):
        source_dir = source_path + paths.slash + paths.FR_Input_folder + paths.slash
        for i in range(database_count):
            copy_tree(source_dir, database_paths[i] + paths.slash + paths.FR_Input_folder)
    if os.path.isdir(source_path + paths.slash + paths.FR_Output_folder + paths.slash):
        source_dir = source_path + paths.slash + paths.FR_Output_folder + paths.slash
        for i in range(database_count):
            copy_tree(source_dir, database_paths[i] + paths.slash + paths.FR_Output_folder)
    # Run databases
    pool = Pool(processes=processes)
    pool.starmap(database_thread, zip(database_paths, explorations, exploitations, randoms, exploit_method,
                                      record_errors))
    return
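Here zip bundles six equal-length lists into one iterable of six-tuples, so each database_thread call receives its own database path together with the per-run settings. Rebinding the exploit_method and record_errors parameters to lists works, though it shadows the original scalar arguments.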
Example 10: pool_decode
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def pool_decode(self, data, callback):
    """
    Decode mz and i values in parallel.

    Args:
        data (): ...

    Keyword Args:
        callback (:obj:`func`): Callback function to call if decoding is
            finished. Should be :py:meth:`~pymzml.spec.Spectrum._register`.
    """
    ZE_POOL = Pool(processes=2)
    ZE_POOL.starmap(_decode, data)
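The pool created here is never closed, and callback goes unused in the excerpt. A sketch that at least releases the two worker processes once decoding finishes, with _decode and data as above:
with Pool(processes=2) as ze_pool:
    ze_pool.starmap(_decode, data)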
Example 11: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def main():
    # user_statistic(0, 50)
    num_process = 4
    pool = Pool(num_process)
    num_post = post_detail_infos.find({'crawled_user_info': {'$ne': 1}}).count()
    print('Number to get user infos: ' + str(num_post))
    batch_size = int(num_post / num_process)
    print('Batch size: ' + str(batch_size))
    args = [(idx * batch_size, (idx + 1) * batch_size) for idx in range(num_process)]
    pool.starmap(user_statistic, args)
    pool.close()
    pool.join()
Example 12: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def main(processes=1):
    ptb = Constants().ptb
    fileids = list(ptb.fileids())
    params = []
    for fileid in fileids[:10]:
        for sent_num, parse_tree in enumerate(ptb.parsed_sents(fileid)):
            params.append((fileid, sent_num, parse_tree))
    if processes > 1:
        p = Pool(processes)
        p.starmap(score, sorted(params, key=lambda x: (x[0], x[1])))
    else:
        for param in params:
            score(*param)
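The single-process branch reproduces starmap's unpacking by hand with score(*param). This fallback is a handy pattern for debugging, since exceptions surface directly in the main process instead of being re-raised from a worker.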
Example 13: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def main():
    if len(sys.argv) != 4:
        print("Usage: python3 tweetTokenize.py <tweets_folder> <dest_folder> <num_process>")
        sys.exit(-1)
    tweets_folder = sys.argv[1]
    dest_folder = sys.argv[2]
    num_process = int(sys.argv[3])
    tweets_filenames = glob.glob(os.path.join(tweets_folder, '*'))
    tweets_filenames = [(f, dest_folder) for f in tweets_filenames]
    if num_process == 1:
        for f, dest_folder in tweets_filenames:
            tokenize_tweets(f, dest_folder)
    else:
        pool = Pool(num_process)
        pool.starmap(tokenize_tweets, tweets_filenames)
Example 14: ctag
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def ctag(inputfiles, output_file, remove_stop_words, language, min_freq, min_len, pdf_output, debug, cpu_count):
    startTime = time.time()
    lInfo("process {} files".format(len(inputfiles)))
    pool = Pool(processes=cpu_count)
    params = [(inputfile, remove_stop_words, language, min_len) for inputfile in inputfiles]
    results = pool.starmap(build_word_histogram, params)
    global_histogram = {}
    for histogram in results:
        global_histogram = add_dict(global_histogram, histogram)
    # filter out words whose frequency is below min_freq
    global_histogram = {t: global_histogram[t] for t in global_histogram if global_histogram[t] >= min_freq}
    if debug:
        histogram_file = output_file.replace(os.path.splitext(output_file)[1], ".debug.json")
        lInfo("for debugging, write out intermediate histogram to: {}".format(histogram_file))
        with open(histogram_file, "w") as hist:
            hist.write(json.dumps(global_histogram, indent=4, sort_keys=True))
    with open(output_file, "w") as outfile:
        outfile.write(svg_cloud(global_histogram))
    if pdf_output:
        pdf_file_name = output_file.replace(os.path.splitext(output_file)[1], ".pdf")
        lInfo("create pdf graphic: {}".format(pdf_file_name))
        if shutil.which("inkscape") is None:
            lError("inkscape is not installed, therefore no pdf export is available.")
        else:
            os.system("""inkscape --without-gui --export-pdf="{pdffile}" {svgfile}""".format(svgfile=output_file, pdffile=pdf_file_name))
    lInfo("done: {} s".format(time.time() - startTime))
Example 15: gene_lcs
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import starmap [as alias]
def gene_lcs(genomes, base_dir):
    # create substring folder, check if substrings have previously been calculated
    if not os.path.exists(base_dir + 'substrings/'):
        os.makedirs(base_dir + 'substrings/')
    os.chdir(base_dir + 'substrings/')
    # import previous substring file if it exists
    substring_file = glob.glob('*.csv')
    orgstring = []
    if len(substring_file) == 1:
        with open('substrings.csv', newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            for row in reader:
                orgstring.append(row[0])
    if len(orgstring) == len(genomes):
        print('Organism substrings already calculated')
    else:
        print('Finding common substrings...')
        pool = ThreadPool(len(genomes))
        orgstring = pool.starmap(extract, zip(genomes, repeat(base_dir)))
        pool.close()
        pool.join()
        # write orgstring file
        os.chdir(base_dir + 'substrings/')
        with open('substrings.csv', 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            for line in orgstring:
                writer.writerow([line])
    return orgstring
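Unlike the other examples, this one uses ThreadPool, which exposes the same starmap API but runs its workers as threads inside one process; that suits I/O-bound work such as reading genome files and sidesteps pickling entirely. The imports the snippet assumes:
# Imports assumed by the snippet above.
from multiprocessing.pool import ThreadPool
from itertools import repeat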