This article collects typical usage examples of the Python method multiprocessing.pool.Pool.close. If you have been wondering what Pool.close is for, how it is used, or what real calls to it look like, the curated examples below may help. You can also explore further usage examples of the class the method belongs to, multiprocessing.pool.Pool.
The following shows 15 code examples of Pool.close, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
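Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern the examples share: close() tells the pool that no more work will be submitted, and join() then blocks until the already-queued tasks have finished and the workers have exited; terminate(), by contrast, stops the workers immediately without draining the queue.

from multiprocessing.pool import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)
    try:
        results = pool.map(square, range(10))  # dispatch work to the worker processes
        print(results)
    finally:
        pool.close()  # no further tasks may be submitted after this call
        pool.join()   # wait for the workers to drain the queue and exit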
Example 1: _itergroundings
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def _itergroundings(self, simplify=False, unsatfailure=False):
    global global_bpll_grounding
    global_bpll_grounding = self
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        try:
            for gndresult in pool.imap(with_tracing(create_formula_groundings), self.formulas):
                for fidx, stat in gndresult:
                    for (varidx, validx, val) in stat:
                        self._varidx2fidx[varidx].add(fidx)
                        self._addstat(fidx, varidx, validx, val)
                    checkmem()
                yield None
        except CtrlCException as e:
            pool.terminate()
            raise e
        pool.close()
        pool.join()
    else:
        for gndresult in imap(create_formula_groundings, self.formulas):
            for fidx, stat in gndresult:
                for (varidx, validx, val) in stat:
                    self._varidx2fidx[varidx].add(fidx)
                    self._addstat(fidx, varidx, validx, val)
            yield None
Example 2: start
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def start(self):
    """Starts a server that controls local workers.

    Calling this function starts a pool of `num_workers` workers used to run
    targets sent to the server. The server will run indefinitely unless shut
    down by the user.
    """
    try:
        serv = Listener((self.hostname, self.port))
        workers = Pool(
            processes=self.num_workers,
            initializer=Worker,
            initargs=(self.status, self.queue, self.waiting),
        )
        logging.info(
            "Started %s workers, listening on port %s",
            self.num_workers,
            serv.address[1],
        )
        self.wait_for_clients(serv)
    except OSError as e:
        if e.errno == 48:
            raise ServerError(
                (
                    "Could not start workers listening on port {}. "
                    "The port may already be in use."
                ).format(self.port)
            )
    except KeyboardInterrupt:
        logging.info("Shutting down...")
        workers.close()
        workers.join()
        self.manager.shutdown()
Example 3: _MultiExecutor
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
class _MultiExecutor(_Executor):
    """Execute functions async in a process pool"""

    def __init__(self):
        super(_MultiExecutor, self).__init__()
        self._children = 0
        self.pool = Pool()

    def _collector(self, result):
        super(_MultiExecutor, self)._collector(result)
        self._children -= 1

    def execute(self, func, args):
        self._children += 1
        self.pool.apply_async(func, args, callback=self._collector)

    def wait_for_results(self):
        self.pool.close()
        # One would have hoped joining the pool would take care of this, but
        # apparently you need to first make sure that all your launched tasks
        # have returned their results properly before calling join, or you
        # risk a deadlock (see the sketch after this example).
        while self._children > 0:
            time.sleep(0.001)
        self.pool.join()
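The busy-wait in wait_for_results above works; as a hedged alternative (sketched here with made-up names such as _work, not part of the original class), you can keep the AsyncResult handles returned by apply_async and wait on them explicitly. get() blocks until each task has delivered its result, so close() and join() can then proceed without outstanding work.

from multiprocessing.pool import Pool

def _work(x):
    return x + 1

if __name__ == '__main__':
    pool = Pool()
    # Keep the AsyncResult handle for every task submitted with apply_async.
    handles = [pool.apply_async(_work, (i,)) for i in range(8)]
    # get() blocks until the corresponding task has finished (or re-raises its error).
    results = [h.get() for h in handles]
    pool.close()  # no more submissions
    pool.join()   # reap the worker processes
    print(results)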
Example 4: get_correlation_parallel
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def get_correlation_parallel(s1, s2):
    """
    params s1 - series 1
    params s2 - series 2
    NOTE : series are numbered 1 to 25 when given as arguments
    returns the correlation between the series
    """
    start = time.time()
    offsets = []  # this will be the arguments to all the parallel jobs
    instances = (MAX_ROWS / BATCH_SIZE)
    mean, std = calculate_mean_std_parallel()
    stripped_mean, stripped_std = calculate_stripped_mean_std_parallel(mean, std)
    processes = Pool(processes=instances)
    for i in range(instances):
        offsets.append((s1, s2, mean, std, stripped_mean, stripped_std, i * BATCH_SIZE))
    results = processes.map(get_correlation, offsets)
    processes.close()
    processes.join()
    pearson_corr = 0
    total = 0
    for result in results:
        pearson_corr += result[0] * result[1]
        total += result[1]
    pearson_corr = 1.0 * pearson_corr / total
    t_value = abs(pearson_corr * math.sqrt(1.0 * (total - 2) / (1 - (pearson_corr * pearson_corr))))
    p_value = t.sf(t_value, total - 2)
    print "\n ######### CORRELATION BETWEEN SERIES ", s1, " AND SERIES ", s2, " is ", pearson_corr, "t value is ", t_value, " and p value is ", p_value, "######### \n"
    end = time.time()
    print "EXECUTION TIME : ", end - start, " sec"
    return pearson_corr
Example 5: parse
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def parse(document, pages, parse_refs=True,
          progress_monitor=NullProgressMonitor(),
          pool_size=DEFAULT_POOL_SIZE):
    progress_monitor.start('Parsing Pages', pool_size + 1)

    # Prepare input
    pages = [(page.local_url, page.url) for page in
             pages.values() if page.local_url is not None]
    pages_chunks = chunk_it(pages, pool_size)
    inputs = []
    for pages_chunk in pages_chunks:
        inputs.append((document.parser, document.pk, parse_refs, pages_chunk))

    # Close connection to allow the new processes to create their own.
    connection.close()

    # Split work
    progress_monitor.info('Sending {0} chunks to worker pool'
                          .format(len(inputs)))
    pool = Pool(pool_size)
    for result in pool.imap_unordered(sub_process_parse, inputs, 1):
        progress_monitor.work('Parsed 1/{0} of the pages'
                              .format(pool_size), 1)

    # Word Count
    word_count = 0
    for page in document.pages.all():
        word_count += page.word_count
    document.word_count = word_count
    document.save()
    progress_monitor.work('Counted Total Words', 1)

    pool.close()
    progress_monitor.done()
Example 6: main
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def main(datadir, convert_dir, crop_size):
    try:
        os.mkdir(convert_dir)
    except OSError:
        pass

    filenames = data_util.get_image_files(datadir)
    print('Resizing images in {} to {}'.format(datadir, convert_dir))

    n = len(filenames)
    batch_size = 500
    batches = n // batch_size + 1

    p = Pool()

    args = []
    for f in filenames:
        args.append((convert_size, (datadir, convert_dir, f, crop_size)))

    for i in range(batches):
        print('batch {:>2} / {}'.format(i + 1, batches))
        p.map(convert, args[i * batch_size: (i + 1) * batch_size])

    p.close()
    p.join()
    print('Done')
Example 7: stat_volume
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def stat_volume(stime, etime):
    tgsinfo = read_tgs_info()

    # from multiprocessing.dummy import Pool as ThreadPool
    from multiprocessing.pool import Pool
    pool = Pool()
    volume = [pool.apply_async(stat_tgs_volume, args=(stime, etime, int(cid))) for cid in tgsinfo.keys()]
    pool.close()
    print 'waiting to join....'
    pool.join()
    print 'start to writing to file...'

    volume0 = []
    for i, elem in enumerate(volume):
        volume0.append((tgsinfo.keys()[i], elem.get()))
    volume0.sort(key=lambda x: x[1], reverse=True)

    total = 0
    with open(os.path.join(root_dir, "result", "volume.txt"), "w") as f:
        for i, elem in enumerate(volume0):
            # cid = tgsinfo.keys()[i]
            # vol = elem.get()
            total += elem[1]
            line = "%5s,%s: %d\n" % (elem[0], tgsinfo[elem[0]]['kkmc'], elem[1])
            f.write(line)

    print 'totally %d records.' % (total)
Example 8: main
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def main():
    global pool
    pool = Pool(POOL_SIZE)

    nseeds = 100

    # print("== generating seeds...")
    # generate_seeds(nseeds)

    # print("running const density experiments...")
    # run_constant_density(0.1, range(100, 1000, 100), nseeds)

    # print("running const size experiments...")
    # run_constant_size(50, range(100, 1000, 100), nseeds)

    print("== running aggregate interval experiments (const density)...")
    # run_aggregate_interval_constant_density(0.1, range(100, 1000, 100), nseeds, [100, 500] + list(range(1000, 4000, 1000)))
    run_aggregate_interval_constant_density(0.1, range(100, 1000, 100), nseeds, [3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.2, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.3, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.4, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.5, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])

    pool.close()
    pool.join()
Example 9: extract_all_labels
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def extract_all_labels(filenames, out_filepath=DATA_FOLDER + 'labels.p', chunk_size=2000):
    print "EXTRACTING ALL LABELS INTO {0}".format(out_filepath)
    all_labels = []
    label_dict = {}

    filenames_chunks = util.chunks(filenames, chunk_size)
    for i, chunk in enumerate(filenames_chunks):
        pool = Pool(processes=util.CPU_COUNT)
        chunk_labels = pool.map(extract_labels, chunk)
        pool.close()

        for filepath, labels in zip(chunk, chunk_labels):
            if labels is not None:
                file_id = util.filename_without_extension(filepath)
                label_dict[file_id] = labels
                all_labels += labels
        print i + 1, '/', len(filenames_chunks)

    # Write labels to file
    with open(out_filepath, 'w') as f:
        pickle.dump(label_dict, f)

    print '\nLabels:'
    print len(set(all_labels))
    print Counter(all_labels)
Example 10: work
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def work(host, port, processes, threads, times):
    pool = Pool(processes,
                lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    p = Process(target=progress)
    p.daemon = True

    start = time.time()
    try:
        for chunk in divide(times, processes):
            pool.apply_async(thread, (host, port, threads, chunk))

        p.start()

        pool.close()
        pool.join()
        p.terminate()
        p.join()
    except KeyboardInterrupt:
        pool.terminate()
        p.terminate()
        p.join()
        pool.join()

    return time.time() - start
Example 11: _itergroundings
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def _itergroundings(self, simplify=True, unsatfailure=True):
    # generate all groundings
    if not self.formulas:
        return
    global global_fastConjGrounding
    global_fastConjGrounding = self
    batches = list(rndbatches(self.formulas, 20))
    batchsizes = [len(b) for b in batches]
    if self.verbose:
        bar = ProgressBar(width=100, steps=sum(batchsizes), color='green')
        i = 0
    if self.multicore:
        pool = Pool()
        try:
            for gfs in pool.imap(with_tracing(create_formula_groundings), batches):
                if self.verbose:
                    bar.inc(batchsizes[i])
                    bar.label(str(cumsum(batchsizes, i + 1)))
                    i += 1
                for gf in gfs: yield gf
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for gfs in imap(create_formula_groundings, batches):
            if self.verbose:
                bar.inc(batchsizes[i])
                bar.label(str(cumsum(batchsizes, i + 1)))
                i += 1
            for gf in gfs: yield gf
Example 12: ingest
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def ingest(
        dataset,
        cls,
        skip_if_exists=True,
        multi_process=False,
        multi_threaded=False,
        cores=None):
    pool = None
    if multi_process:
        pool = Pool(cores or cpu_count())
        map_func = pool.imap_unordered
    elif multi_threaded:
        pool = ThreadPool(cores or cpu_count())
        map_func = pool.imap_unordered
    else:
        map_func = map

    cls_args = repeat(cls)
    skip_args = repeat(skip_if_exists)

    map_func(ingest_one, zip(dataset, cls_args, skip_args))

    if pool is not None:
        # if we're ingesting using multiple processes or threads, the processing
        # should be parallel, but this method should be synchronous from the
        # caller's perspective (see the sketch after this example)
        pool.close()
        pool.join()
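Example 12 relies on multiprocessing.pool.Pool and multiprocessing.pool.ThreadPool exposing the same imap_unordered/close/join interface, which is what lets a single map_func variable hide the choice of backend. The following is a made-up, minimal illustration of that interchangeability; the helper _square is hypothetical and not part of the example's code base.

from multiprocessing.pool import Pool, ThreadPool

def _square(x):
    return x * x

if __name__ == '__main__':
    # Both pool classes share the imap_unordered/close/join API,
    # so the same driver code works for processes and threads alike.
    for pool_cls in (Pool, ThreadPool):
        pool = pool_cls(4)
        try:
            results = sorted(pool.imap_unordered(_square, range(8)))
        finally:
            pool.close()  # stop accepting new tasks
            pool.join()   # wait until the submitted tasks have completed
        assert results == [x * x for x in range(8)]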
Example 13: add_tree
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def add_tree(self, iterations=-1, snapshot=False):
    """
    Multi-core, fully utilizes underlying CPU to create the trees
    of the forest and stores them into the forest's list of trees
    :param iterations: number of trees to make, -1 means use default setting
    :return: None
    """
    print("Adding trees:", iterations)
    if iterations == -1:
        iterations = self.default_tree_count

    #########################
    # MULTI THREADED
    ########################
    pool = Pool()  # creates multiple processes equal to cores in machine
    outputs = pool.map(make_tree, [(self.data_copy(), self.depthlimit, self.weak_learner)
                                   for _ in range(iterations)])
    pool.close()
    pool.join()
    self.trees.extend(outputs)  # get the trees created and store them

    #########################
    # SINGLE THREADED
    ########################
    # for i in range(iterations):
    #     tree = Tree(self.data, self.bagging, self.bag_ratio, self.depthlimit, self.weak_learner)
    #     self.trees.append(tree)  # get the trees created and store them

    if snapshot:
        self.sum_squares(len(self.trees))  # get error after each snapshot, if this command is run multiple times
Example 14: Pool
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
class Pool(object):
    '''
    '''

    def __init__(self, **pool_kwargs):
        try:
            kw = KwargsCheck(MPIPool, pool_kwargs)
            self._pool = MPIPool(**kw)
            self.MPI = True
        except (ImportError, ValueError):
            kw = KwargsCheck(MultiPool, pool_kwargs)
            self._pool = MultiPool(**kw)
            self.MPI = False

        if self.MPI:
            if not self._pool.is_master():
                self._pool.wait()
                sys.exit(0)

    def map(self, f, x, args=(), kwargs={}):
        '''
        '''
        if len(args) or len(kwargs):
            w = wrap(f, *args, **kwargs)
            return self._pool.map(w, x)
        else:
            return self._pool.map(f, x)

    def close(self):
        self._pool.close()
Example 15: query_tweets
# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import close [as alias]
def query_tweets(query, limit=None, begindate=dt.date(2006, 3, 21), enddate=dt.date.today(), poolsize=20, lang=''):
    no_days = (enddate - begindate).days
    if poolsize > no_days:
        # Since we are assigning each pool a range of dates to query,
        # the number of pools should not exceed the number of dates.
        poolsize = no_days
    dateranges = [begindate + dt.timedelta(days=elem) for elem in linspace(0, no_days, poolsize + 1)]

    if limit:
        limit_per_pool = (limit // poolsize) + 1
    else:
        limit_per_pool = None

    queries = ['{} since:{} until:{}'.format(query, since, until)
               for since, until in zip(dateranges[:-1], dateranges[1:])]

    all_tweets = []
    try:
        pool = Pool(poolsize)
        logger.info('queries: {}'.format(queries))
        try:
            for new_tweets in pool.imap_unordered(partial(query_tweets_once, limit=limit_per_pool, lang=lang), queries):
                all_tweets.extend(new_tweets)
                logger.info('Got {} tweets ({} new).'.format(
                    len(all_tweets), len(new_tweets)))
        except KeyboardInterrupt:
            logger.info('Program interrupted by user. Returning all tweets '
                        'gathered so far.')
    finally:
        pool.close()
        pool.join()

    return all_tweets