This article collects typical usage examples of the Python method multiprocessing.pool.Pool.terminate. If you have been wondering what Pool.terminate does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read further about the enclosing class, multiprocessing.pool.Pool.

The following 15 code examples of Pool.terminate are shown, sorted by popularity by default.
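Before the examples, it is worth noting the key semantic difference: close() stops accepting new tasks and lets already-submitted work finish (and is then followed by join()), whereas terminate() stops the worker processes immediately, discarding any pending work. Below is a minimal, self-contained sketch of the most common terminate() pattern; the slow_square helper is made up for illustration and does not appear in the examples that follow.

import time
from multiprocessing.pool import Pool

def slow_square(x):
    # hypothetical worker function; stands in for any slow task
    time.sleep(0.1)
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=4)
    try:
        results = pool.map(slow_square, range(100))
        pool.close()   # no new tasks will be submitted
        pool.join()    # wait for the workers to exit cleanly
    except KeyboardInterrupt:
        pool.terminate()   # kill workers immediately, discarding pending tasks
        pool.join()        # terminate() should still be followed by join()
        raise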
Example 1: _itergroundings

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def _itergroundings(self, simplify=False, unsatfailure=False):
    global global_bpll_grounding
    global_bpll_grounding = self
    if self.multicore:
        pool = Pool(maxtasksperchild=1)
        try:
            for gndresult in pool.imap(with_tracing(create_formula_groundings), self.formulas):
                for fidx, stat in gndresult:
                    for (varidx, validx, val) in stat:
                        self._varidx2fidx[varidx].add(fidx)
                        self._addstat(fidx, varidx, validx, val)
                    checkmem()
                yield None
        except CtrlCException as e:
            pool.terminate()
            raise e
        pool.close()
        pool.join()
    else:
        for gndresult in imap(create_formula_groundings, self.formulas):
            for fidx, stat in gndresult:
                for (varidx, validx, val) in stat:
                    self._varidx2fidx[varidx].add(fidx)
                    self._addstat(fidx, varidx, validx, val)
            yield None
Example 2: work

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def work(host, port, processes, threads, times):
    pool = Pool(processes,
                lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    p = Process(target=progress)
    p.daemon = True
    start = time.time()
    try:
        for chunk in divide(times, processes):
            pool.apply_async(thread, (host, port, threads, chunk))
        p.start()
        pool.close()
        pool.join()
        p.terminate()
        p.join()
    except KeyboardInterrupt:
        pool.terminate()
        p.terminate()
        p.join()
        pool.join()
    return time.time() - start
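Note the initializer passed to Pool in this example: every worker starts by installing signal.signal(signal.SIGINT, signal.SIG_IGN), so Ctrl-C is delivered only to the parent process. The parent then tears everything down via pool.terminate() in the KeyboardInterrupt handler, instead of each worker dying with its own traceback.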
Example 3: _itergroundings

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def _itergroundings(self, simplify=True, unsatfailure=True):
    # generate all groundings
    if not self.formulas:
        return
    global global_fastConjGrounding
    global_fastConjGrounding = self
    batches = list(rndbatches(self.formulas, 20))
    batchsizes = [len(b) for b in batches]
    if self.verbose:
        bar = ProgressBar(width=100, steps=sum(batchsizes), color='green')
        i = 0
    if self.multicore:
        pool = Pool()
        try:
            for gfs in pool.imap(with_tracing(create_formula_groundings), batches):
                if self.verbose:
                    bar.inc(batchsizes[i])
                    bar.label(str(cumsum(batchsizes, i + 1)))
                    i += 1
                for gf in gfs: yield gf
        except Exception as e:
            logger.error('Error in child process. Terminating pool...')
            pool.close()
            raise e
        finally:
            pool.terminate()
            pool.join()
    else:
        for gfs in imap(create_formula_groundings, batches):
            if self.verbose:
                bar.inc(batchsizes[i])
                bar.label(str(cumsum(batchsizes, i + 1)))
                i += 1
            for gf in gfs: yield gf
Example 4: run

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def run():
    setup_logger()
    logger.info('Started')

    queue = multiprocessing.Queue(maxsize=EVENT_QUEUE_MAX_SIZE)
    pool = Pool(processes=WORKERS,
                initializer=worker,
                initargs=(queue,))
    event_handler = EventHandler(queue)
    observer = init_observer()
    try:
        delete_all_files(FRAMES_PATH)
        observer.schedule(event_handler, path=FRAMES_PATH, recursive=True)
        signal.signal(signal.SIGINT, signal_handler)
        observer.start()
        while True:
            pool._maintain_pool()  # restart workers if needed
            time.sleep(1)
            now = datetime.datetime.now()
            if now - event_handler.last_event > datetime.timedelta(minutes=1):
                logger.warning("No events received in the last minute.")
                # Sometimes watchdog stops receiving events.
                # We exit, so the process can be restarted.
                break
    except KeyboardInterrupt as err:
        logger.warning("Keyboard interruption")
    except Exception as err:
        logger.exception(err)
    finally:
        observer.stop()
        observer.join()
        pool.terminate()
    logger.warning("Bye")
Example 5: run

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def run(self, test_name=None, db_adapter=None):
    if db_adapter is None:
        db_adapter = DEFAULT_DATABASE_ADAPTER
    if test_name is None:
        test_name = '_'.join([db_adapter, datetime.datetime.now().strftime("%Y-%m-%d %H:%M")])
    print(''.join(['Running "', test_name, '" test']))
    print('Prepare database')
    adapter = adapter_factory(db_adapter)
    adapter.prepare_db()
    test_id = adapter.create_new_test(test_name)
    print('')
    print('Create user documents')
    pool = Pool(processes=10)
    params = [{'user_id': i, 'docs_per_user': DOCS_PER_USER, 'db_adapter': db_adapter}
              for i in range(1, USERS_COUNT + 1)]
    start = time.time()
    try:
        pool.map(create_users, params)
        print('Full time:', time.time() - start)
    finally:
        pool.terminate()
        del pool
    print('OK! Users were created!')
    print('')
    for i in range(1, MAX_PROCESSES + 1):
        print('Run test with %d processes' % i)
        pool = Pool(processes=i)
        params = [{'user_id': j, 'db_adapter': db_adapter} for j in range(1, USERS_COUNT + 1)]
        start = time.time()
        try:
            res = pool.map(update_users, params)
            full_time = time.time() - start
        finally:
            pool.terminate()
            del pool
        print('Test is finished! Save results')
        print('')
        adapter.save_results(test_id, res, i)
        print('Full time:', full_time)
        print('')
    print('Finish!')
Example 6: postprocd

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def postprocd(self, func, nthreads=1, pool=None):
    """
    Post-process some values into this chain.

    Args:
        func : a function which accepts all the keys in the chain
            and returns a dictionary of new keys to add. `func` must accept *all*
            keys in the chain; if there are ones you don't need, capture them
            with **_ in its call signature, e.g. to add in a parameter 'b'
            which is 'a' squared, use postprocd(lambda a,**_: {'b':a**2})
        nthreads : the number of threads to use
        pool : any worker pool which has a pool.map function.
            default: multiprocessing.Pool(nthreads)

    Returns:
        A new chain with the new values post-processed in.
        Does not alter the original chain. If for some rows in the
        chain `func` did not return all the keys, these will be filled
        in with `nan`.

    Note:
        This repeatedly calls `func` on rows in the chain, so it's very inefficient
        if you already have a vectorized version of your post-processing function.
        `postprocd` is mostly useful for slow non-vectorized post-processing functions,
        allowing convenient use of the `nthreads` option to this function.

        For the default implementation of `pool`, `func` must be picklable,
        meaning it must be a module-level function.
    """
    if pool is not None: _pool = pool
    elif nthreads != 1: _pool = Pool(nthreads)
    else: _pool = None

    mp = map if _pool is None else _pool.map
    try:
        dat = mp(partial(_postprocd_helper, func), self.iterrows())
    finally:
        if pool is None and _pool is not None: _pool.terminate()

    c = self.copy()
    allkeys = set(chain(*[d.keys() for d in dat]))
    c.update({k: array([d.get(k, nan) for d in dat]) for k in allkeys})
    return c
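A minimal usage sketch for the method above (hypothetical names: chain stands for an existing chain object with a key 'a'). Note that with the default pool, func must be picklable, i.e. defined at module level rather than a lambda:

def add_b(a, **_):
    # derive a new key 'b' from the existing key 'a'
    return {'b': a ** 2}

new_chain = chain.postprocd(add_b, nthreads=4)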
Example 7: run

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def run(config_uri, app_name=None, username=None, types=(), batch_size=500, processes=None):
    # multiprocessing.get_context is Python 3 only.
    from multiprocessing import get_context
    from multiprocessing.pool import Pool

    # Loading app will have configured from config file. Reconfigure here:
    logging.getLogger('snovault').setLevel(logging.DEBUG)

    testapp = internal_app(config_uri, app_name, username)
    connection = testapp.app.registry[CONNECTION]
    uuids = [str(uuid) for uuid in connection.__iter__(*types)]
    transaction.abort()
    logger.info('Total items: %d' % len(uuids))

    pool = Pool(
        processes=processes,
        initializer=initializer,
        initargs=(config_uri, app_name, username),
        context=get_context('forkserver'),
    )

    all_results = []
    try:
        for result in pool.imap_unordered(worker, batched(uuids, batch_size), chunksize=1):
            results = result['results']
            errors = sum(error for item_type, path, update, error in results)
            updated = sum(update for item_type, path, update, error in results)
            logger.info('Batch: Updated %d of %d (errors %d)' %
                        (updated, len(results), errors))
            all_results.extend(results)
    finally:
        pool.terminate()
        pool.join()

    def result_item_type(result):
        # Ensure we always return a string
        return result[0] or ''

    for item_type, results in itertools.groupby(
            sorted(all_results, key=result_item_type), key=result_item_type):
        results = list(results)
        errors = sum(error for item_type, path, update, error in results)
        updated = sum(update for item_type, path, update, error in results)
        logger.info('Collection %s: Updated %d of %d (errors %d)' %
                    (item_type, updated, len(results), errors))
Example 8: multiprocess_all_chromosomes

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def multiprocess_all_chromosomes(func, cls, *args, **kwargs):
    '''
    Convenience method for splitting up queries based on tag id.
    '''
    processes = current_settings.ALLOWED_PROCESSES
    set_chromosome_lists(cls, use_table=kwargs.get('use_table', None))
    p = Pool(processes)
    try:
        for chr_list in current_settings.CHR_LISTS:
            p.apply_async(func, args=[cls, chr_list, ] + list(args))
        p.close()
        p.join()
    except Exception as e:
        print('Terminating pool.')
        p.terminate()
        raise e
Example 9: join

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def join(self):
    try:
        while True:
            total_tasks = len(self.results)
            done_tasks = 0
            for result in self.results:
                if result.ready():
                    done_tasks += 1
            if done_tasks == total_tasks:
                self.progress('[%d task(s) completed, %d process(es)]',
                              done_tasks, self._processes)
                break
            else:
                self.progress('[%d task(s) completed, %d remaining, %d process(es)]',
                              done_tasks, total_tasks - done_tasks, self._processes)
            time.sleep(0.001)
    except KeyboardInterrupt:
        NativePool.terminate(self)
    return NativePool.join(self)
Example 10: main

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def main():
    global sqs_conn, sqs_queue
    args = parse_args()
    start_time = datetime.datetime.utcnow()
    first_start_time = start_time
    print("first start: %s" % first_start_time)

    with open(args.get('config'), 'r') as f:
        config = json.load(f)
    sqs_config = config.get('sqs')
    sqs_conn = boto.sqs.connect_to_region(**sqs_config)
    queue_name = 'baas20sr_usea_baas20sr_usea_index_all_dead'
    sqs_queue = sqs_conn.get_queue(queue_name)
    last_size = sqs_queue.count()
    print('Last Size: ' + str(last_size))

    pool = Pool(10)
    keep_going = True
    while keep_going:
        sqs_messages = sqs_queue.get_messages(
            num_messages=10,
            visibility_timeout=10,
            wait_time_seconds=10)
        if len(sqs_messages) > 0:
            pool.map(check_exists, sqs_messages)
        else:
            print('DONE!')
            pool.terminate()
            keep_going = False
Example 11: image_urls

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def image_urls(self):
    """ Iterates over json obj, gets image links
    Creates pool of workers, creates new workers """
    json_obj = self.jsonify()
    for post in json_obj['posts']:
        if 'ext' in post:
            self.total_count.value += 1
    self.thread_name = json_obj['posts'][0]['semantic_url']
    for post in json_obj['posts']:
        if 'ext' in post:
            filename = str(post['tim']) + post['ext']
            image_url = 'https://i.4cdn.org/{board}/{file}'.format(
                board=self.board, file=filename)
            self.filename.append(filename)
            self.downloads.append(image_url)
            self.download_image(image_url, filename)
            with self.counter.get_lock():
                self.counter.value += 1
            update_progress(self.counter.value, self.total_count.value)
    manager = Manager()
    pool_data = manager.list(self.downloads)
    partial_data = partial(self.download_image, pool_data)
    pool = Pool(self.workers)
    pool_map = pool.map_async(partial_data, self.filename)
    try:
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        print("Aborting")
        pool.terminate()
        pool.join()
Example 12: competition

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def competition():
    p = Pool(processes=4)
    total = 0
    scores = {}
    mycmd = 'examples.Greedy'
    competitors = ['examples.Greedy']
    levels = ['map00']
    pairs = itertools.product([mycmd], competitors)
    games = list(itertools.product(levels, pairs))
    print("Running against %i commanders on %i levels, for a total of %i games.\n" % (len(competitors), len(levels), len(games)))
    try:
        for level, results in p.map(run, games):
            for (_, bot), score in results.items():
                scores.setdefault(bot, [0, 0, 0, 0, 0])
                scores[bot][0] += score[0]  # Flags captured.
                scores[bot][1] += score[1]  # Flags conceded.
                scores[bot][2] += int(score[0] > score[1])   # Win.
                scores[bot][3] += int(score[0] == score[1])  # Draw.
                scores[bot][4] += int(score[1] > score[0])   # Loss.
            total += 1
    except KeyboardInterrupt:
        print("\nTerminating competition due to keyboard interrupt.")
        p.terminate()
        p.join()
    else:
        print("\n")
        for r, s in sorted(scores.items(), key=lambda i: i[1][2]*30 + i[1][3]*10 + i[1][0] - i[1][1], reverse=True):
            nick = r.replace('Commander', '')
            if nick in mycmd: continue
            print("{}\n\tCaptured {} flags and conceded {}.\n\tWon {}, drew {} and lost {}.\n".format(nick.upper(), *s))
        print('\n\nAll matches played against {}; best opponent at top of list.\n'.format(mycmd))
Example 13: raster2pyramid

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
# ......... (part of this code omitted here) .........
    os.path.dirname(os.path.realpath(__file__)),
    "tilify.py"
)
with rasterio.open(input_file, "r") as input_raster:
    output_bands = input_raster.count
    input_dtype = input_raster.dtypes[0]
    output_dtype = input_raster.dtypes[0]
    nodataval = input_raster.nodatavals[0]
    if not nodataval:
        nodataval = 0
    if output_format == "PNG":
        if output_bands > 3:
            output_bands = 3
            output_dtype = 'uint8'
    scales_minmax = ()
    if scale_method == "dtype_scale":
        for index in range(1, output_bands+1):
            scales_minmax += (DTYPE_RANGES[input_dtype], )
    elif scale_method == "minmax_scale":
        for index in range(1, output_bands+1):
            band = input_raster.read(index)
            scales_minmax += ((band.min(), band.max()), )
    elif scale_method == "crop":
        for index in range(1, output_bands+1):
            scales_minmax += ((0, 255), )
    if input_dtype == "uint8":
        scale_method = None
        scales_minmax = ()
        for index in range(1, output_bands+1):
            scales_minmax += ((None, None), )

# Create configuration
config = {}
config.update(
    process_file=process_file,
    output={
        "path": output_dir,
        "format": output_format,
        "type": pyramid_type,
        "bands": output_bands,
        "dtype": output_dtype
    },
    scale_method=scale_method,
    scales_minmax=scales_minmax,
    input_files={"raster": input_file},
    config_dir=os.getcwd(),
    process_minzoom=minzoom,
    process_maxzoom=maxzoom,
    nodataval=nodataval,
    resampling=resampling,
    bounds=bounds,
    pixelbuffer=5,
    baselevel={"zoom": maxzoom, "resampling": resampling}
)

LOGGER.info("preparing process ...")
try:
    mapchete = Mapchete(
        MapcheteConfig(
            config,
            zoom=zoom,
            bounds=bounds
        )
    )
except PyCompileError as error:
    print(error)
    return
except:
    raise

# Prepare output directory and logging
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
logging.config.dictConfig(get_log_config(mapchete))

for zoom in reversed(range(minzoom, maxzoom+1)):
    # Determine work tiles and run
    work_tiles = mapchete.get_work_tiles(zoom)
    func = partial(_worker,
                   mapchete=mapchete,
                   overwrite=overwrite)
    pool = Pool()
    try:
        pool.map_async(func, work_tiles)
        pool.close()
    except KeyboardInterrupt:
        LOGGER.info(
            "Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        break
    except:
        raise
    finally:
        pool.close()
        pool.join()
Example 14: benchmark_ensembles

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
def benchmark_ensembles(print_to_file=False):
    # load all possible models and datasets
    datasets = __load_datasets()
    models = Classifier.all_models()
    voting_systems = VotingSystem.all_voting_systems()
    ensemble_types = Ensemble.all_ensemble_types()

    # output format
    if print_to_file:
        output_string_format = "{0},{1},{8},{2},{3},{4},{5},{6},{7}"
    else:
        output_string_format = "{0: <15}\t{1: <28}\t{8: <15}{2: <13}\t{3: <18}\t{4: <28}\t{5: <22}\t{6: <15}\t{7: <22}"

    # print output header
    print(output_string_format
          .format('score', 'dataset', 'dataset_size', 'number_of_classes', 'model', 'ensemble', 'ensemble_size', 'voting_system', 'feature_count'))

    ensamble_sizes = [1, 2, 4, 6, 8, 12, 15, 20, 25, 30, 40, 50, 60, 70, 100][::-1]
    process_jobs_args = []
    l = Lock()
    for classifiers_in_ensamble in ensamble_sizes:
        for dataset in datasets:
            # load the dataset
            X, Y = dataset.load()
            # dataset info
            dataset_size = X.shape[0]
            classes_count = np.unique(Y).size
            # Split dataset into kfold datasets
            kfold_labels = __kfold_labels(Y)
            # Check if every class is in every set after kfold
            for train_index, test_index in kfold_labels:
                Y_train, Y_test = Y[train_index], Y[test_index]
                assert np.unique(Y_train).size == classes_count
            # evaluate models
            for model in models:
                # score ensembles based on current model
                for voting_system in voting_systems:
                    for ensemble_type in ensemble_types:
                        feature_labels = None
                        if ensemble_type == RandomSubspace:
                            feature_labels = __load_feature_labels(dataset.path())
                        # create ensemble
                        ensemble = ensemble_type(voting_system, type(model), classifiers_in_ensamble)
                        job_args = ensemble, X, Y, kfold_labels, dataset, model, output_string_format, classifiers_in_ensamble, voting_system.name(), feature_labels
                        process_jobs_args.append(job_args)

    pool = Pool(initializer=__init_proc, initargs=(l, ), processes=None)
    pool.map(__model_score_job, process_jobs_args)
    pool.terminate()
    pool = None
Example 15: list

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import terminate [as alias]
levels = ['map00', 'map01', 'map10', 'map11', 'map20', 'map30']
pairs = itertools.product([mycmd], competitors)
games = list(itertools.product(levels, pairs))
print("Running against %i commanders on %i levels, for a total of %i games.\n" % (len(competitors), len(levels), len(games)))
try:
    for level, results in p.map(run, games):
        for (_, bot), score in results.items():
            scores.setdefault(bot, [0, 0, 0, 0, 0])
            scores[bot][0] += score[0]  # Flags captured.
            scores[bot][1] += score[1]  # Flags conceded.
            scores[bot][2] += int(score[0] > score[1])   # Win.
            scores[bot][3] += int(score[0] == score[1])  # Draw.
            scores[bot][4] += int(score[1] > score[0])   # Loss.
        total += 1
except KeyboardInterrupt:
    print("\nTerminating competition due to keyboard interrupt.")
    p.terminate()
    p.join()
else:
    print("\n")
    for r, s in sorted(scores.items(), key=lambda i: i[1][2]*30 + i[1][3]*10 + i[1][0] - i[1][1], reverse=True):
        nick = r.replace('Commander', '')
        if nick in mycmd: continue
        print("{}\n\tCaptured {} flags and conceded {}.\n\tWon {}, drew {} and lost {}.\n".format(nick.upper(), *s))
    print('\n\nAll matches played against {}; best opponent at top of list.\n'.format(mycmd))