This page collects typical usage examples of the Python method multiprocess.Pool.terminate. If you are wondering what Pool.terminate does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about the class it belongs to, multiprocess.Pool.
Below are 8 code examples of Pool.terminate, ordered by popularity.
Example 1: pcall_mp
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
def pcall_mp(fun, args, cores=cores):
    """Call fun on every input in args, in parallel."""
    mainpool = Pool(cores)  # create the pool; `cores` is a module-level default in the source project
    # print("Using", cores, "cores")
    out = mainpool.map(fun, args)  # collect the results as a list
    mainpool.terminate()  # kill the worker processes immediately
    del mainpool  # drop the reference to the pool
    return out
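A minimal usage sketch, assuming the definition above; the worker function `square` is an illustration, not part of the source project, and `cores` is passed explicitly to sidestep the module-level default:
def square(x):  # hypothetical worker function
    return x * x

print(pcall_mp(square, [1, 2, 3, 4], cores=2))  # -> [1, 4, 9, 16]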
Example 2: inner
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
def inner(*args):
    # `f` and `sec` are closed over from the enclosing timeout wrapper
    pool = Pool(processes=1)
    res = pool.apply_async(f, args)
    try:
        v = res.get(timeout=sec)  # raises TimeoutError if f takes longer than sec seconds
    except Exception as inst:
        print(inst)
        v = None
    finally:
        pool.terminate()  # stop the worker whether or not f finished
    return v
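`inner` only makes sense inside an enclosing closure that binds `f` and `sec`. A plausible reconstruction of that wrapper, as a sketch; the decorator name `timelimit` is an assumption, since the enclosing code is not shown:
from multiprocess import Pool

def timelimit(sec):
    # Hypothetical enclosing decorator: run f in a one-worker pool,
    # give up after sec seconds and return None instead.
    def wrap(f):
        def inner(*args):
            pool = Pool(processes=1)
            res = pool.apply_async(f, args)
            try:
                v = res.get(timeout=sec)
            except Exception as inst:
                print(inst)
                v = None
            finally:
                pool.terminate()
            return v
        return inner
    return wrap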
Example 3: eval_EFG
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
def eval_EFG(self, x, num_procs=None, info=False):
    from multiprocess import Pool, cpu_count
    if not num_procs:
        num_procs = cpu_count()
    num_samples = self.parameters['num_samples']
    pool = Pool(num_procs)
    # split the samples evenly across workers (numpy imported as np at module level)
    num = int(np.ceil(float(num_samples)/float(num_procs)))
    results = list(zip(*pool.map(lambda i: self.eval_EFG_sequential(x, num, i, info),
                                 range(num_procs),
                                 chunksize=1)))
    pool.terminate()
    pool.join()
    if not info:
        assert(len(results) == 4)
    else:
        assert(len(results) == 5)
    assert(all([len(vals) == num_procs for vals in results]))
    # average the per-process partial results
    return [sum(vals)/float(num_procs) for vals in results]
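The terminate()/join() pair used here is the generic teardown for a pool that is used exactly once: map() has already returned, so the workers can be torn down immediately. A minimal standalone sketch of the same pattern:
from multiprocess import Pool

if __name__ == '__main__':
    pool = Pool(4)
    partials = pool.map(lambda i: i * i, range(4))  # one chunk of work per worker
    pool.terminate()  # kill the workers; all results are already collected
    pool.join()       # wait for the worker processes to exit
    print(sum(partials) / 4.0)  # -> 3.5, averaging the partial results
Note that the lambdas here and in the example above survive the trip to the workers because multiprocess serializes with dill; the standard-library multiprocessing would refuse to pickle them.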
Example 4: ProcessPoolExecutor
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
class ProcessPoolExecutor(Executor):
    """Process Pool Executor"""

    def __init__(self):
        super(ProcessPoolExecutor, self).__init__()
        import os
        from multiprocess import Pool
        self.pool = Pool(os.cpu_count() or 1)

    def submit(self, func, *args, **kwargs):
        from concurrent.futures import Future
        fut = Future()
        # positional arguments map to apply_async(func, args, kwds, callback, error_callback)
        self.tasks[fut] = self.pool.apply_async(
            func, args, kwargs, fut.set_result, fut.set_exception
        )
        fut.add_done_callback(self.tasks.pop)
        return fut

    def shutdown(self, wait=True):
        super(ProcessPoolExecutor, self).shutdown(wait)
        self.pool.terminate()
        self.pool.join()
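A hypothetical usage sketch, assuming the Executor base class (not shown in the source) supplies the `tasks` mapping and a `shutdown` hook:
executor = ProcessPoolExecutor()
fut = executor.submit(pow, 2, 10)  # runs in a worker process
print(fut.result())                # -> 1024; blocks until the success callback fires
executor.shutdown()                # terminate() then join() the underlying pool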
Example 5: eval_EQ
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
def eval_EQ(self, p, num_procs=None, quiet=True):
    """
    Evaluates E[Q(p,r)] and its gradient in parallel.

    Parameters
    ----------
    p : generator powers
    num_procs : number of parallel processes
    quiet : flag
    """
    from multiprocess import Pool, cpu_count
    if not num_procs:
        num_procs = cpu_count()
    num_samples = self.parameters['num_samples']
    pool = Pool(num_procs)
    # split the samples evenly across workers (numpy imported as np at module level)
    num = int(np.ceil(float(num_samples)/float(num_procs)))
    results = list(zip(*pool.map(lambda i: self.eval_EQ_sequential(p, num, i, quiet),
                                 range(num_procs),
                                 chunksize=1)))
    pool.terminate()
    pool.join()
    assert(len(results) == 2)
    assert(all([len(vals) == num_procs for vals in results]))
    # average the per-process partial results
    return [sum(vals)/float(num_procs) for vals in results]
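Per the docstring and the length-2 assertion, the caller presumably unpacks the expected value and its gradient; a hedged sketch of a call site (assumed, not from the source):
Q, gradQ = self.eval_EQ(p, num_procs=4)  # hypothetical call inside the same class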
Example 6: fmultiprocess
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
def fmultiprocess(
        log,
        function,
        inputArray,
        poolSize=False,
        timeout=3600,
        **kwargs):
    """multiprocess pool

    **Key Arguments:**
        - ``log`` -- logger
        - ``function`` -- the function to multiprocess
        - ``inputArray`` -- the array to be iterated over
        - ``poolSize`` -- limit the number of CPUs used in the multiprocess job
        - ``timeout`` -- time in seconds after which to raise a timeout error if the processes have not completed

    **Return:**
        - ``resultArray`` -- the array of results

    **Usage:**

        .. code-block:: python

            from fundamentals import multiprocess
            # DEFINE AN INPUT ARRAY
            inputArray = range(10000)
            results = multiprocess(log=log, function=functionName, poolSize=10, timeout=300,
                                   inputArray=inputArray, otherFunctionKeyword="cheese")
    """
    log.debug('starting the ``multiprocess`` function')

    # DEFINE POOL SIZE - NUMBER OF CPU CORES TO USE (BEST = ALL - 1)
    if not poolSize:
        poolSize = psutil.cpu_count()

    if poolSize:
        p = Pool(processes=poolSize)
    else:
        p = Pool()

    cpuCount = psutil.cpu_count()
    chunksize = int((len(inputArray) + 1) / (cpuCount * 3))

    if chunksize == 0:
        chunksize = 1

    # MAP-REDUCE THE WORK OVER MULTIPLE CPU CORES
    # NOTE: inspect.getargspec is removed in Python 3.11; inspect.getfullargspec
    # is the modern replacement.
    if "log" in inspect.getargspec(function)[0]:
        # the worker expects a `log` keyword; pass ours through
        mapfunc = partial(function, log=log, **kwargs)
        resultArray = p.map_async(mapfunc, inputArray, chunksize=chunksize)
    else:
        mapfunc = partial(function, **kwargs)
        resultArray = p.map_async(mapfunc, inputArray, chunksize=chunksize)

    resultArray = resultArray.get(timeout=timeout)

    p.close()
    p.terminate()

    log.debug('completed the ``multiprocess`` function')
    return resultArray
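fmultiprocess inspects the worker's signature for a `log` parameter and injects the logger when it finds one. A sketch of a worker that exercises that branch; the name `squareIt` is illustrative, not from the source:
def squareIt(x, log):
    log.debug('processing %s' % (x,))  # the injected logger is usable inside the worker
    return x * x

results = fmultiprocess(log=log, function=squareIt, inputArray=range(100), poolSize=4)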
Example 7: partial_dependence
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
# ......... part of the code omitted here .........
                                                 sample=True,
                                                 n_samples=10)
        examples = DataManager(examples, feature_names=self.data_set.feature_ids)
        modelinstance._build_model_metadata(examples)

        # if you don't pass a grid, build one.
        grid = np.array(grid)
        if not grid.any():
            # Currently, if a given feature has fewer unique values than the value
            # of grid_resolution, then the grid will be set to those unique values.
            # Otherwise it will take the percentile range according to
            # grid_resolution bins.
            grid = self.data_set.generate_grid(feature_ids,
                                               grid_resolution=grid_resolution,
                                               grid_range=grid_range)
        else:
            # want to ensure all grids have 2 axes
            if len(grid.shape) == 1 and \
                    (StaticTypes.data_types.is_string(grid[0]) or StaticTypes.data_types.is_numeric(grid[0])):
                grid = grid[:, np.newaxis].T
                grid_resolution = grid.shape[1]

        self.interpreter.logger.debug("Grid shape used for pdp: {}".format(grid.shape))
        self.interpreter.logger.debug("Grid resolution for pdp: {}".format(grid_resolution))

        # make sure the data_set module is giving us the correct data structure
        self._check_grid(grid, feature_ids)

        # generate data
        data_sample = self.data_set.generate_sample(strategy=sampling_strategy,
                                                    sample=sample,
                                                    n_samples=n_samples,
                                                    bin_count=bin_count)

        assert type(data_sample) == self.data_set.data_type, "Something went wrong\n" \
                                                             "There's a type mismatch between\n" \
                                                             "the sampled data and the original\n" \
                                                             "training set. Check Skater.models\n"

        _pdp_metadata = self._build_metadata_dict(modelinstance,
                                                  feature_ids,
                                                  self.data_set.feature_ids,
                                                  filter_classes,
                                                  variance_type)

        self.interpreter.logger.debug("Shape of sampled data: {}".format(data_sample.shape))
        self.interpreter.logger.debug("Feature Ids: {}".format(feature_ids))
        self.interpreter.logger.debug("PD metadata: {}".format(_pdp_metadata))

        # cartesian product of the grid
        grid_expanded = pd.DataFrame(list(product(*grid))).values

        if grid_expanded.shape[0] <= 0:
            empty_grid_expanded_err_msg = "Must have at least 1 pdp value; " \
                                          "grid shape: {}".format(grid_expanded.shape)
            raise(exceptions.MalformedGridError(empty_grid_expanded_err_msg))

        predict_fn = modelinstance._get_static_predictor()

        n_jobs = None if n_jobs < 0 else n_jobs

        pd_func = functools.partial(_compute_pd,
                                    estimator_fn=predict_fn,
                                    grid_expanded=grid_expanded,
                                    pd_metadata=_pdp_metadata,
                                    input_data=data_sample,
                                    filter_classes=filter_classes)
        arg_list = [i for i in range(grid_expanded.shape[0])]
        executor_instance = Pool(n_jobs)

        if progressbar:
            self.interpreter.logger.warn("Progress bars slow down runs by 10-20%. For slightly\n"
                                         "faster runs, do progress_bar=False")
            mapper = executor_instance.imap
            p = ProgressBar(len(arg_list), units='grid cells')
        else:
            mapper = executor_instance.map

        pd_list = []
        try:
            if n_jobs == 1:
                raise ValueError("Skipping to single processing")
            for pd_row in mapper(pd_func, arg_list):
                if progressbar:
                    p.animate()
                pd_list.append(pd_row)
        except:  # bare except: fall back to single-process mapping on any failure
            self.interpreter.logger.warn("Multiprocessing failed, going single process")
            for pd_row in map(pd_func, arg_list):
                if progressbar:
                    p.animate()
                pd_list.append(pd_row)
        finally:
            executor_instance.close()
            executor_instance.join()
            executor_instance.terminate()

        if return_metadata:
            return pd.DataFrame(list(pd_list)), _pdp_metadata
        else:
            return pd.DataFrame(list(pd_list))
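A minimal standalone sketch of the same "pool first, single process on failure" pattern used above; `work` and `tasks` are hypothetical stand-ins:
from multiprocess import Pool

def work(i):
    return i * i

if __name__ == '__main__':
    tasks = range(16)
    pool = Pool(4)
    try:
        rows = pool.map(work, tasks)
    except Exception:
        rows = list(map(work, tasks))  # graceful single-process fallback
    finally:
        pool.close()
        pool.join()
        pool.terminate()  # mirrors the example's order; after join() this is a no-op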
Example 8: test
# Required import: from multiprocess import Pool [as alias]
# Or: from multiprocess.Pool import terminate [as alias]
# ......... part of the code omitted here .........
            sys.stdout.write('.')
    print()
    print()

    print('Testing IMapIterator.next() with timeout:', end='')
    it = pool.imap(calculatestar, TASKS)
    while 1:
        sys.stdout.flush()
        try:
            sys.stdout.write('\n\t%s' % it.next(0.02))
        except StopIteration:
            break
        except TimeoutError:
            sys.stdout.write('.')
    print()
    print()

    #
    # Testing callback
    #

    print('Testing callback:')

    A = []
    B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]

    r = pool.apply_async(mul, (7, 8), callback=A.append)
    r.wait()

    r = pool.map_async(pow3, range(10), callback=A.extend)
    r.wait()

    if A == B:
        print('\tcallbacks succeeded\n')
    else:
        print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B))

    #
    # Check there are no outstanding tasks
    #

    assert not pool._cache, 'cache = %r' % pool._cache

    #
    # Check close() methods
    #

    print('Testing close():')

    for worker in pool._pool:
        assert worker.is_alive()

    result = pool.apply_async(time.sleep, [0.5])
    pool.close()
    pool.join()

    assert result.get() is None

    for worker in pool._pool:
        assert not worker.is_alive()

    print('\tclose() succeeded\n')

    #
    # Check terminate() method
    #

    print('Testing terminate():')

    pool = Pool(2)
    ignore = pool.apply(pow3, [2])
    results = [pool.apply_async(time.sleep, [10]) for i in range(10)]
    pool.terminate()
    pool.join()

    for worker in pool._pool:
        assert not worker.is_alive()

    print('\tterminate() succeeded\n')

    #
    # Check garbage collection
    #

    print('Testing garbage collection:')

    pool = Pool(2)
    processes = pool._pool
    ignore = pool.apply(pow3, [2])
    results = [pool.apply_async(time.sleep, [10]) for i in range(10)]

    del results, pool
    time.sleep(0.2)

    for worker in processes:
        assert not worker.is_alive()

    print('\tgarbage collection succeeded\n')
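The test body references helpers defined in the omitted part (`calculatestar`, `mul`, `pow3`, `TASKS`). In the classic multiprocessing example this test descends from, they look roughly like the following; this is a reconstruction, not the omitted source itself:
def calculate(func, args):
    return func(*args)  # unpack a (function, args) task

def calculatestar(args):
    return calculate(*args)

def mul(a, b):
    return a * b

def pow3(x):
    return x ** 3

TASKS = [(mul, (i, 7)) for i in range(10)]  # shape assumed from the imap call above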