This article collects typical usage examples of the Python method joblib.cpu_count. If you are wondering what exactly joblib.cpu_count does and how to use it, the curated code samples below may help. You can also explore further usage examples from the joblib module.
Seven code examples of the joblib.cpu_count method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
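Before the individual examples, the method in its simplest form: joblib.cpu_count() reports the number of CPUs joblib considers available, and it is the value that all of the examples below fall back to when the caller does not fix the number of jobs.

import joblib

# Number of CPUs joblib will schedule work on by default
print(joblib.cpu_count())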
Example 1: electre1
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def electre1(nmtx, ncriteria, nweights, p, q, njobs=None):
    # determine the njobs
    njobs = njobs or joblib.cpu_count()

    # get the concordance and discordance info
    # multiprocessing environment
    with joblib.Parallel(n_jobs=njobs) as jobs:
        mtx_concordance = concordance(nmtx, ncriteria, nweights, jobs)
        mtx_discordance = discordance(nmtx, ncriteria, jobs)

    with np.errstate(invalid='ignore'):
        outrank = (
            (mtx_concordance >= p) & (mtx_discordance <= q))

    kernel_mask = ~outrank.any(axis=0)
    kernel = np.where(kernel_mask)[0]
    return kernel, outrank, mtx_concordance, mtx_discordance
# =============================================================================
# OO
# =============================================================================
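A hedged usage sketch for Example 1, with hypothetical data. It assumes the MAX criteria-direction constant and the concordance/discordance helpers defined in the same module as electre1, which are not shown in the excerpt:

import numpy as np

# Hypothetical decision matrix: 3 alternatives x 2 criteria, already normalized
nmtx = np.array([
    [0.5, 0.2],
    [0.3, 0.5],
    [0.2, 0.3],
])
ncriteria = np.array([MAX, MAX])  # MAX is assumed from the same module
nweights = np.array([0.6, 0.4])

kernel, outrank, concord, discord = electre1(
    nmtx, ncriteria, nweights, p=0.65, q=0.35)
print(kernel)  # indexes of the alternatives in the kernel (not outranked)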
Example 2: __init__
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def __init__(self, features=[], n_jobs=1, indexing_type='label',
             **kwargs):

    logging.info("comparing - initialize {} class".format(
        self.__class__.__name__)
    )

    self.features = []
    self.add(features)

    # public
    if n_jobs == -1:
        self.n_jobs = cpu_count()
    else:
        self.n_jobs = n_jobs
    self.indexing_type = indexing_type  # label or position

    # logging
    self._i = 1
    self._i_max = None
    self._n = []
    self._eta = []
    self._output_log_total = True

    # private
    self._compare_functions = []

    if isinstance(features, (pandas.MultiIndex, pandas.Index)):
        warnings.warn(
            "It seems you are using the older version of the Compare API, "
            "see the documentation about how to update to the new API. "
            "http://recordlinkage.readthedocs.io/"
            "en/latest/ref-compare.html",
            DeprecationWarning
        )
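A minimal usage sketch for Example 2, assuming the current recordlinkage Compare API; the column names and candidate_links are placeholders:

import recordlinkage

# n_jobs=-1 takes the cpu_count() branch above, so comparison vectors are
# computed on all available cores
comp = recordlinkage.Compare(n_jobs=-1)
comp.exact('given_name', 'given_name', label='given_name')
comp.string('surname', 'surname', method='jarowinkler', label='surname')
# features = comp.compute(candidate_links, df_a, df_b)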
Example 3: cythonize_extensions
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def cythonize_extensions(top_path, config):
    """Check that a recent Cython is available and cythonize extensions"""
    _check_cython_version()
    from Cython.Build import cythonize

    # Fast fail before cythonization if compiler fails compiling basic test
    # code even without OpenMP
    basic_check_build()

    # check simple compilation with OpenMP. If it fails scikit-learn will be
    # built without OpenMP and the test test_openmp_supported in the test suite
    # will fail.
    # `check_openmp_support` compiles a small test program to see if the
    # compilers are properly configured to build with OpenMP. This is expensive
    # and we only want to call this function once.
    # The result of this check is cached as a private attribute on the sklearn
    # module (only at build-time) to be used twice:
    # - First to set the value of SKLEARN_OPENMP_PARALLELISM_ENABLED, the
    #   cython build-time variable passed to the cythonize() call.
    # - Then in the build_ext subclass defined in the top-level setup.py file
    #   to actually build the compiled extensions with OpenMP flags if needed.

    n_jobs = 1
    with contextlib.suppress(ImportError):
        import joblib
        if LooseVersion(joblib.__version__) > LooseVersion("0.13.0"):
            # earlier joblib versions don't account for CPU affinity
            # constraints, and may over-estimate the number of available
            # CPU particularly in CI (cf loky#114)
            n_jobs = joblib.cpu_count()

    config.ext_modules = cythonize(
        config.ext_modules,
        nthreads=n_jobs,
        compiler_directives={'language_level': 3})
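The same guarded fallback from Example 3 in isolation, as a standalone sketch. Note that distutils, the home of LooseVersion, is deprecated and removed in Python 3.12+, so newer build scripts would use packaging.version instead:

import contextlib

n_jobs = 1  # serial fallback if joblib is missing or too old
with contextlib.suppress(ImportError):
    import joblib
    from distutils.version import LooseVersion
    if LooseVersion(joblib.__version__) > LooseVersion("0.13.0"):
        # recent joblib respects CPU affinity, so its count is trustworthy
        n_jobs = joblib.cpu_count()
print(n_jobs)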
Example 4: test_effective_n_jobs_with_context
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def test_effective_n_jobs_with_context():
    assert_equal(threaded.effective_n_jobs_with_context(), 1, "Default to 1 job")
    assert_equal(
        threaded.effective_n_jobs_with_context(-1),
        joblib.cpu_count(),
        "Use all cores with num_jobs=-1",
    )
    assert_equal(
        threaded.effective_n_jobs_with_context(2), 2, "Use n_jobs if specified"
    )
    with joblib.parallel_backend("threading"):
        assert_equal(
            threaded.effective_n_jobs_with_context(),
            joblib.cpu_count(),
            "Use all cores with context manager",
        )
    with joblib.parallel_backend("threading", n_jobs=3):
        assert_equal(
            threaded.effective_n_jobs_with_context(),
            3,
            "Use n_jobs from context manager",
        )
    with joblib.parallel_backend("threading", n_jobs=3):
        assert_equal(
            threaded.effective_n_jobs_with_context(2),
            2,
            "Use n_jobs specified rather than from context manager",
        )
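For context, a minimal sketch of a function that would satisfy the assertions above. This is an assumption, not the tested module's actual code, and it relies on joblib.parallel.get_active_backend(), which returns a (backend, n_jobs) tuple in joblib versions before 1.3:

import joblib
from joblib.parallel import get_active_backend  # joblib < 1.3

def effective_n_jobs_with_context(n_jobs=None):
    """Resolve n_jobs, deferring to an active parallel_backend context."""
    if n_jobs is None:
        # No explicit request: inherit n_jobs from an enclosing
        # joblib.parallel_backend context (defaults to 1 outside any context)
        _, n_jobs = get_active_backend()
    return joblib.effective_n_jobs(n_jobs)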
Example 5: _get_n_jobs
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def _get_n_jobs(n_jobs):
    """Get number of jobs for the computation.

    This function reimplements the logic of joblib to determine the actual
    number of jobs depending on the cpu count. If -1 all CPUs are used.
    If 1 is given, no parallel computing code is used at all, which is useful
    for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
    Thus for n_jobs = -2, all CPUs but one are used.

    Parameters
    ----------
    n_jobs : int
        Number of jobs stated in joblib convention.

    Returns
    -------
    n_jobs : int
        The actual number of jobs as positive integer.
    """
    if n_jobs < 0:
        return max(cpu_count() + 1 + n_jobs, 1)
    elif n_jobs == 0:
        raise ValueError('Parameter n_jobs == 0 has no meaning.')
    else:
        return n_jobs
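A worked example of the arithmetic, on a hypothetical machine where cpu_count() returns 8:

_get_n_jobs(4)     # -> 4  (positive values pass through unchanged)
_get_n_jobs(-1)    # -> 8  (8 + 1 - 1: all CPUs)
_get_n_jobs(-2)    # -> 7  (8 + 1 - 2: all CPUs but one)
_get_n_jobs(-100)  # -> 1  (clamped to at least one job by max(..., 1))
_get_n_jobs(0)     # raises ValueError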
Example 6: parallel_load
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def parallel_load(self, indexes, transforms=None):
    """Load images and optionally their labels.

    Load the images at the given indexes, and their corresponding labels
    (at training time), or the images alone (at test time).

    Parameters
    ==========

    indexes : list of int
        Indexes of the images to load.
        Each should be between 0 and self.nb_examples - 1.

    Returns
    =======

    either a tuple `(xs, ys)` or `xs`, where:
        - xs is a list of numpy arrays of shape
          (height, width, nb_color_channels), one per requested index.
        - ys is a list of integers, the classes of the images in `xs`.
    At training time, `y_array` is given, and a tuple (xs, ys) is
    returned. At test time, `y_array` is `None`, and `xs` is returned.
    """
    from skimage.io import imread
    from joblib import delayed, Parallel, cpu_count

    for index in indexes:
        assert 0 <= index < self.nb_examples

    n_jobs = cpu_count()
    filenames = [
        os.path.join(self.folder, '{}'.format(self.X_array[index]))
        for index in indexes]
    xs = Parallel(n_jobs=n_jobs, backend='threading')(
        delayed(imread)(filename) for filename in filenames)

    if transforms is not None:
        from functools import partial
        transform = partial(_image_transform, transforms=transforms)
        xs = Parallel(n_jobs=n_jobs, backend='threading')(
            delayed(transform)(x) for x in xs)

    if self.y_array is not None:
        ys = [self.y_array[index] for index in indexes]
        return xs, ys
    else:
        return xs
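The core pattern of Example 6 in isolation, as a self-contained sketch with hypothetical file paths. The threading backend skips pickling, and because reading files is largely I/O-bound, the threads still overlap usefully despite the GIL:

from joblib import Parallel, delayed, cpu_count
from skimage.io import imread

filenames = ['img_0.png', 'img_1.png', 'img_2.png']  # hypothetical paths

# One imread task per file, spread over all CPUs via threads
xs = Parallel(n_jobs=cpu_count(), backend='threading')(
    delayed(imread)(filename) for filename in filenames)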
Example 7: simus
# Required module: import joblib [as alias]
# Or: from joblib import cpu_count [as alias]
def simus(
    nmtx, ncriteria, nweights,
    rank_by=1, b=None, solver="pulp", njobs=None
):
    # determine the njobs
    njobs = njobs or joblib.cpu_count()

    t_nmtx = nmtx.T

    # check the b array and complete the missing values
    b = np.asarray(b)
    if None in b:
        mins = np.min(t_nmtx, axis=1)
        maxs = np.max(t_nmtx, axis=1)

        auto_b = np.where(ncriteria == MAX, maxs, mins)
        b = np.where(b.astype(bool), b, auto_b)

    # multiprocessing environment
    with joblib.Parallel(n_jobs=njobs) as jobs:
        # create and execute the stages
        stages, stage_results = solve_stages(
            t_nmtx=t_nmtx, b=b, ncriteria=ncriteria,
            solver=solver, jobs=jobs)

        # first methods points
        points1 = first_method(stage_results)
        points2, tita_j_p, tita_j_d, doms, dom_by_crit = second_method(
            stage_results, jobs)

    points = [points1, points2][rank_by - 1]
    ranking = rank.rankdata(points, reverse=True)

    return (
        ranking, stages, stage_results, points1,
        points2, tita_j_p, tita_j_d, doms, dom_by_crit)
# =============================================================================
# OO
# =============================================================================
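One design note shared by Example 1 and Example 7: njobs = njobs or joblib.cpu_count() treats every falsy value the same, so a caller passing njobs=0 silently gets all CPUs rather than an error, unlike the stricter _get_n_jobs in Example 5. An explicit None check avoids that edge case:

# stricter variant of the fallback used in electre1/simus (a sketch)
njobs = joblib.cpu_count() if njobs is None else njobs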