本文整理汇总了Python中os.sched_getaffinity方法的典型用法代码示例。如果您正苦于以下问题:Python os.sched_getaffinity方法的具体用法?Python os.sched_getaffinity怎么用?Python os.sched_getaffinity使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块os的用法示例。
在下文中一共展示了os.sched_getaffinity方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def main():
    """Compute features for every train and test track and save the result.

    Reads track ids from the train-label CSV and the test mp3 directory,
    builds an empty feature DataFrame, fills it using a process pool, then
    calls save() and test() on the completed table.
    """
    tids_train = pd.read_csv('data/train_labels.csv', index_col=0).index
    tids_test = sorted(glob.glob('data/crowdai_fma_test/*.mp3'))
    # Keep only the file stem: strip the directory and the '.mp3' suffix.
    tids_test = [path.split('/')[-1][:-4] for path in tids_test]
    tids = tids_train.append(pd.Index(tids_test, name='track_id'))
    features = pd.DataFrame(index=tids, columns=columns(), dtype=np.float32)

    # More than usable CPUs to be CPU bound, not I/O bound. Beware memory.
    try:
        nb_workers = int(1.5 * len(os.sched_getaffinity(0)))
    except AttributeError:
        # sched_getaffinity is not available on all platforms (e.g. macOS);
        # fall back to a fixed worker count.
        nb_workers = 10
    print('Working with {} processes.'.format(nb_workers))

    # Use the pool as a context manager so worker processes are terminated
    # and reaped even if feature computation raises (the original leaked
    # the pool: it was never closed or joined).
    with multiprocessing.Pool(nb_workers) as pool:
        it = pool.imap_unordered(compute_features, tids)
        for row in tqdm(it, total=tids.size):
            features.loc[row.name] = row

    save(features, 10)
    test(features, 10)
示例2: available_cpu_cores
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def available_cpu_cores(fallback: int = 1) -> int:
    """
    Returns the number (an int) of CPU cores available to this **process**, if
    determinable, otherwise the number of CPU cores available to the
    **computer**, if determinable, otherwise the *fallback* number (which
    defaults to 1).
    """
    try:
        # Note that this is the correct function to use, not os.cpu_count(), as
        # described in the latter's documentation.
        #
        # The reason, which the documentation does not detail, is that
        # processes may be pinned or restricted to certain CPUs by setting
        # their "affinity". This is not typical except in high-performance
        # computing environments, but if it is done, then a computer with say
        # 24 total cores may only allow our process to use 12. If we tried to
        # naively use all 24, we'd end up with two threads across the 12 cores.
        # This would degrade performance rather than improve it!
        return len(os.sched_getaffinity(0))
    except AttributeError:
        # Only sched_getaffinity's absence (non-Linux platforms) should be
        # caught here — the original bare `except:` would also have swallowed
        # KeyboardInterrupt/SystemExit and any unrelated bug.
        # cpu_count() returns None if the value is indeterminable.
        return os.cpu_count() or fallback
示例3: test_cpu_count_affinity
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def test_cpu_count_affinity():
    """Check the reported CPU count honours the process affinity mask:
    a child interpreter pinned to CPU 0 via taskset must report 1 CPU."""
    if not hasattr(os, 'sched_getaffinity') or not hasattr(shutil, 'which'):
        pytest.skip()
    taskset_bin = shutil.which('taskset')
    python_bin = shutil.which('python')
    if taskset_bin is None or python_bin is None:
        # pytest.skip() raises internally, so the original
        # `raise pytest.skip()` never reached its `raise`; call it
        # directly, consistent with the other skips in this test.
        pytest.skip()
    try:
        os.sched_getaffinity(0)
    except NotImplementedError:
        pytest.skip()
    # Pin the child to CPU 0 and run the probe snippet in it.
    res = check_output([taskset_bin, '-c', '0',
                        python_bin, '-c', cpu_count_cmd])
    assert res.strip().decode('utf-8') == '1'
示例4: auto_detect_cpus
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def auto_detect_cpus():
    # Determine a sensible worker count: prefer the CPUs this process is
    # actually allowed to run on (its affinity mask) over the machine-wide
    # count, and degrade gracefully on platforms missing either API.
    try:
        from os import sched_getaffinity
    except ImportError:
        if os.environ.get("TRAVIS") == "true":
            # workaround https://bitbucket.org/pypy/pypy/issues/2375
            return 2
        try:
            from os import cpu_count
        except ImportError:
            # os.cpu_count() was added in Python 3.4; older interpreters
            # fall back to the multiprocessing equivalent.
            from multiprocessing import cpu_count
    else:
        # sched_getaffinity is available: count only the CPUs in this
        # process's affinity mask, shadowing cpu_count with a local helper.
        def cpu_count():
            return len(sched_getaffinity(0))
    try:
        n = cpu_count()
    except NotImplementedError:
        # multiprocessing.cpu_count() may raise on exotic platforms.
        return 1
    # cpu_count() can return None (indeterminable); never return fewer than 1.
    return n if n else 1
示例5: get_num_build_jobs
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def get_num_build_jobs():
    """
    Get number of parallel build jobs set by the --parallel command line
    argument of setup.py
    If the command did not receive a setting the environment variable
    NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
    processors on the system, with a maximum of 8 (to prevent
    overloading the system if there a lot of CPUs).
    Returns
    -------
    out : int
        number of parallel jobs that can be run
    """
    from numpy.distutils.core import get_distribution

    # CPUs usable by this process (affinity-aware on Linux), capped at 8.
    try:
        detected = len(os.sched_getaffinity(0))
    except AttributeError:
        detected = multiprocessing.cpu_count()
    detected = min(detected, 8)

    envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", detected))
    dist = get_distribution()
    # may be None during configuration
    if dist is None:
        return envjobs

    # any of these three may have the job set, take the largest
    settings = [getattr(dist.get_command_obj(name), 'parallel', None)
                for name in ('build', 'build_ext', 'build_clib')]
    configured = [value for value in settings if value is not None]
    return max(configured) if configured else envjobs
示例6: test_cpu_affinity
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def test_cpu_affinity(self):
    # Exercise psutil's Process.cpu_affinity() getter/setter and, where the
    # platform provides it, cross-check against os.sched_getaffinity().
    p = psutil.Process()
    initial = p.cpu_affinity()
    assert initial, initial
    # Restore the process's original affinity even if the test fails.
    self.addCleanup(p.cpu_affinity, initial)
    if hasattr(os, "sched_getaffinity"):
        self.assertEqual(initial, list(os.sched_getaffinity(p.pid)))
    # The affinity list must not contain duplicate CPU ids.
    self.assertEqual(len(initial), len(set(initial)))
    all_cpus = list(range(len(psutil.cpu_percent(percpu=True))))
    # Work around travis failure:
    # https://travis-ci.org/giampaolo/psutil/builds/284173194
    for n in all_cpus if not TRAVIS else initial:
        # Pin to a single CPU and verify the setting round-trips.
        p.cpu_affinity([n])
        self.assertEqual(p.cpu_affinity(), [n])
        if hasattr(os, "sched_getaffinity"):
            self.assertEqual(p.cpu_affinity(),
                             list(os.sched_getaffinity(p.pid)))
        # also test num_cpu()
        if hasattr(p, "num_cpu"):
            self.assertEqual(p.cpu_affinity()[0], p.num_cpu())
    # [] is an alias for "all eligible CPUs"; on Linux this may
    # not be equal to all available CPUs, see:
    # https://github.com/giampaolo/psutil/issues/956
    p.cpu_affinity([])
    if LINUX:
        self.assertEqual(p.cpu_affinity(), p._proc._get_eligible_cpus())
    else:
        self.assertEqual(p.cpu_affinity(), all_cpus)
    if hasattr(os, "sched_getaffinity"):
        self.assertEqual(p.cpu_affinity(),
                         list(os.sched_getaffinity(p.pid)))
    # Passing a non-iterable is a usage error.
    self.assertRaises(TypeError, p.cpu_affinity, 1)
    p.cpu_affinity(initial)
    # it should work with all iterables, not only lists
    p.cpu_affinity(set(all_cpus))
    p.cpu_affinity(tuple(all_cpus))
示例7: threads_to_use
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def threads_to_use():
    """Returns the number of cores we are allowed to run on"""
    # Prefer the affinity mask when the platform exposes it; otherwise use
    # the machine-wide count. Never exceed the configured thread ceiling.
    if hasattr(os, 'sched_getaffinity'):
        return min(_max_threads, len(os.sched_getaffinity(0)))
    return min(_max_threads, os.cpu_count())
示例8: test_cpu_affinity
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def test_cpu_affinity(self):
    # Exercise psutil's Process.cpu_affinity() getter/setter and, where the
    # platform provides it, cross-check against os.sched_getaffinity().
    p = psutil.Process()
    initial = p.cpu_affinity()
    assert initial, initial
    # Restore the process's original affinity even if the test fails.
    self.addCleanup(p.cpu_affinity, initial)
    if hasattr(os, "sched_getaffinity"):
        self.assertEqual(initial, list(os.sched_getaffinity(p.pid)))
    # The affinity list must not contain duplicate CPU ids.
    self.assertEqual(len(initial), len(set(initial)))
    all_cpus = list(range(len(psutil.cpu_percent(percpu=True))))
    # Work around travis failure:
    # https://travis-ci.org/giampaolo/psutil/builds/284173194
    for n in all_cpus if not TRAVIS else initial:
        # Pin to a single CPU and verify the setting round-trips.
        p.cpu_affinity([n])
        self.assertEqual(p.cpu_affinity(), [n])
        if hasattr(os, "sched_getaffinity"):
            self.assertEqual(p.cpu_affinity(),
                             list(os.sched_getaffinity(p.pid)))
        # also test num_cpu()
        if hasattr(p, "num_cpu"):
            self.assertEqual(p.cpu_affinity()[0], p.num_cpu())
    # [] is an alias for "all eligible CPUs"; on Linux this may
    # not be equal to all available CPUs, see:
    # https://github.com/giampaolo/psutil/issues/956
    p.cpu_affinity([])
    if LINUX:
        self.assertEqual(p.cpu_affinity(), p._proc._get_eligible_cpus())
    else:
        self.assertEqual(p.cpu_affinity(), all_cpus)
    if hasattr(os, "sched_getaffinity"):
        self.assertEqual(p.cpu_affinity(),
                         list(os.sched_getaffinity(p.pid)))
    # Passing a non-iterable is a usage error.
    self.assertRaises(TypeError, p.cpu_affinity, 1)
    p.cpu_affinity(initial)
    # it should work with all iterables, not only lists
    if not TRAVIS:
        p.cpu_affinity(set(all_cpus))
        p.cpu_affinity(tuple(all_cpus))
示例9: _worker_count
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def _worker_count():
cpu_count = 1
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
return cpu_count
示例10: cpu_count
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def cpu_count():
    """Return how many CPUs the current process (pid 0) may run on."""
    return len(os.sched_getaffinity(0))
示例11: num_cpus
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def num_cpus() -> Optional[int]:
    "Get number of cpus"
    # hasattr guard replaces the try/except AttributeError dance; the two
    # are equivalent because only the attribute lookup can raise it here.
    if hasattr(os, 'sched_getaffinity'):
        return len(os.sched_getaffinity(0))
    # May be None when the count is indeterminable.
    return os.cpu_count()
示例12: cpu_count
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def cpu_count(self) -> int:
    """Number of CPUs in this process's affinity mask."""
    usable = os.sched_getaffinity(0)
    return len(usable)
示例13: _workers_count
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def _workers_count():
cpu_count = 0
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = os.cpu_count()
return cpu_count * 4
示例14: get_num_build_jobs
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def get_num_build_jobs():
    """
    Get number of parallel build jobs set by the --parallel command line
    argument of setup.py
    If the command did not receive a setting the environment variable
    NPY_NUM_BUILD_JOBS checked and if that is unset it returns 1.
    Returns
    -------
    out : int
        number of parallel jobs that can be run
    """
    from numpy.distutils.core import get_distribution

    # CPUs usable by this process (affinity-aware on Linux).
    try:
        detected_cpus = len(os.sched_getaffinity(0))
    except AttributeError:
        detected_cpus = multiprocessing.cpu_count()

    envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", detected_cpus))
    dist = get_distribution()
    # may be None during configuration
    if dist is None:
        return envjobs

    # any of these three may have the job set, take the largest
    settings = [getattr(dist.get_command_obj(name), 'parallel', None)
                for name in ('build', 'build_ext', 'build_clib')]
    given = [value for value in settings if value is not None]
    return max(given) if given else envjobs
示例15: log_execution_env_state
# 需要导入模块: import os [as 别名]
# 或者: from os import sched_getaffinity [as 别名]
def log_execution_env_state(app_args, gitroot='.'):
    """Log information about the execution environment.
    It is recommended to log this information so it can be used for referencing
    at a later time.
    Args:
        app_args (dict): the command line arguments passed to the application
        git_root: the path to the .git root directory
    """
    def log_git_state():
        """Log the state of the git repository.
        It is useful to know what git tag we're using, and if we have outstanding code.
        """
        repo = Repo(gitroot)
        assert not repo.bare
        if repo.is_dirty():
            logger.debug("Git is dirty")
        try:
            branch_name = repo.active_branch.name
        except TypeError:
            # active_branch raises TypeError when HEAD is detached.
            branch_name = "None, Git is in 'detached HEAD' state"
        logger.debug("Active Git branch: %s", branch_name)
        # Use logging's lazy %-args like every other call in this function,
        # instead of eagerly formatting with the % operator.
        logger.debug("Git commit: %s", repo.head.commit.hexsha)

    logger.debug("Number of CPUs: %d", len(os.sched_getaffinity(0)))
    logger.debug("Number of GPUs: %d", torch.cuda.device_count())
    logger.debug("CUDA version: %s", torch.version.cuda)
    logger.debug("CUDNN version: %s", torch.backends.cudnn.version())
    logger.debug("Kernel: %s", platform.release())
    if HAVE_LSB:
        logger.debug("OS: %s", lsb_release.get_lsb_information()['DESCRIPTION'])
    logger.debug("Python: %s", sys.version)
    logger.debug("PyTorch: %s", torch.__version__)
    logger.debug("Numpy: %s", np.__version__)
    log_git_state()
    logger.debug("App args: %s", app_args)