This article collects typical usage examples of the os.cpu_count method in Python. If you are wondering what os.cpu_count does, how to call it, or where it is useful, the curated code examples below may help. You can also read further about the other members of the os module.
The following shows 15 code examples of os.cpu_count, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
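As a quick orientation before the examples, this is the basic contract of os.cpu_count itself: it returns the number of logical CPUs as an int, or None when that cannot be determined, which is why most of the snippets below add an or-fallback. A minimal sketch:

import os

# os.cpu_count() returns an int (the number of logical CPUs) or None,
# so callers usually fall back to a safe default such as 1.
logical_cpus = os.cpu_count() or 1
print('logical CPUs:', logical_cpus)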
Example 1: add_base_arguments
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def add_base_arguments(parser, default_help):
    import os
    from os.path import join as path_join

    home = os.environ.get('HOME')
    mono_sources_default = os.environ.get('MONO_SOURCE_ROOT', '')

    parser.add_argument('--verbose-make', action='store_true', default=False, help=default_help)
    # --jobs supports not passing an argument, in which case the 'const' is used,
    # which is the number of CPU cores on the host system.
    parser.add_argument('--jobs', '-j', nargs='?', const=str(os.cpu_count()), default='1', help=default_help)
    parser.add_argument('--configure-dir', default=path_join(home, 'mono-configs'), help=default_help)
    parser.add_argument('--install-dir', default=path_join(home, 'mono-installs'), help=default_help)

    if mono_sources_default:
        parser.add_argument('--mono-sources', default=mono_sources_default, help=default_help)
    else:
        parser.add_argument('--mono-sources', required=True)

    parser.add_argument('--mxe-prefix', default='/usr', help=default_help)
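The '--jobs' option above relies on argparse's nargs='?' / const combination: omitting the flag gives the default, a bare '-j' gives the const (the host's CPU count), and '-j N' gives N. A minimal standalone sketch of just that pattern (not the original project's parser):

import argparse
import os

parser = argparse.ArgumentParser()
# Same pattern as '--jobs' above: absent -> default, bare flag -> const, value -> value.
parser.add_argument('--jobs', '-j', nargs='?', const=str(os.cpu_count()), default='1')
print(parser.parse_args([]).jobs)           # '1'  (flag omitted -> default)
print(parser.parse_args(['-j']).jobs)       # str(os.cpu_count())  (bare flag -> const)
print(parser.parse_args(['-j', '4']).jobs)  # '4'  (explicit value)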
Example 2: workers
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def workers(master_host, master_port, relay_socket_path, num_workers):
    # Start the relay
    master_redis_cfg = {'host': master_host, 'port': master_port}
    relay_redis_cfg = {'unix_socket_path': relay_socket_path}
    if os.fork() == 0:
        RelayClient(master_redis_cfg, relay_redis_cfg).run()
        return
    # Start the workers
    noise = SharedNoiseTable()  # Workers share the same noise
    num_workers = num_workers if num_workers else os.cpu_count()
    logging.info('Spawning {} workers'.format(num_workers))
    for _ in range(num_workers):
        if os.fork() == 0:
            run_worker(relay_redis_cfg, noise=noise)
            return
    os.wait()
Example 3: auto_detect_cpus
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def auto_detect_cpus():
    try:
        from os import sched_getaffinity  # python 3 only

        def cpu_count():
            return len(sched_getaffinity(0))
    except ImportError:
        # python 2 options
        try:
            from os import cpu_count
        except ImportError:
            from multiprocessing import cpu_count
    try:
        n = cpu_count()
    except NotImplementedError:  # pragma: no cov
        n = None  # pragma: no cov
    return n if n else 1
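Example 3 prefers os.sched_getaffinity over os.cpu_count because the former counts only the CPUs the current process is allowed to run on (e.g. under taskset, cgroups or container CPU limits), whereas os.cpu_count reports every logical CPU in the machine. A small sketch of the difference (sched_getaffinity exists only on Linux and a few other Unixes, hence the hasattr guard):

import os

total = os.cpu_count() or 1                # all logical CPUs, or a fallback of 1
if hasattr(os, 'sched_getaffinity'):       # not available on Windows/macOS
    usable = len(os.sched_getaffinity(0))  # CPUs this process may actually use
else:
    usable = total
print('total:', total, 'usable:', usable)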
Example 4: enable_mining
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def enable_mining(proxy):
    cores = os.cpu_count()
    if cores > 2:
        threads_count = cores - 2
    else:
        threads_count = 1
    tries = 0
    while True:
        try:
            proxy.setgenerate(True, threads_count)
            break
        except (RPCError, HttpError) as e:
            print(e, " Waiting chain startup\n")
            time.sleep(10)
            tries += 1
            if tries > 30:
                raise ChildProcessError("Node did not start correctly, aborting\n")
Example 5: cpu_count
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def cpu_count():  # pragma: no cover
    try:
        import os
        # doesn't exist in python2, and can return None
        return os.cpu_count() or 1
    except AttributeError:
        pass
    try:
        import multiprocessing
        # doesn't have to be implemented
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass
    return 1
Example 6: cpu_count
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def cpu_count(logical=True):
    """Return the number of logical CPUs in the system (same as
    os.cpu_count() in Python 3.4).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    if logical:
        ret = _psplatform.cpu_count_logical()
    else:
        ret = _psplatform.cpu_count_physical()
    if ret is not None and ret < 1:
        ret = None
    return ret
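Example 6 is psutil's own implementation; from the caller's side the public API is simply psutil.cpu_count(). A minimal usage sketch (assumes the third-party psutil package is installed):

import psutil

print(psutil.cpu_count())               # logical CPUs, comparable to os.cpu_count()
print(psutil.cpu_count(logical=False))  # physical cores only; may return None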
Example 7: workers
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def workers(master_host, master_port, relay_socket_path, num_workers):
    # Start the relay
    master_redis_cfg = {'host': master_host, 'port': master_port}
    relay_redis_cfg = {'unix_socket_path': relay_socket_path}
    if os.fork() == 0:
        RelayClient(master_redis_cfg, relay_redis_cfg).run()
        return
    # Start the workers
    noise = SharedNoiseTable()  # Workers share the same noise
    num_workers = num_workers if num_workers else os.cpu_count()
    print('Spawning workers')
    logging.info('Spawning {} workers'.format(num_workers))
    for _ in range(num_workers):
        if os.fork() == 0:
            run_worker(relay_redis_cfg, noise=noise)
            return
    os.wait()
Example 8: __build_qcustomplot_library
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def __build_qcustomplot_library(self):
    if WINDOWS_HOST:
        qcustomplot_static = join(self.build_temp, 'release', 'qcustomplot.lib')
    else:
        qcustomplot_static = join(self.build_temp, 'libqcustomplot.a')
    if exists(qcustomplot_static):
        return

    os.makedirs(self.build_temp, exist_ok=True)
    os.chdir(self.build_temp)
    print('Make static qcustomplot library...')
    self.spawn([self.qmake, join(ROOT, 'QCustomPlot/src/qcp-staticlib.pro')])

    # AFAIK only nmake does not support the -j option
    has_multiprocess = not (WINDOWS_HOST and "nmake" in self.make)
    make_cmdline = [self.make]
    if has_multiprocess:
        make_cmdline.extend(('-j', str(os.cpu_count())))
    make_cmdline.append('release')
    self.spawn(make_cmdline)
    os.chdir(ROOT)

    self.static_lib = qcustomplot_static
    # Possibly it's a hack
    qcustomplot_ext = self.extensions[0]
    qcustomplot_ext.extra_objects = [qcustomplot_static]
Example 9: handle_request
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
async def handle_request(self, reader, writer):
    req_host, req_port = writer.get_extra_info('peername')
    peername = f'{req_host}:{req_port}'
    self._logger.info(f'Connection from {peername}')
    data = await reader.readline()
    nw, port, worker_loop, func_pickle, data_pickle = data.split()
    num_workers = int(nw) or os.cpu_count()
    self._logger.info(
        f'Starting up {num_workers} processors for {peername}')
    # start processors that will connect back to the remote server
    asyncio.gather(
        *[asyncio.create_subprocess_exec(
            'distex_proc',
            '-H', req_host,
            '-p', port,
            '-l', worker_loop,
            '-f', func_pickle,
            '-d', data_pickle,
            stdout=None, stderr=None)
          for _ in range(num_workers)])
    writer.close()
Example 10: get_num_workers
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def get_num_workers(jobs):
    """
    Parameters
    ----------
    jobs : int
        How many jobs to run in parallel. Zero or a negative value means
        use os.cpu_count() + jobs workers, i.e. leave that many CPU cores free.

    Returns
    -------
    int
        How many subprocesses to use.
    """
    num_workers = jobs
    if num_workers <= 0:
        num_workers = os.cpu_count() + jobs
    if num_workers < 0 or num_workers > os.cpu_count():
        raise RuntimeError("System doesn't have so many cpu cores: {} vs {}".format(jobs, os.cpu_count()))
    return num_workers
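To make the sign convention concrete, here is a hypothetical session with the function above (the exact numbers depend on your machine's core count):

import os

cores = os.cpu_count() or 1
# With get_num_workers from example 10 in scope, on a multi-core machine:
#   get_num_workers(2)          -> 2            (use two workers)
#   get_num_workers(0)          -> cores        (use every core)
#   get_num_workers(-1)         -> cores - 1    (leave one core free)
#   get_num_workers(cores + 1)  -> RuntimeError (more workers than cores)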
Example 11: _add_from_urls
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def _add_from_urls(
    self, dataset, urls, destination, destination_names, extract, progress
):
    files = []
    max_workers = min(os.cpu_count() - 1, 4) or 1
    with concurrent.futures.ThreadPoolExecutor(max_workers) as executor:
        futures = {
            executor.submit(
                self._add_from_url,
                dataset=dataset,
                url=url,
                destination=destination,
                extract=extract,
                filename=name,
                progress=progress
            )
            for url, name in zip(urls, destination_names)
        }

        for future in concurrent.futures.as_completed(futures):
            files.extend(future.result())

    return files
Example 12: main
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def main():
    print("Main Process PID: {}".format(multiprocessing.current_process().pid))
    myProcess = MyProcess()
    myProcess.start()
    myProcess.join()

    processes = []
    for i in range(os.cpu_count()):
        process = MyProcess()
        processes.append(process)
        process.start()

    for process in processes:
        process.join()
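Example 12 assumes a MyProcess class defined elsewhere in the same tutorial; a minimal stand-in (a guess at its shape, not the original class) that makes the snippet self-contained could look like this:

import multiprocessing

class MyProcess(multiprocessing.Process):
    # Hypothetical stand-in: each child simply reports its own PID.
    def run(self):
        print("Child Process PID: {}".format(multiprocessing.current_process().pid))

On platforms that use the 'spawn' start method (Windows, recent macOS), the call to main() should additionally sit under an if __name__ == '__main__': guard.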
Example 13: cpu_count
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def cpu_count():
    num_processes = os.cpu_count()
    if num_processes is None:
        num_processes = 2
    return num_processes
Example 14: build_project
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def build_project(project_dir, num_processes=0):
    subprocess.check_call([NDKBUILD, '-j%d' % cpu_count(), '-C', project_dir])
Example 15: __init__
# Required import: import os [as alias]
# Or: from os import cpu_count [as alias]
def __init__(self,
             model: BaseRLModel = PPO2,
             policy: BasePolicy = MlpLnLstmPolicy,
             reward_strategy: BaseRewardStrategy = IncrementalProfit,
             exchange_args: Dict = {},
             **kwargs):
    self.logger = kwargs.get('logger', init_logger(__name__, show_debug=kwargs.get('show_debug', True)))

    self.Model = model
    self.Policy = policy
    self.Reward_Strategy = reward_strategy
    self.exchange_args = exchange_args
    self.tensorboard_path = kwargs.get('tensorboard_path', None)
    self.input_data_path = kwargs.get('input_data_path', 'data/input/coinbase-1h-btc-usd.csv')
    self.params_db_path = kwargs.get('params_db_path', 'sqlite:///data/params.db')
    self.date_format = kwargs.get('date_format', ProviderDateFormat.DATETIME_HOUR_24)

    self.model_verbose = kwargs.get('model_verbose', 1)
    self.n_envs = kwargs.get('n_envs', os.cpu_count())
    self.n_minibatches = kwargs.get('n_minibatches', self.n_envs)
    self.train_split_percentage = kwargs.get('train_split_percentage', 0.8)
    self.data_provider = kwargs.get('data_provider', 'static')

    self.initialize_data()
    self.initialize_optuna()

    self.logger.debug(f'Initialize RLTrader: {self.study_name}')