This article collects typical usage examples of Python's multiprocessing.set_start_method. If you are wondering what exactly multiprocessing.set_start_method does, or how to use it, the curated code examples below may help. You can also explore the other members of the multiprocessing module.
The following 15 code examples of multiprocessing.set_start_method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
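Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the typical way set_start_method is used: it is called once per process, inside the `if __name__ == '__main__'` guard, before any Process or Pool is created.

import multiprocessing as mp

def worker(x):
    return x * x

if __name__ == '__main__':
    # Pick the start method once, before creating any pools or processes.
    # force=True overrides a method that was already set; without it,
    # calling set_start_method() a second time raises RuntimeError.
    mp.set_start_method('spawn', force=True)
    with mp.Pool(2) as pool:
        print(pool.map(worker, range(4)))  # [0, 1, 4, 9]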
Example 1: init_processes
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def init_processes(addr, port, gpu_num, backend):
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    print(rank, size)
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    torch.cuda.set_device(rank % gpu_num)
    os.environ['MASTER_ADDR'] = addr
    os.environ['MASTER_PORT'] = port
    os.environ['WORLD_SIZE'] = str(size)
    os.environ['RANK'] = str(rank)
    dist.init_process_group(backend)
    print('initialize {} successfully (rank {})'.format(backend, rank))
    return rank, size
Example 2: main
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def main(args):
    with open(args.config) as f:
        # PyYAML >= 5.1 expects an explicit Loader argument.
        if version.parse(yaml.__version__) >= version.parse("5.1"):
            config = yaml.load(f, Loader=yaml.FullLoader)
        else:
            config = yaml.load(f)
    for k, v in config.items():
        setattr(args, k, v)
    # exp path
    if not hasattr(args, 'exp_path'):
        args.exp_path = os.path.dirname(args.config)
    # dist init
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn', force=True)
    dist_init(args.launcher, backend='nccl')
    # train
    trainer = Trainer(args)
    trainer.run()
Example 3: test_set_get
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
Example 4: watch
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def watch(args: argparse.Namespace, options: AnalysisOptions) -> int:
    # Avoid fork() because we've already imported the code we're watching:
    multiprocessing.set_start_method('spawn')
    if not args.files:
        print('No files or directories given to watch', file=sys.stderr)
        return 1
    try:
        with StateUpdater() as state_updater:
            watcher = Watcher(options, args.files, state_updater)
            watcher.check_changed()
            watcher.run_watch_loop()
    except KeyboardInterrupt:
        watcher._pool.terminate()
        print()
        print('I enjoyed working with you today!')
    return 0
Example 5: __init__
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def __init__(self):
    import asyncio
    import multiprocessing
    import signal
    import concurrent.futures

    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
        multiprocessing.set_start_method('spawn')
        executor = concurrent.futures.ProcessPoolExecutor()
    else:
        # The ProcessPoolExecutor is barely usable for our interactive use
        # case. On SIGINT any busy executor should stop. The only way this
        # does not explode is to ignore SIGINT before spawning the process
        # pool and re-enable SIGINT in every executor. In the main process we
        # have to ignore BrokenProcessPool errors, as we will likely hit them.
        # To "prime" the process pool, a dummy workload must be executed,
        # because the processes are spawned lazily.
        loop = asyncio.get_event_loop()
        origSigInt = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # fork early, before the process gets big
        if sys.platform == 'msys':
            multiprocessing.set_start_method('fork')
        else:
            multiprocessing.set_start_method('forkserver')
        executor = concurrent.futures.ProcessPoolExecutor()
        executor.submit(dummy).result()
        signal.signal(signal.SIGINT, origSigInt)
    loop.set_default_executor(executor)
    self.__loop = loop
    self.__executor = executor
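The dummy workload used to prime the pool is not part of this excerpt. A minimal placeholder that would satisfy this code (assumption: any picklable, module-level callable works) could be as simple as:

def dummy():
    # No-op job; submitting it once forces the lazily spawned worker
    # processes to start while SIGINT is still being ignored.
    pass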
Example 6: main
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def main(ctx: click.Context, config_path: Path, debug: bool) -> None:
    cfg = load_config(config_path, debug)
    multiprocessing.set_start_method('spawn')
    if ctx.invoked_subcommand is None:
        cfg['manager']['pid-file'].write_text(str(os.getpid()))
        log_sockpath = Path(f'/tmp/backend.ai/ipc/manager-logger-{os.getpid()}.sock')
        log_sockpath.parent.mkdir(parents=True, exist_ok=True)
        log_endpoint = f'ipc://{log_sockpath}'
        try:
            logger = Logger(cfg['logging'], is_master=True, log_endpoint=log_endpoint)
            with logger:
                ns = cfg['etcd']['namespace']
                setproctitle(f"backend.ai: manager {ns}")
                log.info('Backend.AI Gateway {0}', __version__)
                log.info('runtime: {0}', env_info())
                log_config = logging.getLogger('ai.backend.gateway.config')
                log_config.debug('debug mode enabled.')
                if cfg['manager']['event-loop'] == 'uvloop':
                    import uvloop
                    uvloop.install()
                    log.info('Using uvloop as the event loop backend')
                try:
                    aiotools.start_server(server_main_logwrapper,
                                          num_workers=cfg['manager']['num-proc'],
                                          args=(cfg, log_endpoint))
                finally:
                    log.info('terminated.')
        finally:
            if cfg['manager']['pid-file'].is_file():
                # check is_file() to prevent deleting /dev/null!
                cfg['manager']['pid-file'].unlink()
    else:
        # Click is going to invoke a subcommand.
        pass
Example 7: start_server
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def start_server(self, tg, optimizer, port):
    """
    Start the server with a copy of the arguments, to work around TensorFlow multiprocessing quirks.
    """
    try:
        multiprocessing.set_start_method('spawn')
    except Exception:
        # set_start_method() raises if the start method was already fixed.
        pass
    self.server = Process(target=self.start_service, args=(tg, optimizer, port))
    self.server.daemon = True
    self.server.start()
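The bare try/except here swallows the RuntimeError that set_start_method() raises when the start method has already been chosen. An alternative, used by several other examples on this page, is to check the current method first; a minimal sketch:

import multiprocessing

# Only set the start method if none has been chosen yet.
if multiprocessing.get_start_method(allow_none=True) is None:
    multiprocessing.set_start_method('spawn')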
Example 8: dist_init
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def dist_init(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError('Invalid launcher type: {}'.format(launcher))
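The launcher-specific helpers are not included in this excerpt. As an illustration of what _init_dist_pytorch typically does in this style of setup (a sketch under the assumption that the launcher exports RANK and that PyTorch with CUDA is available), the helper might look like:

import os

import torch
import torch.distributed as dist

def _init_dist_pytorch(backend, **kwargs):
    # torch.distributed.launch / torchrun export RANK for every worker;
    # pin each worker to one GPU before joining the process group.
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)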
Example 9: __init__
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def __init__(self):
    # append 'converter' folder to sys.path
    # this helps to load custom modules
    converter_folder = os.path.dirname(os.path.realpath(__file__)) + '/converter'
    if converter_folder not in sys.path:
        sys.path.insert(0, converter_folder)
    # parse CLI options
    self.config = Config()
    # first action after config available - set up the requested logging level
    logging.basicConfig(
        filename=self.config.log_file(),
        level=self.config.log_level(),
        format='%(asctime)s/%(created)f:%(levelname)s:%(message)s'
    )
    # and call parent
    super().__init__(pidfile=self.config.pid_file())
    # some verbosity
    logging.info('Starting')
    logging.debug(self.config)
    logging.info("sys.path")
    logging.info(pprint.pformat(sys.path))
    # mp.set_start_method('forkserver')
Example 10: scope_session
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def scope_session():
    if int(platform.python_version_tuple()[0]) >= 3:
        multiprocessing.set_start_method('forkserver')
    p = multiprocessing.Process(target=dummy_func)
    p.start()
    p.join()
    yield
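The decorator is missing from this excerpt; the generator shape (set-up, then yield with no tear-down) suggests a session-scoped test fixture. A sketch of how it could be wired up, assuming pytest is in use and that dummy_func is any no-op module-level callable (neither is shown in the source):

import multiprocessing
import platform

import pytest

def dummy_func():
    pass  # placeholder child workload; starts the forkserver early

@pytest.fixture(scope='session', autouse=True)
def scope_session():
    if int(platform.python_version_tuple()[0]) >= 3:
        multiprocessing.set_start_method('forkserver')
    p = multiprocessing.Process(target=dummy_func)
    p.start()
    p.join()
    yield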
Example 11: dist_init
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def dist_init(port):
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    if '[' in node_list:
        beg = node_list.find('[')
        pos1 = node_list.find('-', beg)
        if pos1 < 0:
            pos1 = 1000
        pos2 = node_list.find(',', beg)
        if pos2 < 0:
            pos2 = 1000
        node_list = node_list[:min(pos1, pos2)].replace('[', '')
    addr = node_list[8:].replace('-', '.')
    print(addr)
    os.environ['MASTER_PORT'] = port
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend='nccl')
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    return rank, world_size
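The address parsing is cluster-specific: it assumes hostnames whose first 8 characters are a site prefix and whose remainder is an IP address with dots written as dashes. Purely as a hypothetical illustration (these hostnames are not from the source):

# SLURM_NODELIST = 'SH-IDC1-10-5-30-62'
#   node_list[8:]        -> '10-5-30-62'
#   .replace('-', '.')   -> '10.5.30.62'   (used as MASTER_ADDR)
# SLURM_NODELIST = 'SH-IDC1-10-5-30-[62,64]'
#   truncated at the first ',' after '[', with '[' removed -> 'SH-IDC1-10-5-30-62'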
Example 12: __init__
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def __init__(self, world_size, run_process_fn, fn_args=None):
    env = os.environ.copy()
    env["WORLD_SIZE"] = str(world_size)
    multiprocessing.set_start_method("spawn")
    # Use random file so multiple jobs can be run simultaneously
    INIT_METHOD = "file:///tmp/crypten-rendezvous-{}".format(uuid.uuid1())
    env["RENDEZVOUS"] = INIT_METHOD
    self.processes = []
    for rank in range(world_size):
        process_name = "process " + str(rank)
        process = multiprocessing.Process(
            target=self.__class__._run_process,
            name=process_name,
            args=(rank, world_size, env, run_process_fn, fn_args),
        )
        self.processes.append(process)
    if crypten.mpc.ttp_required():
        ttp_process = multiprocessing.Process(
            target=self.__class__._run_process,
            name="TTP",
            args=(
                world_size,
                world_size,
                env,
                crypten.mpc.provider.TTPServer,
                None,
            ),
        )
        self.processes.append(ttp_process)
Example 13: test_context
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def test_context(self):
    for method in ('fork', 'spawn', 'forkserver'):
        try:
            ctx = multiprocessing.get_context(method)
        except ValueError:
            continue
        self.assertEqual(ctx.get_start_method(), method)
        self.assertIs(ctx.get_context(), ctx)
        self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
        self.assertRaises(ValueError, ctx.set_start_method, None)
        self.check_context(ctx)
Example 14: test_semaphore_tracker
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def test_semaphore_tracker(self):
    import subprocess
    cmd = '''if 1:
        import multiprocessing as mp, time, os
        mp.set_start_method("spawn")
        lock1 = mp.Lock()
        lock2 = mp.Lock()
        os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
        os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
        time.sleep(10)
    '''
    r, w = os.pipe()
    p = subprocess.Popen([sys.executable,
                          '-c', cmd % (w, w)],
                         pass_fds=[w],
                         stderr=subprocess.PIPE)
    os.close(w)
    with open(r, 'rb', closefd=True) as f:
        name1 = f.readline().rstrip().decode('ascii')
        name2 = f.readline().rstrip().decode('ascii')
    _multiprocessing.sem_unlink(name1)
    p.terminate()
    p.wait()
    time.sleep(2.0)
    with self.assertRaises(OSError) as ctx:
        _multiprocessing.sem_unlink(name2)
    # docs say it should be ENOENT, but OSX seems to give EINVAL
    self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
    err = p.stderr.read().decode('utf-8')
    p.stderr.close()
    expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
    self.assertRegex(err, expected)
    self.assertRegex(err, 'semaphore_tracker: %r: \[Errno' % name1)
Example 15: run
# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import set_start_method [as alias]
def run():
    # ask user for difficulty
    q_app = QtWidgets.QApplication([])
    q_widget = QtWidgets.QWidget()
    dialog = QtWidgets.QMessageBox(q_widget)
    dialog.addButton('Easy', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Medium', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Hard', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Impossible', QtWidgets.QMessageBox.ActionRole)
    dialog.setText('Choose difficulty:')
    ret = dialog.exec_()
    easy, medium, hard, impossible = range(4)
    sim_time = None
    if ret == easy:
        sim_time = 1
    elif ret == medium:
        sim_time = 3
    elif ret == hard:
        sim_time = 5
    elif ret == impossible:
        sim_time = 8
    mp.set_start_method('spawn')
    gui_process = mp.Process(target=start_client.main)
    gui_process.start()
    run_game.main(BlackAgent='human', WhiteAgent='monte_carlo',
                  sim_time=sim_time, gui=True)