This article collects typical usage examples of the Python method multiprocessing.get_context. If you are wondering what exactly multiprocessing.get_context does and how to use it, the curated code examples below may help. You can also explore further usage examples of the multiprocessing module, in which this method is defined.
The following presents 15 code examples of multiprocessing.get_context, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
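Before diving into the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the worker function and message are illustrative only) of the core pattern they all share: request a context object bound to an explicit start method, then create processes and IPC primitives from that context instead of from the module-level API.

import multiprocessing as mp

def worker(q):
    # Runs in the child process and reports back through the queue.
    q.put('hello from child')

if __name__ == '__main__':
    # 'spawn' is available on every platform; 'fork' and 'forkserver'
    # are POSIX-only, so consult mp.get_all_start_methods() when unsure.
    ctx = mp.get_context('spawn')
    q = ctx.Queue()  # create the queue from the same context
    p = ctx.Process(target=worker, args=(q,))
    p.start()
    print(q.get())  # -> 'hello from child'
    p.join()

Objects created from one context are generally not compatible with processes from a different context, which is why the examples below consistently build their queues, pipes, events, and shared values from the same ctx they use for Process.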
Example 1: test_multi_client
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_multi_client():
    ip_config = open("rpc_ip_config_mul_client.txt", "w")
    ip_addr = get_local_usable_addr()
    ip_config.write('%s 1\n' % ip_addr)
    ip_config.close()
    ctx = mp.get_context('spawn')
    pserver = ctx.Process(target=start_server, args=(10, "rpc_ip_config_mul_client.txt"))
    pclient_list = []
    for i in range(10):
        pclient = ctx.Process(target=start_client, args=("rpc_ip_config_mul_client.txt",))
        pclient_list.append(pclient)
    pserver.start()
    time.sleep(1)
    for i in range(10):
        pclient_list[i].start()
    for i in range(10):
        pclient_list[i].join()
    pserver.join()
Example 2: test_kernel_error_checking
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_kernel_error_checking():
    # Running tests that may throw exceptions out of worker threads will stop CI testing
    # if not run in a separate process (with its own address space for CUDA compatibility).
    try:
        mpctx = mp.get_context('spawn')
    except:
        print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
              sys.version_info[0:2], file=sys.stderr, end='')
    else:
        with discard_stderr():
            for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
                p = mpctx.Process(target=f)
                p.start()
                p.join()
                assert p.exitcode != 0,\
                    "Expected a synchronous kernel error from %s(), none seen." % f.__name__
Example 3: test_kv_store
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_kv_store():
    # start 10 servers and 10 clients
    ip_config = open("kv_ip_config.txt", "w")
    ip_addr = get_local_usable_addr()
    ip_config.write('%s 10\n' % ip_addr)
    ip_config.close()
    ctx = mp.get_context('spawn')
    pserver_list = []
    pclient_list = []
    for i in range(10):
        pserver = ctx.Process(target=start_server, args=(i, 10))
        pserver.start()
        pserver_list.append(pserver)
    time.sleep(2)
    for i in range(10):
        pclient = ctx.Process(target=start_client, args=(10,))
        pclient.start()
        pclient_list.append(pclient)
    for i in range(10):
        pclient_list[i].join()
    for i in range(10):
        pserver_list[i].join()
Example 4: __init__
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def __init__(self, history_cls, history_args):
    super().__init__()
    self._last_needed_feed_count = 0
    self.results = {}
    self.pending_counts = {}
    # Make sure to use 'spawn' and not 'fork' to allow shared CUDA tensors
    # on linux
    ctx = mp.get_context('spawn')
    self.close_event = ctx.Event()
    self.qevent = ctx.Event()
    # Queue for requests, such as getting training data
    self.request_queue = ctx.Queue(10)
    # Queue for updates like new acting samples and priority updates
    self.update_queue = ctx.Queue(10)
    # Queue for sending back request results
    self.result_queue = ctx.Queue()
    self._process = ctx.Process(
        target=self.run,
        args=(history_cls, cloudpickle.dumps(history_args)))
    self._process.start()
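A note on the design choice in this example: a forked child inherits the parent's CUDA state, which the CUDA runtime does not support, so sharing CUDA tensors between processes (e.g. in PyTorch) requires the 'spawn' or 'forkserver' start method; the comment in the code above reflects exactly that constraint.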
Example 5: start
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def start(self):
    """
    Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
    """
    mp_start_method = self._get_multiprocessing_start_method()
    context = multiprocessing.get_context(mp_start_method)
    self._parent_signal_conn, child_signal_conn = context.Pipe()
    self._process = context.Process(
        target=type(self)._run_processor_manager,
        args=(
            self._dag_directory,
            self._max_runs,
            # getattr prevents error while pickling an instance method.
            getattr(self, "_processor_factory"),
            self._processor_timeout,
            child_signal_conn,
            self._dag_ids,
            self._pickle_dags,
            self._async_mode
        )
    )
    self._process.start()
    self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
Example 6: start
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def start(self):
    """
    Launch the process and start processing the DAG.
    """
    start_method = self._get_multiprocessing_start_method()
    context = multiprocessing.get_context(start_method)
    self._parent_channel, _child_channel = context.Pipe()
    self._process = context.Process(
        target=type(self)._run_file_processor,
        args=(
            _child_channel,
            self.file_path,
            self._pickle_dags,
            self._dag_ids,
            "DagFileProcessor{}".format(self._instance_id),
            self._failure_callback_requests
        ),
        name="DagFileProcessor{}-Process".format(self._instance_id)
    )
    self._start_time = timezone.utcnow()
    self._process.start()
Example 7: track_gpu_utils
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def track_gpu_utils(device_ids: List[int],
                    interval: float = 0.05,
                    ) -> Generator[List[float], None, None]:
    # Spawn a worker.
    ctx = mp.get_context('spawn')
    conn, conn_worker = ctx.Pipe(duplex=True)
    p = ctx.Process(target=_worker, args=(device_ids, interval, conn_worker))
    p.start()
    conn.recv()

    # GPU% will be filled to this.
    gpu_utils: List[float] = []
    yield gpu_utils

    # Stop the worker and receive the timeline.
    conn.send(None)
    gpu_timeline = conn.recv()
    p.join()

    # Fill the GPU%.
    if gpu_timeline:
        gpu_utils.extend(sum(t)/len(t)/100 for t in zip(*gpu_timeline))
    else:
        gpu_utils.extend(0.0 for _ in device_ids)
Example 8: test_after_fork
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_after_fork(self):
    writer = datasets.PickleDatasetWriter(self.io)
    writer.write(1)
    writer.flush()
    reader = ReaderMock(self.io)
    # Assign to avoid destruction of the instance
    # before creating a child process
    dataset = datasets.PickleDataset(reader)
    assert reader.n_hook_called == 0
    ctx = multiprocessing.get_context('fork')
    p = ctx.Process()
    p.start()
    p.join()
    assert reader.n_hook_called == 1
    assert reader.last_caller_pid == p.pid
    # Touch to suppress "unused variable" warning
    del dataset
Example 9: init_mp_pool
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def init_mp_pool(reset=False):
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_pool
    if _mp_pool and not reset:
        return _mp_pool
    cfg.CONFIG_MODIFIED = False
    if _mp_pool and reset:
        _mp_pool.terminate()
        _mp_pool = None
    if cfg.PARAMS['use_mp_spawn']:
        mp = multiprocessing.get_context('spawn')
    else:
        mp = multiprocessing
    cfg_contents = cfg.pack_config()
    global_lock = mp.Manager().Lock()
    mpp = cfg.PARAMS['mp_processes']
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
Example 10: test_set_get
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
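Note that this example calls multiprocessing.get_context() with no argument: in that form it returns a context using the current default start method, which is why the test can check ctx.get_start_method() against whatever was just configured via set_start_method().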
Example 11: process_images_in_process_pool
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def process_images_in_process_pool(images_to_check, number_of_cpus, model):
    if number_of_cpus == -1:
        processes = None
    else:
        processes = number_of_cpus

    # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(
        images_to_check,
        itertools.repeat(model),
    )

    pool.starmap(test_image, function_parameters)
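The guard around get_all_start_methods() matters because 'forkserver' is only available on POSIX platforms that support passing file descriptors over Unix pipes; where it is missing (e.g. Windows), the code falls back to the module-level default.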
Example 12: process_images_in_process_pool
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def process_images_in_process_pool(images_to_check, known_names, known_face_encodings, number_of_cpus, tolerance, show_distance):
    if number_of_cpus == -1:
        processes = None
    else:
        processes = number_of_cpus

    # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(
        images_to_check,
        itertools.repeat(known_names),
        itertools.repeat(known_face_encodings),
        itertools.repeat(tolerance),
        itertools.repeat(show_distance)
    )

    pool.starmap(test_image, function_parameters)
Example 13: test_server_multiproc
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_server_multiproc(mocker, set_timeout, restore_signal, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)
    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    proc_idxs = mpctx.Array('i', 3)

    set_timeout(0.2, interrupt)
    aiotools.start_server(myserver_multiproc, num_workers=3,
                          args=(started, terminated, proc_idxs))

    assert started.value == 3
    assert terminated.value == 3
    assert list(proc_idxs) == [0, 1, 2]
    assert len(mp.active_children()) == 0
Example 14: test_server_multiproc_custom_stop_signals
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_server_multiproc_custom_stop_signals(
        mocker, set_timeout, restore_signal, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)
    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    received_signals = mpctx.Array('i', 2)
    proc_idxs = mpctx.Array('i', 2)

    set_timeout(0.2, interrupt_usr1)
    aiotools.start_server(myserver_multiproc_custom_stop_signals,
                          num_workers=2,
                          stop_signals={signal.SIGUSR1},
                          args=(started, terminated, received_signals, proc_idxs))

    assert started.value == 2
    assert terminated.value == 2
    assert list(received_signals) == [signal.SIGUSR1, signal.SIGUSR1]
    assert list(proc_idxs) == [0, 1]
    assert len(mpctx.active_children()) == 0
Example 15: test_external_multi_process_pool_fails_on_write_enabled_checkout
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import get_context [as alias]
def test_external_multi_process_pool_fails_on_write_enabled_checkout(self, repo, backend):
    from multiprocessing import get_context
    co = repo.checkout(write=True)
    co.add_ndarray_column(name='writtenaset', shape=(20, 20), dtype=np.float32, backend=backend)
    with co.columns['writtenaset'] as d:
        for sIdx in range(20):
            d[sIdx] = np.random.randn(20, 20).astype(np.float32) * 100
    assert d.backend == backend
    co.commit(f'master commit number 1')
    co.close()

    nco = repo.checkout(write=True)
    ds = nco.columns['writtenaset']
    keys = [i for i in range(20)]
    with pytest.raises(PermissionError):
        with get_context().Pool(2) as P:
            cmtData = P.map(ds.get, keys)
    nco.close()