This article collects typical usage examples of the Python method torch.multiprocessing.Process. If you are asking yourself: what exactly does multiprocessing.Process do, how is it used, and what does it look like in real code? Then the hand-picked examples below should help. You can also explore the rest of the torch.multiprocessing module that this method belongs to.
The 15 code examples of multiprocessing.Process shown below are sorted by popularity by default. Upvote the ones you like or find useful; your feedback helps the site recommend better Python examples.
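Before diving in, here is a minimal self-contained sketch of the pattern most of the examples share. torch.multiprocessing is a drop-in replacement for the standard multiprocessing module that additionally passes tensors through shared memory, so Process takes the familiar target/args arguments (the square function and tensor shape below are illustrative only):

import torch
import torch.multiprocessing as mp

def square(rank, shared):
    # Each worker writes its own slot of the shared tensor in place.
    shared[rank] = rank * rank

if __name__ == '__main__':
    shared = torch.zeros(4)
    shared.share_memory_()  # move storage to shared memory before spawning
    workers = [mp.Process(target=square, args=(rank, shared)) for rank in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(shared)  # tensor([0., 1., 4., 9.])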
Example 1: read_spec_and_run
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def read_spec_and_run(spec_file, spec_name, lab_mode):
'''Read a spec and run it in lab mode'''
logger.info(f'Running lab spec_file:{spec_file} spec_name:{spec_name} in mode:{lab_mode}')
if lab_mode in TRAIN_MODES:
spec = spec_util.get(spec_file, spec_name)
else: # eval mode
if '@' in lab_mode:
lab_mode, prename = lab_mode.split('@')
spec = spec_util.get_eval_spec(spec_file, spec_name, prename)
else:
spec = spec_util.get(spec_file, spec_name)
if 'spec_params' not in spec:
run_spec(spec, lab_mode)
else: # spec is parametrized; run them in parallel
param_specs = spec_util.get_param_specs(spec)
num_pro = spec['meta']['param_spec_process']
        # can't use Pool since it cannot spawn nested Processes, which VecEnv and parallel sessions need, so run and wait in chunks instead
workers = [mp.Process(target=run_spec, args=(spec, lab_mode)) for spec in param_specs]
for chunk_w in ps.chunk(workers, num_pro):
for w in chunk_w:
w.start()
for w in chunk_w:
w.join()
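The ps in the loop above appears to be the pydash library; ps.chunk splits the worker list into fixed-size slices so at most num_pro processes run at once. A dependency-free sketch of the same start-and-wait-by-chunks pattern:

def run_in_chunks(workers, chunk_size):
    # Start and join processes chunk by chunk, capping concurrency at
    # chunk_size; equivalent to the ps.chunk loop in the example above.
    for i in range(0, len(workers), chunk_size):
        chunk = workers[i:i + chunk_size]
        for w in chunk:
            w.start()
        for w in chunk:
            w.join()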
Example 2: fast_train_mp
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def fast_train_mp(self):
""" multi-cpu-core or mix cpu & multi-gpu """
self.init_device_emb()
self.emb_model.share_memory()
start_all = time.time()
ps = []
for i in range(len(self.args.gpus)):
p = mp.Process(target=self.fast_train_sp, args=(self.args.gpus[i],))
ps.append(p)
p.start()
for p in ps:
p.join()
print("Used time: %.2fs" % (time.time()-start_all))
if self.args.save_in_txt:
self.emb_model.save_embedding_txt(self.dataset, self.args.output_emb_file)
else:
self.emb_model.save_embedding(self.dataset, self.args.output_emb_file)
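fast_train_sp itself is not shown here. The key enabler is share_memory() on the model: every worker then updates one shared parameter set, Hogwild-style. A minimal sketch of such a worker (all names and the loss are hypothetical):

import torch
import torch.nn.functional as F

def hogwild_worker(model, data_loader, lr=0.01):
    # model.share_memory() was called in the parent, so these in-place
    # updates hit the single shared copy of the parameters.
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    for x, y in data_loader:
        optimizer.zero_grad()
        F.mse_loss(model(x), y).backward()
        optimizer.step()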
Example 3: __init__
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def __init__(self, config):
mp.Process.__init__(self)
self.config = config
self.__pipe, self.__worker_pipe = mp.Pipe()
self._state = None
self._task = None
self._network = None
self._total_steps = 0
self.__cache_len = 2
if not config.async_actor:
self.start = lambda: None
self.step = self._sample
self.close = lambda: None
self._set_up()
self._task = config.task_fn()
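The mp.Pipe() call returns two connected endpoints; the parent keeps one and hands the other to the worker. A minimal sketch of that request/reply pattern (the commands and payload are placeholders):

import torch.multiprocessing as mp

def worker(pipe):
    while True:
        cmd, data = pipe.recv()
        if cmd == 'step':
            pipe.send(data + 1)  # stand-in for real work
        elif cmd == 'close':
            pipe.close()
            break

if __name__ == '__main__':
    parent_end, worker_end = mp.Pipe()
    p = mp.Process(target=worker, args=(worker_end,))
    p.start()
    parent_end.send(('step', 41))
    print(parent_end.recv())  # 42
    parent_end.send(('close', None))
    p.join()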
Example 4: __init__
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def __init__(self, master, process_id=0):
super(AgentSingleProcess, self).__init__(name = "Process-%d" % process_id)
# NOTE: self.master.* refers to parameters shared across all processes
# NOTE: self.* refers to process-specific properties
# NOTE: we are not copying self.master.* to self.* to keep the code clean
self.master = master
self.process_id = process_id
# env
self.env = self.master.env_prototype(self.master.env_params, self.process_id)
# model
self.model = self.master.model_prototype(self.master.model_params)
self._sync_local_with_global()
# experience
self._reset_experience()
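This example appears to subclass mp.Process (via AgentSingleProcess) rather than passing a target. A minimal sketch of that style, where the overridden run method is what executes in the child:

import torch.multiprocessing as mp

class AgentProcess(mp.Process):
    def __init__(self, process_id=0):
        super().__init__(name='Process-%d' % process_id)
        self.process_id = process_id

    def run(self):
        # Runs in the child process once .start() is called.
        print('working in', self.name)

if __name__ == '__main__':
    p = AgentProcess(1)
    p.start()
    p.join()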
Example 5: __init__
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def __init__(self,
base_iterator: DataIterator,
num_workers: int = 1,
output_queue_size: int = 1000) -> None:
# pylint: disable=protected-access
super().__init__()
self.num_workers = num_workers
self.batch_size = base_iterator._batch_size
self.output_queue_size = output_queue_size
# These two options make the iterator stateful, which means it can't be shared
# across multiple processes.
if base_iterator._cache_instances:
raise ConfigurationError("cannot use Multiprocess iterator with cache_instances")
if base_iterator._instances_per_epoch:
raise ConfigurationError("cannot use instances_per_epoch with Multiprocess iterator")
self.iterator = base_iterator
self.processes: List[Process] = []
self.queuer: Optional[Process] = None
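The processes this iterator later spawns communicate through a bounded mp.Queue of size output_queue_size. A minimal sketch of that producer/consumer shape (the queue payloads are placeholders; the real workers emit Batch objects):

import torch.multiprocessing as mp

def producer(input_queue, output_queue):
    while True:
        item = input_queue.get()
        if item is None:  # sentinel: no more work
            break
        output_queue.put(item * 2)  # real code would build batches here

if __name__ == '__main__':
    inq, outq = mp.Queue(), mp.Queue(maxsize=1000)
    workers = [mp.Process(target=producer, args=(inq, outq)) for _ in range(2)]
    for w in workers:
        w.start()
    for i in range(10):
        inq.put(i)
    for _ in workers:
        inq.put(None)  # one sentinel per worker
    results = [outq.get() for _ in range(10)]
    for w in workers:
        w.join()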
Example 6: test_step
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
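init_processes is defined elsewhere in the test suite. In the standard PyTorch distributed pattern it looks roughly like the sketch below (the address, port, and gloo backend are assumptions):

import os
import torch.distributed as dist

def init_processes(rank, world_size, fn, backend='gloo'):
    # Rendezvous settings must match across all ranks.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=world_size)
    fn(rank, world_size)

Example 7 below uses the same helper with a different distributed optimizer.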
Example 7: test_step
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedSGD(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
Example 8: __init__
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def __init__(self, rank):
docker_client = docker.from_env()
agent_port, partner_port = 10000 + rank, 20000 + rank
clients = [('127.0.0.1', agent_port), ('127.0.0.1', partner_port)]
self.agent_type = GlobalVar()
# Assume Minecraft launched if port has listener, launch otherwise
if not _port_has_listener(agent_port):
self._launch_malmo(docker_client, agent_port)
print('Malmo running on port ' + str(agent_port))
if not _port_has_listener(partner_port):
self._launch_malmo(docker_client, partner_port)
print('Malmo running on port ' + str(partner_port))
# Set up partner agent env in separate process
p = mp.Process(target=self._run_partner, args=(clients, ))
p.daemon = True
p.start()
time.sleep(3)
# Set up agent env
self.env = PigChaseEnvironment(clients, PigChaseTopDownStateBuilder(gray=False), role=1, randomize_positions=True)
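_port_has_listener is not shown here; a plausible implementation simply attempts a TCP connection (hypothetical sketch):

import socket

def _port_has_listener(port, host='127.0.0.1'):
    # connect_ex returns 0 when something is accepting on the port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1.0)
        return sock.connect_ex((host, port)) == 0

Note also p.daemon = True above: the partner process is killed automatically when the parent exits, so no explicit join is needed.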
Example 9: __init__
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def __init__(self, n_train_processes):
self.nenvs = n_train_processes
self.waiting = False
self.closed = False
self.workers = list()
master_ends, worker_ends = zip(*[mp.Pipe() for _ in range(self.nenvs)])
self.master_ends, self.worker_ends = master_ends, worker_ends
for worker_id, (master_end, worker_end) in enumerate(zip(master_ends, worker_ends)):
p = mp.Process(target=worker,
args=(worker_id, master_end, worker_end))
p.daemon = True
p.start()
self.workers.append(p)
        # Prevent the master from using the worker ends for messaging
for worker_end in worker_ends:
worker_end.close()
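The worker target is defined elsewhere; for this vectorized-environment pattern it is typically a command loop over the pipe, roughly like this sketch (make_env is a hypothetical env factory):

def worker(worker_id, master_end, worker_end):
    master_end.close()  # the child only talks through its own end
    env = make_env(worker_id)  # hypothetical env factory
    while True:
        cmd, data = worker_end.recv()
        if cmd == 'step':
            worker_end.send(env.step(data))
        elif cmd == 'reset':
            worker_end.send(env.reset())
        elif cmd == 'close':
            worker_end.close()
            break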
Example 10: start
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def start(self):
        # start a thread (single-process mode) or a process to read frames from the video stream
if self.format == 'ssd':
if opt.sp:
p = Thread(target=self.getitem_ssd, args=())
else:
p = mp.Process(target=self.getitem_ssd, args=())
elif self.format == 'yolo':
if opt.sp:
p = Thread(target=self.getitem_yolo, args=())
else:
p = mp.Process(target=self.getitem_yolo, args=())
else:
raise NotImplementedError
p.daemon = True
p.start()
return self
Example 11: parallelize_sessions
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def parallelize_sessions(self, global_nets=None):
mp_dict = mp.Manager().dict()
# spec_util.tick(self.spec, 'session')
# mp_run_session(deepcopy(self.spec), global_nets, mp_dict)
workers = []
for _s in range(self.spec['meta']['max_session']):
spec_util.tick(self.spec, 'session')
w = mp.Process(target=mp_run_session, args=(deepcopy(self.spec), global_nets, mp_dict))
w.start()
workers.append(w)
for w in workers:
w.join()
session_metrics_list = [mp_dict[idx] for idx in sorted(mp_dict.keys())]
return session_metrics_list
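mp.Manager().dict() gives every worker a proxy dict it can write to, which is how session metrics get back to the parent. mp_run_session is not shown; a sketch of its shape (the Session class and key layout are assumptions):

def mp_run_session(spec, global_nets, mp_dict):
    # Run one session and report its metrics keyed by session index,
    # so sorted(mp_dict.keys()) restores launch order in the parent.
    session = Session(spec, global_nets)  # hypothetical Session class
    mp_dict[spec['meta']['session']] = session.run()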
Example 12: start
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def start(self, runtime_args):
self.processes = []
for rank in range(self.world_size):
self.processes.append(Process(target=self.init_process, args=(rank, self.fn, self.args, runtime_args)))
self.processes[-1].start()
Example 13: init_parallel_jobs
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def init_parallel_jobs(system_config, dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(system_config, db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
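prefetch_data is not shown here; as a daemon producer it would loop forever, blocking on the bounded queue, roughly like this sketch:

def prefetch_data(system_config, db, queue, fn, data_aug):
    # fn samples one training batch; put() blocks when the queue is full,
    # which throttles the producers to the consumer's pace.
    while True:
        queue.put(fn(system_config, db, data_aug))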
Example 14: main
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def main(args, devices):
# load reddit data
data = RedditDataset(self_loop=True)
train_mask = data.train_mask
val_mask = data.val_mask
test_mask = data.test_mask
features = th.Tensor(data.features)
in_feats = features.shape[1]
labels = th.LongTensor(data.labels)
n_classes = data.num_labels
# Construct graph
g = dgl.graph(data.graph.all_edges())
g.ndata['features'] = features
# Pack data
data = train_mask, val_mask, test_mask, in_feats, labels, n_classes, g
n_gpus = len(devices)
if devices[0] == -1:
run(0, 0, args, ['cpu'], data)
    elif n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=thread_wrapped_func(run),
args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
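thread_wrapped_func is a DGL utility that runs the target inside a thread of the spawned process, which works around known deadlocks between fork-based multiprocessing and OpenMP. Roughly, it looks like this sketch:

import threading
import traceback
from functools import wraps
import torch.multiprocessing as mp

def thread_wrapped_func(func):
    @wraps(func)
    def decorated(*args, **kwargs):
        queue = mp.Queue()
        def _run():
            try:
                queue.put((func(*args, **kwargs), None))
            except Exception:
                queue.put((None, traceback.format_exc()))
        t = threading.Thread(target=_run)
        t.start()
        t.join()
        result, trace = queue.get()
        if trace is not None:
            raise RuntimeError(trace)
        return result
    return decorated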
Example 15: fast_train_mp
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Process [as alias]
def fast_train_mp(self):
""" multi-cpu-core or mix cpu & multi-gpu """
self.init_device_emb()
self.emb_model.share_memory()
if self.args.count_params:
sum_up_params(self.emb_model)
start_all = time.time()
ps = []
for i in range(len(self.args.gpus)):
p = mp.Process(target=self.fast_train_sp, args=(self.args.gpus[i],))
ps.append(p)
p.start()
for p in ps:
p.join()
print("Used time: %.2fs" % (time.time()-start_all))
if self.args.save_in_txt:
self.emb_model.save_embedding_txt(self.dataset, self.args.output_emb_file)
elif self.args.save_in_pt:
self.emb_model.save_embedding_pt(self.dataset, self.args.output_emb_file)
else:
self.emb_model.save_embedding(self.dataset, self.args.output_emb_file)