本文整理汇总了Python中torch.multiprocessing.Pipe方法的典型用法代码示例。如果您正苦于以下问题:Python multiprocessing.Pipe方法的具体用法?Python multiprocessing.Pipe怎么用?Python multiprocessing.Pipe使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.multiprocessing
的用法示例。
在下文中一共展示了multiprocessing.Pipe方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def __init__(self, config):
    """Initialize an actor that may run asynchronously in its own process.

    A duplex pipe pair is created for parent/worker communication. When
    ``config.async_actor`` is false, the process machinery is bypassed:
    ``start`` and ``close`` become no-ops and ``step`` is rebound to the
    synchronous ``_sample`` method so everything runs in-process.
    """
    mp.Process.__init__(self)
    self.config = config
    # Name-mangled ends: __pipe stays with the parent, __worker_pipe is
    # handed to the spawned worker loop.
    self.__pipe, self.__worker_pipe = mp.Pipe()
    self._state = None
    self._task = None
    self._network = None
    self._total_steps = 0
    # Number of batches kept in flight between parent and worker.
    self.__cache_len = 2
    if not config.async_actor:
        self.start = lambda: None
        self.step = self._sample
        self.close = lambda: None
        # NOTE(review): _set_up and _sample are defined elsewhere in the
        # class — in the synchronous path the task is built eagerly here.
        self._set_up()
        self._task = config.task_fn()
示例2: _inference
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def _inference(self, cand):
    """Evaluate candidate `cand` in two distributed stages.

    Stage 1 recomputes batch-norm statistics and returns a `salt` tag;
    stage 2 scores fitness using the checkpoint identified by that tag.
    The temporary checkpoint is deleted before returning the fitness value.
    """
    # Stage 1: batch-norm statistics. The child sends `salt` back through
    # the pipe's write end.
    stat_recv, stat_send = mp.Pipe()
    spawn_args = {"local_rank": 0, "distributed": False}
    mp.spawn(
        bn_statistic, nprocs=self.ngpus_per_node,
        args=(self.ngpus_per_node, cfg, spawn_args, cand, stat_send))
    salt = stat_recv.recv()
    # Stage 2: fitness evaluation, keyed by the same salt.
    fit_recv, fit_send = mp.Pipe()
    spawn_args = {"local_rank": 0, "distributed": False}
    mp.spawn(
        fitness, nprocs=self.ngpus_per_node,
        args=(self.ngpus_per_node, cfg, spawn_args, cand, salt, fit_send))
    # Clean up the temporary checkpoint produced by the statistics pass.
    ckpt_path = os.path.join(cfg.OUTPUT_DIR, salt + ".pth")
    if os.path.isfile(ckpt_path):
        os.remove(ckpt_path)
    return fit_recv.recv()
示例3: request
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def request(decode_queue, resolution, index, fps=24.0, duration=4.0):
    """Queue one video segment for decoding and block until it completes.

    A one-way pipe is created: the decoder worker writes on the send end,
    this function waits on the receive end and reports the elapsed time.
    """
    res2quality = {240: 0, 360: 1, 480: 2, 720: 3, 1080: 4}
    video_dir = os.path.join(opt.data_dir, '{}p'.format(resolution))
    start_time = time.time()
    video_info = util.videoInfo(fps, duration, res2quality[resolution])
    # duplex=False: recv_end is read-only here, send_end goes to the worker.
    recv_end, send_end = mp.Pipe(duplex=False)
    init_segment = os.path.join(video_dir, 'segment_init.mp4')
    media_segment = os.path.join(video_dir, 'segment_{}.m4s'.format(index))
    decode_queue.put((init_segment, media_segment, send_end, video_info))
    while True:
        message = recv_end.recv()
        if message[0] == 'output':
            end_time = time.time()
            print('overall [elapsed], resolution [{}p] : {}sec'.format(resolution, end_time - start_time))
        else:
            print('request: Invalid input')
        # Either branch handles exactly one message, then we are done.
        break
示例4: __init__
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def __init__(self, n_train_processes):
    """Spawn one daemon worker process per environment, linked by pipes."""
    self.nenvs = n_train_processes
    self.waiting = False
    self.closed = False
    self.workers = list()
    # One duplex pipe per environment; unzip into parallel tuples.
    pipe_pairs = [mp.Pipe() for _ in range(self.nenvs)]
    master_ends, worker_ends = zip(*pipe_pairs)
    self.master_ends, self.worker_ends = master_ends, worker_ends
    for worker_id, (master_end, worker_end) in enumerate(pipe_pairs):
        p = mp.Process(target=worker,
                       args=(worker_id, master_end, worker_end))
        p.daemon = True  # workers die with the parent
        p.start()
        self.workers.append(p)
    # Forbid master to use the worker end for messaging: close our copies.
    for worker_end in worker_ends:
        worker_end.close()
示例5: init_vae_training_subprocess
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def init_vae_training_subprocess(self):
    """Launch a background process that runs the VAE training loop.

    Requires a shared-memory replay buffer so the subprocess can read
    samples without copying. The trainer object is sent through the pipe
    as the first message after the process starts.
    """
    # The buffer must expose get_mp_info() for cross-process sharing.
    assert isinstance(self.replay_buffer, SharedObsDictRelabelingBuffer)

    self._vae_conn_pipe, process_pipe = Pipe()
    self._vae_training_process = Process(
        target=subprocess_train_vae_loop,
        args=(
            process_pipe,
            self.vae,
            self.vae.state_dict(),
            self.replay_buffer,
            self.replay_buffer.get_mp_info(),
            ptu.device,
        )
    )
    self._vae_training_process.start()
    # Hand the trainer over as the subprocess's first received message.
    self._vae_conn_pipe.send(self.vae_trainer)
示例6: __init__
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def __init__(self, memory_size, batch_size, replay_type=Config.DEFAULT_REPLAY):
    """Start a replay-buffer server running in a background process."""
    mp.Process.__init__(self)
    # Parent keeps `pipe`; the child process communicates via `worker_pipe`.
    self.pipe, self.worker_pipe = mp.Pipe()
    self.memory_size, self.batch_size = memory_size, batch_size
    self.cache_len = 2  # number of pre-fetched sample batches
    self.replay_type = replay_type
    self.start()  # kick off run() in the child immediately
示例7: __init__
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def __init__(self, args, trainer_maker):
    """Build a local trainer plus (nprocesses - 1) remote worker processes."""
    self.comms = []
    self.trainer = trainer_maker()
    # itself will do the same job as workers, hence the minus one
    self.nworkers = args.nprocesses - 1
    for rank in range(self.nworkers):
        local_end, remote_end = mp.Pipe()
        self.comms.append(local_end)
        MultiProcessWorker(rank, trainer_maker, remote_end, seed=args.seed).start()
    self.grads = None
    self.worker_grads = None
    self.is_random = args.random
示例8: main
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def main():
    """Entry point: set up data loaders, models, and worker processes, then train.

    Each worker gets two one-way pipes (parent->worker and worker->parent)
    plus its own shard of the training data.
    """
    args = parse_args()
    mp.set_start_method('spawn')  # Using spawn is decided.
    _logger = log.get_logger(__name__, args)
    _logger.info(print_args(args))
    # One DataLoader per worker, each reading a distinct shard (part_no).
    loaders = []
    file_list = os.listdir(args.train_file)
    random.shuffle(file_list)
    for i in range(args.worker):
        loader = data_loader.DataLoader(
            args.train_file,
            args.dict_file,
            separate_conj_stmt=args.direction,
            binary=args.binary,
            part_no=i,
            part_total=args.worker,
            file_list=file_list,
            norename=args.norename,
            filter_abelian=args.fabelian,
            compatible=args.compatible)
        loaders.append(loader)
        loader.start_reader()
    net, mid_net, loss_fn = create_models(args, loaders[0], allow_resume=True)
    # Use fake modules to replace the real ones
    net = FakeModule(net)
    if mid_net is not None:
        mid_net = FakeModule(mid_net)
    for i in range(len(loss_fn)):
        loss_fn[i] = FakeModule(loss_fn[i])
    opt = get_opt(net, mid_net, loss_fn, args)
    # Per-worker one-way pipes: send_p feeds work in, recv_p2 reads results.
    inqueues = []
    outqueues = []
    plist = []
    for i in range(args.worker):
        recv_p, send_p = Pipe(False)
        recv_p2, send_p2 = Pipe(False)
        inqueues.append(send_p)
        outqueues.append(recv_p2)
        plist.append(
            Process(target=worker, args=(recv_p, send_p2, loaders[i], args, i)))
        plist[-1].start()
    _logger.warning('Training begins')
    train(inqueues, outqueues, net, mid_net, loss_fn, opt, loaders, args, _logger)
    # NOTE(review): `loader` here is the last loader from the setup loop and
    # is destructed again in the loop below — confirm destruct() is idempotent.
    loader.destruct()
    for p in plist:
        p.terminate()
    for loader in loaders:
        loader.destruct()
    _logger.warning('Training ends')
示例9: __init__
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def __init__(
        self,
        env,
        policy,
        exploration_policy,
        max_path_length,
        train_rollout_function,
        eval_rollout_function,
        num_workers=2,
):
    """Set up a pool of rollout worker processes fed through pipes.

    Rollout functions are serialized with cloudpickle so closures survive
    the trip to the child processes. Workers are split (by capacity, not
    assignment) between training and evaluation rollouts.
    """
    Serializable.quick_init(self, locals())
    super().__init__(env)
    self.num_workers = num_workers
    # Let self.worker_limits[True] be the max number of workers for training
    # and self.worker_limits[False] be the max number of workers for eval.
    self.worker_limits = {
        True: math.ceil(self.num_workers / 2),
        False: math.ceil(self.num_workers / 2),
    }
    self.parent_pipes = []
    self.child_pipes = []
    for _ in range(num_workers):
        parent_conn, child_conn = Pipe()
        self.parent_pipes.append(parent_conn)
        self.child_pipes.append(child_conn)
    self._workers = [
        Process(
            target=RemoteRolloutEnv._worker_loop,
            args=(
                self.child_pipes[i],
                env,
                policy,
                exploration_policy,
                max_path_length,
                # cloudpickle handles closures/lambdas that pickle cannot
                cloudpickle.dumps(train_rollout_function),
                cloudpickle.dumps(eval_rollout_function),
            )
        )
        for i in range(num_workers)]
    for worker in self._workers:
        worker.start()
    # Pipes not currently assigned to an in-flight rollout.
    self.free_pipes = set(self.parent_pipes)
    # self.pipe_info[pipe] stores (epoch, train_type)
    self.pipe_info = {}
    # Let self.promise_list[True] be the promises for training
    # and self.promise_list[False] be the promises for eval.
    self.rollout_promise_list = {
        True: [],
        False: [],
    }
示例10: run_parameter_sweep
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def run_parameter_sweep(parameters, data, args, Beta):
    """Fit one model per parameter row, parallelized across available GPUs.

    Rows are processed in batches of ``num_processes`` (one child process per
    CUDA device); any remainder is run serially in the main process. The
    number of signatures, objective, and runtime for each row are written
    back into ``parameters`` and saved as a TSV in ``args.output_dir``.
    """
    num_processes = torch.cuda.device_count()
    idx = 0
    objectives = []
    nsigs = []
    times = []
    # Parallel phase: full batches of num_processes rows.
    while idx <= len(parameters) - num_processes:
        print(idx)
        pipe_list = []
        processes = []
        for rank in range(num_processes):
            recv_end, send_end = mp.Pipe(False)
            p = mp.Process(target=run_method_engine, args=(data, parameters.iloc[idx+rank]['a'], parameters.iloc[idx+rank]['phi'], parameters.iloc[idx+rank]['b'], Beta,
                                                           args.prior_on_W, args.prior_on_H, parameters.iloc[idx+rank]['K0'], args.tolerance, args.max_iter, send_end, rank,))
            pipe_list.append(recv_end)
            processes.append(p)
            p.start()
        # Drain pipes before join(): a child blocks on send() until the
        # parent reads, so joining first could deadlock.
        result_list = [x.recv() for x in pipe_list]
        for p in processes:
            p.join()
        nsig = [write_output(x[0], x[1], data.channel_names, data.sample_names, args.output_dir,
                             parameters['label'][idx+i]) for i, x in enumerate(result_list)]
        nsigs.extend(nsig)
        times.extend(res[3] for res in result_list)
        objectives.extend(res[2] for res in result_list)
        idx += num_processes
    # Serial phase: leftover rows (fewer than num_processes).
    # BUG FIX: the original advanced the index with `idx += i` (the loop
    # counter), which skipped rows and could index past the end of
    # `parameters` whenever the remainder was 3 or more.
    for j in range(idx, len(parameters)):
        W, H, cost, time = run_method_engine(data, parameters.iloc[j]['a'], parameters.iloc[j]['phi'], parameters.iloc[j]['b'], Beta,
                                             args.prior_on_W, args.prior_on_H, parameters.iloc[j]['K0'], args.tolerance, args.max_iter)
        nsig = write_output(W, H, data.channel_names, data.sample_names, args.output_dir,
                            parameters['label'][j])
        times.append(time)
        nsigs.append(nsig)
        objectives.append(cost)
    parameters['nsigs'] = nsigs
    parameters['objective'] = objectives
    parameters['times'] = times
    parameters.to_csv(args.output_dir + '/parameters_with_results.txt', sep='\t', index=None)
示例11: __init__
# 需要导入模块: from torch import multiprocessing [as 别名]
# 或者: from torch.multiprocessing import Pipe [as 别名]
def __init__(self, env_fns, engine):
    """Spawn one subprocess per environment; bootstrap over pipes, then
    switch steady-state messaging to ZeroMQ sockets.

    The pipe is used only for the startup handshake (fetching each worker's
    torch shared-memory buffers); afterwards both pipe ends are closed and
    communication continues over the per-worker zmq socket.
    """
    super(SubProcEnvManager, self).__init__(env_fns, engine)
    self.waiting = False
    self.closed = False
    self.processes = []

    self._zmq_context = zmq.Context()
    self._zmq_ports = []
    self._zmq_sockets = []

    # make a temporary env to get stuff
    dummy = env_fns[0]()
    self._observation_space = dummy.observation_space
    self._action_space = dummy.action_space
    self._cpu_preprocessor = dummy.cpu_preprocessor
    self._gpu_preprocessor = dummy.gpu_preprocessor
    dummy.close()

    # iterate envs to get torch shared memory through pipe then close it
    shared_memories = []

    for w_ind in range(self.nb_env):
        pipe, w_pipe = mp.Pipe()
        socket, port = zmq_robust_bind_socket(self._zmq_context)

        process = mp.Process(
            target=worker,
            args=(w_pipe, pipe, port, CloudpickleWrapper(env_fns[w_ind])),
        )
        process.daemon = True  # workers die with the parent
        process.start()
        self.processes.append(process)
        self._zmq_sockets.append(socket)

        pipe.send(("get_shared_memory", None))
        shared_memories.append(pipe.recv())

        # switch to zmq socket and close pipes
        pipe.send(("switch_zmq", None))
        pipe.close()
        w_pipe.close()

    # convert list of per-worker dicts into a dict of lists
    self.shared_memories = listd_to_dlist(shared_memories)