

Python multiprocessing.Pipe Method Code Examples

This article collects typical usage examples of Python's multiprocessing.Pipe. If you are wondering what multiprocessing.Pipe does, how to call it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the multiprocessing module that the method belongs to.


The sections below present 15 code examples of multiprocessing.Pipe drawn from open-source projects, sorted by popularity by default.
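Before the project-specific examples, a minimal self-contained sketch (not taken from any of the examples below) shows the basic pattern: multiprocessing.Pipe() returns a pair of Connection objects, and each end can send() and recv() any picklable Python object across the process boundary.

import multiprocessing

def child(conn):
    conn.send({"status": "ready"})    # any picklable object can be sent
    print("child received:", conn.recv())
    conn.close()

if __name__ == "__main__":
    parent_conn, child_conn = multiprocessing.Pipe()   # duplex by default
    p = multiprocessing.Process(target=child, args=(child_conn,))
    p.start()
    print("parent received:", parent_conn.recv())
    parent_conn.send("stop")
    p.join()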

Example 1: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space) 
Author: Hwhitetooth, Project: lirpg, Lines of code: 21, Source file: subproc_vec_env.py
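The worker function and CloudpickleWrapper referenced in Example 1 (and in several later examples) are defined elsewhere in subproc_vec_env.py and are not part of the excerpt. As a rough, hypothetical sketch, a minimal worker satisfying the ('get_spaces', None) handshake shown above could look like the following, assuming CloudpickleWrapper exposes the wrapped environment thunk as .x (as in OpenAI baselines); the real worker also handles step, reset, and close commands:

def worker(remote, parent_remote, env_fn_wrapper):
    parent_remote.close()              # this end belongs to the parent process
    env = env_fn_wrapper.x()           # assumption: the wrapper stores the thunk as .x
    while True:
        cmd, data = remote.recv()
        if cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'close':
            remote.close()
            break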

Example 2: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, constructor):
    """Step environment in a separate process for lock free paralellism.

    The environment will be created in the external process by calling the
    specified callable. This can be an environment class, or a function
    creating the environment and potentially wrapping it. The returned
    environment should not access global variables.

    Args:
      constructor: Callable that creates and returns an OpenAI gym environment.

    Attributes:
      observation_space: The cached observation space of the environment.
      action_space: The cached action space of the environment.
    """
    self._conn, conn = multiprocessing.Pipe()
    self._process = multiprocessing.Process(
        target=self._worker, args=(constructor, conn))
    atexit.register(self.close)
    self._process.start()
    self._observ_space = None
    self._action_space = None 
Author: utra-robosoccer, Project: soccer-matlab, Lines of code: 24, Source file: wrappers.py

Example 3: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, env_fns, render_interval):
        """ Minor addition to SubprocVecEnv, automatically renders environments

        envs: list of gym environments to run in subprocesses
        """
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()

        self.render_interval = render_interval
        self.render_timer = 0 
Author: lnpalmer, Project: A2C, Lines of code: 23, Source file: envs.py

Example 4: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, env_fns):
        if np.__version__ == '1.16.0':
            warnings.warn("""
NumPy 1.16.0 can cause severe memory leak in chainerrl.envs.MultiprocessVectorEnv.
We recommend using other versions of NumPy.
See https://github.com/numpy/numpy/issues/12793 for details.
""")  # NOQA

        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = \
            [Process(target=worker, args=(work_remote, env_fn))
             for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
        for p in self.ps:
            p.start()
        self.last_obs = [None] * self.num_envs
        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()
        self.closed = False 
Author: chainer, Project: chainerrl, Lines of code: 21, Source file: multiprocess_vector_env.py

Example 5: _pipe

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def _pipe(self):
        """On Windows we use a pipe to emulate a Linux style character
        buffer."""
        if self._evdev:
            return None

        if not self.__pipe:
            target_function = self._get_target_function()
            if not target_function:
                return None

            self.__pipe, child_conn = Pipe(duplex=False)
            self._listener = Process(target=target_function,
                                     args=(child_conn,), daemon=True)
            self._listener.start()
        return self.__pipe 
Author: zeth, Project: inputs, Lines of code: 18, Source file: inputs.py

Example 6: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, env_fns, spaces=None):
        """
        Arguments:

        env_fns: iterable of callables -  functions that create environments to run in subprocesses. Need to be cloud-pickleable
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        self.viewer = None
        VecEnv.__init__(self, len(env_fns), observation_space, action_space) 
Author: quantumiracle, Project: Reinforcement_Learning_for_Traffic_Light_Control, Lines of code: 24, Source file: subproc_vec_env.py

Example 7: tcp_server_piped

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def tcp_server_piped(port=19543, use_stderr_logger=False):
    """
    TCP server context-manager used for integration tests.

    It starts server on localhost/given-port during context-manager start()
    and performs server-cleanup during context-manager stop()

    :param port: where to start server at
    :param use_stderr_logger: configure logging to output into stderr?
    :return: pair (server, inter-process-pipe used to control server)
    """
    client_process_pipe_endpoint, server_pipe_endpoint = Pipe()
    tcp_server = TcpServerPiped(host='localhost', port=port,
                                pipe_in=server_pipe_endpoint, delay=0,
                                use_stderr_logger=use_stderr_logger)
    tcp_server.start()
    time.sleep(0.5)  # allow server to boot-up
    # returning tcp_server lets you call tcp_server.terminate() to kill the server
    # for testing dropped TCP connection
    yield (tcp_server, client_process_pipe_endpoint)
    client_process_pipe_endpoint.send(("shutdown", {}))
    tcp_server.join()

# ------------------- TCP server running in separate process --------------------- 
Author: nokia, Project: moler, Lines of code: 26, Source file: tcpserverpiped.py
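The @contextlib.contextmanager decorator that turns this generator into a usable context manager is not visible in the excerpt; assuming it is applied in tcpserverpiped.py, a test could drive the server roughly as in the hedged sketch below:

# Hedged usage sketch; assumes tcp_server_piped is decorated with
# contextlib.contextmanager, as its docstring implies.
with tcp_server_piped(port=19543, use_stderr_logger=True) as (server, control_pipe):
    # connect a test client to localhost:19543 and exercise it here;
    # server.terminate() can be called to simulate a dropped TCP connection
    pass
# leaving the block sends ("shutdown", {}) through the pipe and joins the server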

Example 8: main

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def main():
    parent_conn, child_conn = Pipe()

    child = Process(target=work, args=(child_conn,))

    for item in (
        42,
        'some string',
        {'one': 1},
        CustomClass(),
        None,
    ):
        print(
            "PRNT: send: {}".format(item)
        )
        parent_conn.send(item)

    child.start()
    child.join() 
Author: PacktPublishing, Project: Expert-Python-Programming_Second-Edition, Lines of code: 21, Source file: multiprocessing_pipes.py
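The work() callable and CustomClass used in Example 8 are defined elsewhere in multiprocessing_pipes.py and are not shown here. A hypothetical receiver compatible with this sender could look like the sketch below; note that everything pushed through the pipe, including CustomClass instances, must be picklable:

# Hypothetical receiver; the book's actual work() implementation may differ.
def work(conn):
    while True:
        item = conn.recv()                   # blocks until the parent sends data
        print("CHLD: recv: {}".format(item))
        if item is None:                     # the sender's final item is None
            break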

Example 9: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.nenvs = nenvs
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space) 
Author: sweetice, Project: Deep-reinforcement-learning-with-pytorch, Lines of code: 22, Source file: multiprocessing_env.py

Example 10: __init__

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def __init__(self, env_fns, horizon, trajectories, seed=0):
        """Launch subprocess workers and store configuration parameters.
        :param env_fns (list<()->ResettableEnv>): list of thunks.
        :param horizon (int): length of trajectories to search over.
        :param trajectories (int): minimum number of trajectories to evaluate.
               It will be rounded up to the nearest multiple of len(make_env)."""
        super().__init__(horizon, trajectories)
        nremotes = len(env_fns)
        # Integer ceiling of self.trajectories / nworkers
        traj_per_worker = (self.trajectories - 1) // nremotes + 1

        pipes = [Pipe() for _ in range(nremotes)]
        self.remotes, self.work_remotes = zip(*pipes)
        worker_cfgs = zip(self.work_remotes, self.remotes, env_fns)
        self.ps = []
        for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs):
            args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker)
            process = Process(target=_worker, args=args)
            process.daemon = True
            # If the main process crashes, we should not cause things to hang
            process.start()
            self.ps.append(process)
        for remote in self.work_remotes:
            remote.close() 
Author: HumanCompatibleAI, Project: adversarial-policies, Lines of code: 26, Source file: monte_carlo.py

Example 11: run

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def run(engine_factory, source_name, input_queue_maxsize, port, num_tokens,
        message_max_size=None):
    engine_read, server_write = multiprocessing.Pipe(duplex=False)

    # We cannot read from multiprocessing.Pipe without blocking the event
    # loop
    server_read, engine_write = os.pipe()

    local_server = _LocalServer(
        num_tokens, input_queue_maxsize, server_write, server_read)
    local_server.add_source_consumed(source_name)

    engine_process = multiprocessing.Process(
        target=_run_engine,
        args=(engine_factory, engine_read, server_read, engine_write))
    try:
        engine_process.start()
        os.close(engine_write)
        local_server.launch(port, message_max_size)
    finally:
        local_server.cleanup()
        os.close(server_read)

    raise Exception('Server stopped') 
Author: cmusatyalab, Project: gabriel, Lines of code: 26, Source file: local_engine.py
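The comment in Example 11 explains why two kinds of pipes are mixed: reading a multiprocessing.Pipe connection blocks the event loop, while a raw os.pipe() file descriptor can be registered with asyncio. A minimal sketch of that idea, unrelated to the gabriel codebase, is shown below (it requires a selector-based event loop, the default on Unix):

import asyncio
import os

async def watch_pipe():
    read_fd, write_fd = os.pipe()
    loop = asyncio.get_running_loop()
    readable = asyncio.Event()
    loop.add_reader(read_fd, readable.set)   # callback fires when data arrives

    os.write(write_fd, b"ping")              # stands in for the engine process writing
    await readable.wait()                    # the event loop is never blocked
    print(os.read(read_fd, 4))               # b'ping'

    loop.remove_reader(read_fd)
    os.close(read_fd)
    os.close(write_fd)

asyncio.run(watch_pipe())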

Example 12: test_recursion

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def test_recursion(self):
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])

        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())

        expected = [
            [],
              [0],
                [0, 0],
                [0, 1],
              [1],
                [1, 0],
                [1, 1]
            ]
        self.assertEqual(result, expected) 
Author: IronLanguages, Project: ironpython2, Lines of code: 21, Source file: test_multiprocessing.py

Example 13: test_spawn_close

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()    # this might complete before child initializes

        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)

        conn.send_bytes(SENTINEL)
        conn.close()
        p.join() 
Author: IronLanguages, Project: ironpython2, Lines of code: 22, Source file: test_multiprocessing.py

Example 14: test_fd_transfer

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def test_fd_transfer(self):
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        with open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo") 
Author: IronLanguages, Project: ironpython2, Lines of code: 19, Source file: test_multiprocessing.py

Example 15: test_large_fd_transfer

# Required import: import multiprocessing
# Or: from multiprocessing import Pipe
def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "wb") as f:
            fd = f.fileno()
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar") 
Author: IronLanguages, Project: ironpython2, Lines of code: 27, Source file: test_multiprocessing.py


Note: The multiprocessing.Pipe examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not republish without permission.