

Python mpi4py.MPI Attribute Code Examples

This article collects typical usage examples of the Python mpi4py.MPI attribute from open-source code. If you are wondering what mpi4py.MPI does or how to use it in practice, the curated examples below should help; you can also explore other usage examples from the mpi4py module the attribute belongs to.


The following presents 15 code examples of the mpi4py.MPI attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.

Example 1: mpi_fork

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
import os
import subprocess
import sys


def mpi_fork(n, extra_mpi_args=None):
    """Re-launch the current script under mpirun with n workers.

    Returns "parent" in the original process and "child" in the MPI workers.
    """
    if extra_mpi_args is None:  # avoid a mutable default argument
        extra_mpi_args = []
    if n <= 1:
        return "child"
    if os.getenv("IN_MPI") is None:
        env = os.environ.copy()
        # Pin BLAS/OpenMP to one thread per worker and mark the relaunched
        # processes so they do not fork again.
        env.update(
            MKL_NUM_THREADS="1",
            OMP_NUM_THREADS="1",
            IN_MPI="1"
        )
        # "-bind-to core" is crucial for good performance; pass it in
        # through extra_mpi_args if desired.
        args = ["mpirun", "-np", str(n)] + extra_mpi_args + [sys.executable]
        args += sys.argv
        subprocess.check_call(args, env=env)
        return "parent"
    else:
        # install_mpi_excepthook is defined in the same module (see Example 9)
        install_mpi_excepthook()
        return "child" 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines: 26, Source: util.py
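
A common calling pattern is to exit the parent once it has relaunched the script under mpirun, so that only the MPI children run the workload; a minimal sketch, with main() as a hypothetical per-worker entry point:

import sys

if __name__ == "__main__":
    if mpi_fork(4) == "parent":
        sys.exit(0)  # the relaunched MPI children do the real work
    main()  # hypothetical: whatever each MPI worker should run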

Example 2: make_mujoco_env

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
import os

import gym
from mpi4py import MPI

from baselines import logger
from baselines.bench import Monitor
from baselines.common import set_global_seeds


def make_mujoco_env(env_id, seed, reward_scale=1.0):
    """
    Create a wrapped, monitored gym.Env for MuJoCo.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    # Derive a distinct seed for every MPI worker from the base seed
    myseed = seed + 1000 * rank if seed is not None else None
    set_global_seeds(myseed)
    env = gym.make(env_id)
    logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
    env = Monitor(env, logger_path, allow_early_resets=True)
    env.seed(seed)

    if reward_scale != 1.0:
        from baselines.common.retro_wrappers import RewardScaler
        env = RewardScaler(env, reward_scale)

    return env 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines: 19, Source: cmd_util.py
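
A usage sketch (the environment id is an assumption; any MuJoCo Gym id registered in your install works):

env = make_mujoco_env('HalfCheetah-v2', seed=0)
obs = env.reset()
for _ in range(100):
    obs, reward, done, _ = env.step(env.action_space.sample())
    if done:
        obs = env.reset()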

Example 3: make_vec_env

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
import os
import gym
try:
    from mpi4py import MPI
except ImportError:
    MPI = None  # the `if MPI` check below then falls back to rank 0
from baselines import logger
from baselines.bench import Monitor
from baselines.common import set_global_seeds
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.retro_wrappers import RewardScaler
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv


def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0):
    """
    Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
    """
    if wrapper_kwargs is None:
        wrapper_kwargs = {}
    mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0

    def make_env(rank):  # pylint: disable=C0111
        def _thunk():
            env = make_atari(env_id) if env_type == 'atari' else gym.make(env_id)
            # Seed formula: distinct per MPI rank and per subprocess worker
            env.seed(seed + 10000 * mpi_rank + rank if seed is not None else None)
            env = Monitor(env,
                          logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(rank)),
                          allow_early_resets=True)
            if env_type == 'atari':
                return wrap_deepmind(env, **wrapper_kwargs)
            elif reward_scale != 1:
                return RewardScaler(env, reward_scale)
            else:
                return env
        return _thunk

    set_global_seeds(seed)
    if num_env > 1:
        return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
    else:
        return DummyVecEnv([make_env(start_index)]) 
Developer: quantumiracle, Project: Reinforcement_Learning_for_Traffic_Light_Control, Lines: 23, Source: cmd_util.py
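
A usage sketch for the vectorized case (environment id and worker count are assumptions):

venv = make_vec_env('PongNoFrameskip-v4', 'atari', num_env=4, seed=0)
obs = venv.reset()  # batched observations, one row per worker
actions = [venv.action_space.sample() for _ in range(4)]
obs, rewards, dones, infos = venv.step(actions)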

Example 4: make_vec_env

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
# Requires the same imports as Example 3; make_env is the helper shown in Example 5.
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0, gamestate=None):
    """
    Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
    """
    if wrapper_kwargs is None:
        wrapper_kwargs = {}
    mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
    # Fold the MPI rank into the base seed once; make_env adds the per-worker subrank
    seed = seed + 10000 * mpi_rank if seed is not None else None

    def make_thunk(rank):
        return lambda: make_env(
            env_id=env_id,
            env_type=env_type,
            subrank=rank,
            seed=seed,
            reward_scale=reward_scale,
            gamestate=gamestate,
            wrapper_kwargs=wrapper_kwargs
        )

    set_global_seeds(seed)
    if num_env > 1:
        return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])
    else:
        return DummyVecEnv([make_thunk(start_index)]) 
Developer: hiwonjoon, Project: ICML2019-TREX, Lines: 25, Source: cmd_util.py
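
Design note: unlike Example 3, which builds the full per-worker seed inside each thunk, this version folds the MPI rank into seed once up front and delegates the per-subprocess offset to make_env (Example 5). The effective seed, seed + 10000 * mpi_rank + subrank, is the same in both versions.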

Example 5: make_env

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
# Requires the same imports as Example 3, plus: from baselines.common import retro_wrappers
def make_env(env_id, env_type, subrank=0, seed=None, reward_scale=1.0, gamestate=None, wrapper_kwargs=None):
    if wrapper_kwargs is None:  # avoid a mutable default argument
        wrapper_kwargs = {}
    mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
    if env_type == 'atari':
        env = make_atari(env_id)
    elif env_type == 'retro':
        import retro
        gamestate = gamestate or retro.State.DEFAULT
        env = retro_wrappers.make_retro(game=env_id, max_episode_steps=10000, use_restricted_actions=retro.Actions.DISCRETE, state=gamestate)
    else:
        env = gym.make(env_id)

    env.seed(seed + subrank if seed is not None else None)
    env = Monitor(env,
                  logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(subrank)),
                  allow_early_resets=True)

    if env_type == 'atari':
        env = wrap_deepmind(env, **wrapper_kwargs)
    elif env_type == 'retro':
        env = retro_wrappers.wrap_deepmind_retro(env, **wrapper_kwargs)

    if reward_scale != 1:
        env = retro_wrappers.RewardScaler(env, reward_scale)

    return env 
Developer: hiwonjoon, Project: ICML2019-TREX, Lines: 27, Source: cmd_util.py

Example 6: _add_hook_if_enabled

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
def _add_hook_if_enabled():
    # An MPI runtime is expected to kill all of its child processes
    # if one of them exits abnormally or without calling `MPI_Finalize()`.
    # However, when a Python program runs on `mpi4py`, the MPI runtime
    # often fails to detect a process failure, and the remaining processes
    # hang indefinitely.
    # This is especially problematic when you run ChainerMN programs in a
    # cloud environment, where you are charged on a time basis.
    # See https://github.com/chainer/chainermn/issues/236 for more discussion.
    #
    # To activate this handler, set CHAINERMN_FORCE_ABORT_ON_EXCEPTION
    # to a non-empty value.
    # Note that you need to pass an argument to mpiexec (-x for Open MPI)
    # to propagate the environment variable to, and thereby activate the
    # handler in, all processes.
    var = os.environ.get('CHAINERMN_FORCE_ABORT_ON_EXCEPTION')
    if var is not None and len(var) > 0:
        add_hook() 
Developer: chainer, Project: chainer, Lines: 19, Source: global_except_hook.py
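
add_hook() itself is defined elsewhere in the same module; as a rough sketch, and assuming it follows the excepthook pattern shown in Example 9, it might look like this (not the actual chainermn implementation):

import sys


def add_hook():
    # Chain the existing excepthook, flush output so the traceback is not
    # lost, then abort the whole MPI job so no rank is left hanging.
    old_hook = sys.excepthook

    def new_hook(exc_type, exc_value, exc_traceback):
        old_hook(exc_type, exc_value, exc_traceback)
        sys.stdout.flush()
        sys.stderr.flush()
        from mpi4py import MPI
        MPI.COMM_WORLD.Abort(1)

    sys.excepthook = new_hook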

Example 7: test_mvpa_voxel_selection

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
import numpy as np
from mpi4py import MPI
from sklearn import svm
from brainiak.searchlight.searchlight import Searchlight
from brainiak.fcma.mvpa_voxelselector import MVPAVoxelSelector


def test_mvpa_voxel_selection():
    # prng is a module-level np.random.RandomState in the original test file
    data = prng.rand(5, 5, 5, 8).astype(np.float32)
    # all MPI processes read the mask; the mask file is small
    mask = np.ones([5, 5, 5], dtype=bool)  # np.bool was removed in NumPy 1.24
    mask[0, 0, :] = False
    labels = [0, 1, 0, 1, 0, 1, 0, 1]
    # 2 subjects, 4 epochs per subject
    sl = Searchlight(sl_rad=1)
    mvs = MVPAVoxelSelector(data, mask, labels, 2, sl)
    # for cross validation, use an SVM with an RBF kernel
    clf = svm.SVC(kernel='rbf', C=10, gamma='auto')
    result_volume, results = mvs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        output = []
        for tup in results:  # avoid shadowing the built-in name `tuple`
            if tup[1] > 0:
                output.append(int(8 * tup[1]))
        expected_output = [6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4,
                           4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 1]
        assert np.allclose(output, expected_output, atol=1), \
            'voxel selection via SVM does not provide correct results' 
Developer: brainiak, Project: brainiak, Lines: 24, Source: test_mvpa_voxel_selection.py
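
Note the shape of the test: every rank calls mvs.run(clf), but only rank 0 inspects results. The rank-0 gate suggests the voxel-selection scores are aggregated on the master process, so checking them on other ranks would be meaningless.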

Example 8: train

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
def train(env, seed, policy_fn, reward_giver, dataset, algo,
          g_step, d_step, policy_entcoeff, num_timesteps, save_per_iter,
          checkpoint_dir, log_dir, pretrained, BC_max_iter, task_name=None):

    pretrained_weight = None
    if pretrained and (BC_max_iter > 0):
        # Pretrain with behavior cloning
        from baselines.gail import behavior_clone
        pretrained_weight = behavior_clone.learn(env, policy_fn, dataset,
                                                 max_iters=BC_max_iter)

    if algo == 'trpo':
        from baselines.gail import trpo_mpi
        # Give each MPI worker its own seed, and silence the logger on non-zero ranks
        rank = MPI.COMM_WORLD.Get_rank()
        if rank != 0:
            logger.set_level(logger.DISABLED)
        workerseed = seed + 10000 * rank
        set_global_seeds(workerseed)
        env.seed(workerseed)
        trpo_mpi.learn(env, policy_fn, reward_giver, dataset, rank,
                       pretrained=pretrained, pretrained_weight=pretrained_weight,
                       g_step=g_step, d_step=d_step,
                       entcoeff=policy_entcoeff,
                       max_timesteps=num_timesteps,
                       ckpt_dir=checkpoint_dir, log_dir=log_dir,
                       save_per_iter=save_per_iter,
                       timesteps_per_batch=1024,
                       max_kl=0.01, cg_iters=10, cg_damping=0.1,
                       gamma=0.995, lam=0.97,
                       vf_iters=5, vf_stepsize=1e-3,
                       task_name=task_name)
    else:
        raise NotImplementedError 
Developer: Hwhitetooth, Project: lirpg, Lines: 36, Source: run_mujoco.py
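
The seed + 10000 * rank offset is the same per-worker seeding scheme used in Examples 2-5: runs stay reproducible for a fixed base seed, while no two MPI workers collect identical trajectories.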

Example 9: install_mpi_excepthook

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
def install_mpi_excepthook():
    import sys
    from mpi4py import MPI
    old_hook = sys.excepthook

    def new_hook(a, b, c):
        # Run the original handler first so the traceback is printed,
        # flush the streams so the output is not lost, then abort the
        # whole MPI job so the other ranks cannot hang in collective calls.
        old_hook(a, b, c)
        sys.stdout.flush()
        sys.stderr.flush()
        MPI.COMM_WORLD.Abort()
    sys.excepthook = new_hook 
Developer: Hwhitetooth, Project: lirpg, Lines: 13, Source: util.py
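
Wiring the hook in at the top of a worker script (the failing statement is illustrative only):

install_mpi_excepthook()

# From here on, an uncaught exception in any rank aborts the whole MPI job
# instead of leaving the surviving ranks blocked in collective calls.
raise RuntimeError("simulated worker failure")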

Example 10: mpi_fork

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
# Requires the same imports as Example 1: os, subprocess, sys.
def mpi_fork(n):
    """Re-launches the current script with workers
    Returns "parent" for original parent, "child" for MPI children
    """
    if n <= 1:
        return "child"
    if os.getenv("IN_MPI") is None:
        env = os.environ.copy()
        env.update(
            MKL_NUM_THREADS="1",
            OMP_NUM_THREADS="1",
            IN_MPI="1"
        )
        # "-bind-to core" is crucial for good performance
        args = [
            "mpirun",
            "-np",
            str(n),
            "-bind-to",
            "core",
            sys.executable
        ]
        args += sys.argv
        subprocess.check_call(args, env=env)
        return "parent"
    else:
        install_mpi_excepthook()
        return "child" 
Developer: Hwhitetooth, Project: lirpg, Lines: 30, Source: util.py
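
Compared with Example 1, this variant hard-codes "-bind-to core" instead of accepting it through extra_mpi_args: simpler at the call site, less flexible when a different binding policy is wanted.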

Example 11: reshape_for_broadcasting

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
import tensorflow as tf


def reshape_for_broadcasting(source, target):
    """Reshapes a tensor (source) to have the correct shape and dtype of the target
    before broadcasting it with MPI.
    """
    dim = len(target.get_shape())
    # Collapse everything into the last axis: e.g. a rank-3 target gives [1, 1, -1]
    shape = ([1] * (dim - 1)) + [-1]
    return tf.reshape(tf.cast(source, target.dtype), shape) 
Developer: Hwhitetooth, Project: lirpg, Lines: 9, Source: util.py
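
A small usage sketch (the shapes and values are assumptions, standing in for parameters synchronized over MPI):

import numpy as np

target = tf.zeros((3, 4), dtype=tf.float32)      # local parameter tensor
source = np.arange(4, dtype=np.float64)          # flat values, e.g. from rank 0
out = reshape_for_broadcasting(source, target)   # shape (1, 4), dtype float32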

Example 12: parse_args

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
import argparse

from baselines.common.misc_util import boolean_flag


def parse_args():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--env-id', type=str, default='HalfCheetah-v1')
    boolean_flag(parser, 'render-eval', default=False)
    boolean_flag(parser, 'layer-norm', default=True)
    boolean_flag(parser, 'render', default=False)
    boolean_flag(parser, 'normalize-returns', default=False)
    boolean_flag(parser, 'normalize-observations', default=True)
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--critic-l2-reg', type=float, default=1e-2)
    parser.add_argument('--batch-size', type=int, default=64)  # per MPI worker
    parser.add_argument('--actor-lr', type=float, default=1e-4)
    parser.add_argument('--critic-lr', type=float, default=1e-3)
    boolean_flag(parser, 'popart', default=False)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--reward-scale', type=float, default=1.)
    parser.add_argument('--clip-norm', type=float, default=None)
    parser.add_argument('--nb-epochs', type=int, default=500)  # with default settings, perform 1M steps total
    parser.add_argument('--nb-epoch-cycles', type=int, default=20)
    parser.add_argument('--nb-train-steps', type=int, default=50)  # per epoch cycle and MPI worker
    parser.add_argument('--nb-eval-steps', type=int, default=100)  # per epoch cycle and MPI worker
    parser.add_argument('--nb-rollout-steps', type=int, default=100)  # per epoch cycle and MPI worker
    parser.add_argument('--noise-type', type=str, default='adaptive-param_0.2')  # choices are adaptive-param_xx, ou_xx, normal_xx, none
    parser.add_argument('--num-timesteps', type=int, default=None)
    boolean_flag(parser, 'evaluation', default=False)
    args = parser.parse_args()
    # We don't directly specify timesteps for this script, so if they are
    # given, make sure they agree with the other parameters.
    if args.num_timesteps is not None:
        assert args.num_timesteps == args.nb_epochs * args.nb_epoch_cycles * args.nb_rollout_steps
    dict_args = vars(args)
    del dict_args['num_timesteps']
    return dict_args 
Developer: Hwhitetooth, Project: lirpg, Lines: 36, Source: main.py
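
As a sanity check on the defaults: 500 epochs × 20 cycles per epoch × 100 rollout steps per cycle = 1,000,000 environment steps, matching the "1M steps total" inline comment, so the assertion holds when --num-timesteps is set to that product.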

Example 13: main

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
def main(custom_args=None):
    # Configure the logger, disabling log output in child MPI processes (rank > 0).
    # custom_args is expected to be a mapping from argument names to values.
    if custom_args is None:
        custom_args = {}
    arg_parser = common_arg_parser()
    args, unknown_args = arg_parser.parse_known_args()
    extra_args = {}
    for arg in custom_args:
        if arg in vars(args).keys():
            vars(args)[arg] = custom_args[arg]
        else:
            extra_args[arg] = custom_args[arg]

    #extra_args = {k: parse(v) for k,v in parse_unknown_args(unknown_args).items()}

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure(format_strs=['stdout', 'tensorboard'])
    else:
        # Child processes get an empty format list so they stay silent
        logger.configure(format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()

    model, _ = train(args, extra_args)

    if args.save_path is not None and rank == 0:
        # Only rank 0 saves, so workers do not clobber the checkpoint
        save_path = osp.expanduser(args.save_path)
        model.save(save_path)

    if args.play:
        logger.log("Running trained model")
        env = build_env(args)
        obs = env.reset()
        while True:
            actions = model.step(obs)[0]
            obs, _, done, _ = env.step(actions)
            env.render()
            done = done.any() if isinstance(done, np.ndarray) else done
            if done:
                obs = env.reset() 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines: 42, Source: run.py

Example 14: make_atari_env

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
# Requires the same imports as Example 3.
def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
    """
    Create a wrapped, monitored SubprocVecEnv for Atari.
    """
    if wrapper_kwargs is None:
        wrapper_kwargs = {}
    mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0

    def make_env(rank):  # pylint: disable=C0111
        def _thunk():
            env = make_atari(env_id)
            env.seed(seed + 10000 * mpi_rank + rank if seed is not None else None)
            env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(rank)))
            return wrap_deepmind(env, **wrapper_kwargs)
        return _thunk

    set_global_seeds(seed)
    return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)]) 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines: 17, Source: cmd_util.py
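
This is an Atari-only counterpart of the make_vec_env helpers in Examples 3 and 4; the per-worker seed formula seed + 10000 * mpi_rank + rank is identical, but there is no DummyVecEnv fallback for num_env == 1 and no reward scaling.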

Example 15: __init__

# Required import: import mpi4py [as alias]
# Or: from mpi4py import MPI [as alias]
def __init__(self, use_mpi):
    self.use_mpi = use_mpi

    # Setup
    if self.use_mpi:
        if not mpi_available:
            raise RuntimeError('ChainerMN is required for MPI but cannot be imported. Abort.')
        comm = chainermn.create_communicator(FLAGS.comm_name)
        if comm.mpi_comm.rank == 0:
            print('==========================================')
            print('Num processes (COMM_WORLD): {}'.format(MPI.COMM_WORLD.Get_size()))
            print('Communicator name: {}'.format(FLAGS.comm_name))
            print('==========================================')
        fleet_size = MPI.COMM_WORLD.Get_size()
        # intra_rank is the process index within a node, so it doubles as the GPU id
        device = comm.intra_rank
    else:
        fleet_size = 1
        comm = None
        device = FLAGS.gpu

    self.fleet_size, self.comm, self.device = fleet_size, comm, device

    # Short-circuits before touching comm when MPI is disabled
    self.is_master = is_master = not self.use_mpi or comm.rank == 0

    # Early work (master only)
    if is_master:
        record_setting(FLAGS.out)

    # Show effective hyperparameters
    effective_hps = {
        'is_master': self.is_master,
        'stage_interval': self.stage_interval,
        'dynamic_batch_size': self.dynamic_batch_size
    }
    self.print_log('Effective hps: {}'.format(effective_hps)) 
Developer: pfnet-research, Project: chainer-stylegan, Lines: 37, Source: train.py


Note: The mpi4py.MPI attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's License before distributing or reusing the code; do not republish without permission.