

Python registration.EnvSpec Method Code Examples

This article collects typical usage examples of the gym.envs.registration.EnvSpec method in Python. If you are wondering what registration.EnvSpec is for, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the module it belongs to, gym.envs.registration.


The sections below show 8 code examples of the registration.EnvSpec method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
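
Before the examples, here is a minimal sketch of how an EnvSpec can be constructed directly and how environments are more commonly registered through the module-level register helper. The environment id 'MyEnv-v0' and the entry point are hypothetical placeholders, and the exact EnvSpec constructor arguments differ across gym versions, so treat this as an illustrative sketch rather than a canonical recipe.

import gym
from gym.envs import registration

# Hypothetical id and entry point, for illustration only; this EnvSpec signature
# matches older gym releases and may differ in newer versions.
spec = registration.EnvSpec(
    'MyEnv-v0',
    entry_point='my_package.envs:MyEnv',
    max_episode_steps=200,
    reward_threshold=195.0,
)
print(spec.id, spec.max_episode_steps)

# In most code, environments are registered via the helper below, which builds
# and stores the EnvSpec internally so that gym.make('MyEnv-v0') can find it.
registration.register(
    id='MyEnv-v0',
    entry_point='my_package.envs:MyEnv',
    max_episode_steps=200,
)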

Example 1: __init__

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def __init__(self, env, record_video=True, video_schedule=None,
             log_dir=None, timestep_limit=9999):
    # Ensure the version saved to disk doesn't monitor into our log_dir
    locals_no_monitor = dict(locals())
    locals_no_monitor['log_dir'] = None
    locals_no_monitor['record_video'] = False
    locals_no_monitor['video_schedule'] = None
    Serializable.quick_init(self, locals_no_monitor)

    self.env = env
    self._observation_space = to_rllab_space(env.observation_space)
    self._action_space = to_rllab_space(env.action_space)
    self.env.spec = EnvSpec('GymEnv-v0')

    monitor.logger.setLevel(logging.WARNING)
    if not record_video:
        self.video_schedule = NoVideoSchedule()
    else:
        if video_schedule is None:
            self.video_schedule = CappedCubicVideoSchedule()
        else:
            self.video_schedule = video_schedule
    self.set_log_dir(log_dir)

    self._horizon = timestep_limit
Developer: vicariousinc, Project: pixelworld, Lines of code: 27, Source file: gym_env.py

Example 2: __init__

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def __init__(self, max_episode_steps_coeff=1, scale=20, goal_padding=2.0):
    super(PointMass, self).__init__()
    # define the scale such that each square in the grid is 1 x 1
    self.scale = int(scale)
    self.grid_size = self.scale * self.scale
    self.observation_space = gym.spaces.Box(
        low=np.array([0.0, 0.0]),
        high=np.array([1.0, 1.0]))
    self.action_space = gym.spaces.Box(
        low=np.array([-np.inf, -np.inf]),
        high=np.array([np.inf, np.inf]))
    self.goal_padding = goal_padding
    self.spec = EnvSpec(id='PointMass-v0', max_episode_steps=int(max_episode_steps_coeff * self.scale))
Developer: xuwd11, Project: cs294-112_hws, Lines of code: 15, Source file: pointmass.py

Example 3: register

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def register(id, **kwargs):
    """Idempotent version of gym.envs.registration.registry.

    Needed since aprl.envs can get imported multiple times, e.g. when deserializing policies.
    """
    try:
        existing_spec = registration.spec(id)
        new_spec = registration.EnvSpec(id, **kwargs)
        assert existing_spec.__dict__ == new_spec.__dict__
    except gym.error.UnregisteredEnv:  # not previously registered
        registration.register(id, **kwargs)


# Low-dimensional multi-agent environments 
Developer: HumanCompatibleAI, Project: adversarial-policies, Lines of code: 16, Source file: __init__.py
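
As a follow-up to Example 3, the sketch below shows how such an idempotent register helper might be called. The environment id and entry point are hypothetical placeholders, and the helper is assumed to be the function defined in the example above.

# Assumes the idempotent register helper from Example 3 is in scope.
ENV_ID = 'IdempotentDemo-v0'  # hypothetical id, for illustration only
ENV_KWARGS = dict(entry_point='my_package.envs:DemoEnv', max_episode_steps=100)

# The first call registers the environment with gym; repeating the call with
# identical kwargs is a no-op because the freshly built EnvSpec matches the
# existing one. Re-registering the same id with different kwargs would instead
# trip the assertion.
register(ENV_ID, **ENV_KWARGS)
register(ENV_ID, **ENV_KWARGS)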

Example 4: meta_reset

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def meta_reset(self, seed):
    np.random.seed(seed)

    env = RandomWeightHopperEnv(rand_mass=self.rand_mass,
                                rand_gravity=self.rand_gravity,
                                rand_friction=self.rand_friction,
                                rand_thickness=self.rand_thickness)

    # Based on Hopper-v2
    spec = EnvSpec(
        'RandomWeightHopperEnv-v0',
        entry_point='generic_rl.envs.mujoco:RandomWeightHopperEnv',
        max_episode_steps=1000,
        reward_threshold=3800.0
    )

    env._spec = spec
    env.seed(seed)

    # Wrap the env as needed
    env = TimeLimit(
        env,
        max_episode_steps=spec.max_episode_steps,
        max_episode_seconds=spec.max_episode_seconds
    )

    self.env = env
    # Fix for done flags.
    self.env.reset()
    self.step = env.step
    self.render = env.render
    self.reset = env.reset
Developer: openai, Project: EPG, Lines of code: 34, Source file: random_robots.py

Example 5: __init__

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def __init__(self):
    self.action_space = spaces.Discrete(2)
    self.observation_space = DICT_SPACE
    self._spec = EnvSpec("NestedDictEnv-v0")
    self.steps = 0
Developer: ray-project, Project: ray, Lines of code: 7, Source file: test_nested_observation_spaces.py

Example 6: specification

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def specification(self) -> EnvSpec:
    """ Return environment specification """
    raise NotImplementedError
Developer: MillionIntegrals, Project: vel, Lines of code: 5, Source file: env_base.py

Example 7: specification

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def specification(self) -> EnvSpec:
    """ Return environment specification """
    return gym.spec(self.envname)
Developer: MillionIntegrals, Project: vel, Lines of code: 5, Source file: classic_control.py

Example 8: __init__

# Required import: from gym.envs import registration [as alias]
# Or: from gym.envs.registration import EnvSpec [as alias]
def __init__(self, goal_reaching_thresholds=np.array([0.075, 0.075, 0.75]),
             goal_not_reached_penalty=-1, goal_reached_reward=0, terminate_on_goal_reaching=True,
             time_limit=1000, frameskip=1, random_goals_instead_of_standing_goal=False,
             polar_coordinates: bool = False):
    super().__init__()
    dir = os.path.dirname(__file__)
    model = load_model_from_path(dir + "/pendulum_with_goals.xml")

    self.sim = MjSim(model)
    self.viewer = None
    self.rgb_viewer = None

    self.frameskip = frameskip
    self.goal = None
    self.goal_reaching_thresholds = goal_reaching_thresholds
    self.goal_not_reached_penalty = goal_not_reached_penalty
    self.goal_reached_reward = goal_reached_reward
    self.terminate_on_goal_reaching = terminate_on_goal_reaching
    self.time_limit = time_limit
    self.current_episode_steps_counter = 0
    self.random_goals_instead_of_standing_goal = random_goals_instead_of_standing_goal
    self.polar_coordinates = polar_coordinates

    # spaces definition
    self.action_space = spaces.Box(low=-self.sim.model.actuator_ctrlrange[:, 1],
                                   high=self.sim.model.actuator_ctrlrange[:, 1],
                                   dtype=np.float32)
    if self.polar_coordinates:
        self.observation_space = spaces.Dict({
            "observation": spaces.Box(low=np.array([-np.pi, -15]),
                                      high=np.array([np.pi, 15]),
                                      dtype=np.float32),
            "desired_goal": spaces.Box(low=np.array([-np.pi, -15]),
                                       high=np.array([np.pi, 15]),
                                       dtype=np.float32),
            "achieved_goal": spaces.Box(low=np.array([-np.pi, -15]),
                                        high=np.array([np.pi, 15]),
                                        dtype=np.float32)
        })
    else:
        self.observation_space = spaces.Dict({
            "observation": spaces.Box(low=np.array([-1, -1, -15]),
                                      high=np.array([1, 1, 15]),
                                      dtype=np.float32),
            "desired_goal": spaces.Box(low=np.array([-1, -1, -15]),
                                       high=np.array([1, 1, 15]),
                                       dtype=np.float32),
            "achieved_goal": spaces.Box(low=np.array([-1, -1, -15]),
                                        high=np.array([1, 1, 15]),
                                        dtype=np.float32)
        })

    self.spec = EnvSpec('PendulumWithGoals-v0')
    self.spec.reward_threshold = self.goal_not_reached_penalty * self.time_limit

    self.reset()
Developer: NervanaSystems, Project: coach, Lines of code: 58, Source file: pendulum_with_goals.py


Note: The gym.envs.registration.EnvSpec examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.