

Python gym.utils Code Examples

This article collects typical usage examples of Python's gym.utils module. If you are wondering what gym.utils does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore the broader gym package for further usage examples.


The following presents 14 code examples involving gym.utils, drawn from open-source projects and ordered by popularity by default.
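Most of the examples below exercise a small set of gym.utils facilities. As a quick orientation, the following minimal sketch lists the standard imports involved (this summary is an editorial addition, not part of any project quoted below):

from gym import utils            # utils.EzPickle: rebuilds an env from its stored constructor args on unpickling
from gym.utils import seeding    # seeding.np_random(seed) -> (rng, seed): reproducible per-env randomness
from gym.utils.play import play  # interactive keyboard play loop, driven by a keys-to-action mapping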

Example 1: change_vehicles

# Required module: import gym
# Or: from gym import utils
def change_vehicles(self, vehicle_class_path: str) -> 'AbstractEnv':
        """
        Change the type of all vehicles on the road

        :param vehicle_class_path: The path of the class of behavior for other vehicles
                             Example: "highway_env.vehicle.behavior.IDMVehicle"
        :return: a new environment with modified behavior model for other vehicles
        """
        vehicle_class = utils.class_from_path(vehicle_class_path)

        env_copy = copy.deepcopy(self)
        vehicles = env_copy.road.vehicles
        for i, v in enumerate(vehicles):
            if v is not env_copy.vehicle:
                vehicles[i] = vehicle_class.create_from(v)
        return env_copy 
Author: eleurent | Project: highway-env | Lines: 18 | Source: abstract.py
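A hedged usage sketch for the method above. It assumes highway-env is installed and that its registered "highway-v0" environment exposes change_vehicles via unwrapped; note that class_from_path in the snippet appears to come from highway-env's own utils module rather than gym.utils, and copy is the standard-library module:

import gym
import highway_env  # noqa: F401 -- importing registers the highway environments

env = gym.make("highway-v0")
env.reset()
# Returns a deep copy of the env with every non-ego vehicle's behaviour model swapped.
env = env.unwrapped.change_vehicles("highway_env.vehicle.behavior.IDMVehicle")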

Example 2: _seed

# Required module: import gym
# Or: from gym import utils
def _seed(self, seed=None):
		self.np_random, seed = gym.utils.seeding.np_random(seed)
		self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env
		return [seed] 
Author: utra-robosoccer | Project: soccer-matlab | Lines: 6 | Source: env_bases.py
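Examples 2, 4, 9 and 10 all follow the same gym seeding idiom: gym.utils.seeding.np_random(seed) returns a NumPy random generator together with the seed that was actually used, and the env routes all of its randomness through that generator (here it is additionally shared with the robot). A minimal sketch, with TinyEnv as a made-up class for illustration:

import gym
from gym.utils import seeding

class TinyEnv(gym.Env):
    def seed(self, seed=None):
        # np_random(seed) -> (NumPy random generator, seed actually used)
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        # All stochasticity goes through self.np_random, so re-seeding with
        # the same value reproduces the same initial states.
        self.state = self.np_random.uniform(-1.0, 1.0, size=2)
        return self.state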

Example 3: get_keys_to_action

# Required module: import gym
# Or: from gym import utils
def get_keys_to_action(self):
    """Get mapping from keyboard keys to actions.

    Required by gym.utils.play in environment or top level wrapper.

    Returns:
      {
        Unicode code point for keyboard key: action (formatted for step()),
        ...
      }
    """
    # Based on gym AtariEnv.get_keys_to_action()
    keyword_to_key = {
        "UP": ord("w"),
        "DOWN": ord("s"),
        "LEFT": ord("a"),
        "RIGHT": ord("d"),
        "FIRE": ord(" "),
    }

    keys_to_action = {}

    for action_id, action_meaning in enumerate(self.action_meanings):
      keys_tuple = tuple(sorted([
          key for keyword, key in keyword_to_key.items()
          if keyword in action_meaning]))
      assert keys_tuple not in keys_to_action
      keys_to_action[keys_tuple] = action_id

    # Special actions:
    keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION
    keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION
    keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION

    return keys_to_action 
Author: tensorflow | Project: tensor2tensor | Lines: 37 | Source: player.py
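A hedged sketch of how such a mapping is consumed: gym.utils.play.play drives an environment from the keyboard, taking the mapping either from a keys_to_action argument or from a get_keys_to_action method on the env. The snippet assumes an Atari ROM is installed and an older gym where play handles rendering itself (newer versions may require render_mode="rgb_array" at gym.make time):

import gym
from gym.utils.play import play

env = gym.make("PongNoFrameskip-v4")
# Keys are tuples of key codes; the empty tuple means "no key pressed".
play(env, fps=30, zoom=3, keys_to_action=env.unwrapped.get_keys_to_action())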

Example 4: _seed

# Required module: import gym
# Or: from gym import utils
def _seed(self, seed=None):
    from gym.utils import seeding
    self.np_random, seed = seeding.np_random(seed)
    return [seed] 
Author: vladfi1 | Project: gym-dolphin | Lines: 6 | Source: ssbm_env.py

Example 5: __init__

# Required module: import gym
# Or: from gym import utils
def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):
        """Frameskip should be either a tuple (indicating a random range to
        choose from, with the top value excluded), or an int."""

        utils.EzPickle.__init__(self, game, obs_type)
        assert obs_type in ('ram', 'image')

        self.game_path = atari_py.get_game_path(game)
        if not os.path.exists(self.game_path):
            raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))
        self._obs_type = obs_type
        self.frameskip = frameskip
        self.ale = atari_py.ALEInterface()
        self.viewer = None

        # Tune (or disable) ALE's action repeat:
        # https://github.com/openai/gym/issues/349
        assert isinstance(repeat_action_probability, (float, int)), "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
        self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)

        self.seed()

        self._action_set = self.ale.getMinimalActionSet()
        self.action_space = spaces.Discrete(len(self._action_set))

        (screen_width,screen_height) = self.ale.getScreenDims()
        if self._obs_type == 'ram':
            self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
        elif self._obs_type == 'image':
            self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
        else:
            raise error.Error('Unrecognized observation type: {}'.format(self._obs_type)) 
Author: ArztSamuel | Project: DRL_DeliveryDuel | Lines: 34 | Source: atari_env.py
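Examples 5, 8 and 14 call utils.EzPickle.__init__ with the constructor arguments. EzPickle serializes only those arguments and rebuilds the object by re-running __init__ on unpickling (or deep-copying), which is how envs holding unpicklable handles such as the C++ ALEInterface survive pickle and copy.deepcopy. A minimal sketch with a made-up FooEnv:

import pickle
import gym
from gym import utils

class FooEnv(gym.Env, utils.EzPickle):
    def __init__(self, difficulty=1):
        # Store the ctor args; __setstate__ later re-runs __init__ with them.
        utils.EzPickle.__init__(self, difficulty)
        self.difficulty = difficulty
        self.handle = object()  # stands in for a resource that is never pickled directly

clone = pickle.loads(pickle.dumps(FooEnv(difficulty=3)))
assert clone.difficulty == 3  # reconstructed by calling FooEnv(3) again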

Example 6: __init__

# Required module: import gym
# Or: from gym import utils
def __init__(self, width=5, height=5, render_type='cubes', num_objects=5,
                 seed=None):
        self.width = width
        self.height = height
        self.render_type = render_type

        self.num_objects = num_objects
        self.num_actions = 4 * self.num_objects  # Move NESW

        self.colors = utils.get_colors(num_colors=max(9, self.num_objects))

        self.np_random = None
        self.game = None
        self.target = None

        # Initialize to pos outside of env for easier collision resolution.
        self.objects = [[-1, -1] for _ in range(self.num_objects)]

        # If True, then check for collisions and don't allow two
        #   objects to occupy the same position.
        self.collisions = True

        self.action_space = spaces.Discrete(self.num_actions)
        self.observation_space = spaces.Box(
            low=0, high=1,
            shape=(3, self.width, self.height),
            dtype=np.float32
        )

        self.seed(seed)
        self.reset() 
Author: tkipf | Project: c-swm | Lines: 33 | Source: block_pushing.py

Example 7: __init__

# Required module: import gym
# Or: from gym import utils
def __init__(self, screen_ratio=4, coords_ratio=4, use_color=True, use_rc_frame=True, stack=3, frame_skip=4, action_repeat=4):
        utils.EzPickle.__init__(self, 'montezuma_revenge', 'image')
        self.env = gym.make('MontezumaRevengeNoFrameskip-v4').unwrapped
        self.ale = self.env.ale
        self.ale.setFloat('repeat_action_probability'.encode('utf-8'), 0) # deterministic
        self.max_lives = self.ale.lives()
        # observations
        self.screen_ratio = screen_ratio
        self.original_height = 224
        self.original_width = 160
        self.screen_height = self.original_height // screen_ratio
        self.screen_width = self.original_width // screen_ratio
        self.screen_shape = (self.screen_height, self.screen_width)
        self.use_color = use_color
        self.use_rc_frame = use_rc_frame
        self.stack = stack
        self.frame_skip = frame_skip
        n_frames = stack * (3 * use_color + 1 * (not use_color) + use_rc_frame)
        self.frames = deque([], maxlen=(self.frame_skip * (self.stack - 1) + 1))
        self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, n_frames))
        # coordinates
        self.coords_ratio = coords_ratio
        assert coords_ratio % screen_ratio == 0, (coords_ratio, screen_ratio)
        self.coords_screen_ratio = coords_ratio // screen_ratio
        self.coords_height = self.original_height // coords_ratio
        self.coords_width = self.original_width // coords_ratio
        self.coords_shape = (self.coords_height, self.coords_width)
        # actions
        self.action_repeat = action_repeat
        self.action_names = ['LEFTFIRE', 'UP', 'RIGHTFIRE', 'LEFT', 'NOOP', 'RIGHT', 'DOWN']
        self.action_list = [actions[n] for n in self.action_names]
        n_actions = len(self.action_list)
        self.action_space = spaces.Discrete(n_actions)
        # miscellaneous
        frame_name = 'RGB' if use_color else 'G'
        if use_rc_frame: frame_name += 'C'
        self.name = 'CustomMontezuma_obs{}x{}x{}x{}_qframes{}x{}x{}_skip{}_repeat{}-v0'.format(
            *self.screen_shape, frame_name, stack, *self.coords_shape, n_actions, frame_skip, action_repeat) 
Author: fabiopardo | Project: qmap | Lines: 40 | Source: custom_montezuma.py

Example 8: __init__

# Required module: import gym
# Or: from gym import utils
def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):
        """Frameskip should be either a tuple (indicating a random range to
        choose from, with the top value excluded), or an int."""

        utils.EzPickle.__init__(self, game, obs_type, frameskip, repeat_action_probability)
        assert obs_type in ('ram', 'image')

        self.game_path = atari_py.get_game_path(game)
        if not os.path.exists(self.game_path):
            raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))
        self._obs_type = obs_type
        self.frameskip = frameskip
        self.ale = atari_py.ALEInterface()
        self.viewer = None

        # Tune (or disable) ALE's action repeat:
        # https://github.com/openai/gym/issues/349
        assert isinstance(repeat_action_probability, (float, int)), "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
        self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)

        self.seed()

        self._action_set = self.ale.getMinimalActionSet()
        self.action_space = spaces.Discrete(len(self._action_set))

        (screen_width,screen_height) = self.ale.getScreenDims()
        if self._obs_type == 'ram':
            self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
        elif self._obs_type == 'image':
            self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
        else:
            raise error.Error('Unrecognized observation type: {}'.format(self._obs_type)) 
Author: joanby | Project: ia-course | Lines: 34 | Source: atari_env.py

Example 9: _seed

# Required module: import gym
# Or: from gym import utils
def _seed(self, seed=None):
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed] 
Author: gkahn13 | Project: GtS | Lines: 5 | Source: env_bases.py

Example 10: _seed

# Required module: import gym
# Or: from gym import utils
def _seed(self, seed=None):
		self.np_random, seed = gym.utils.seeding.np_random(seed)
		return [seed] 
Author: benelot | Project: bullet-gym | Lines: 5 | Source: gym_mujoco_xml_env.py

Example 11: __init__

# Required module: import gym
# Or: from gym import utils
def __init__(self, client_id, base_url=allocator_base,
                 address_type=None, start_timeout=None, api_key=None,
                 runtime_id=None, params=None, placement=None,
                 use_recorder_ports=False,
    ):
        super(AllocatorManager, self).__init__()
        self.label = 'AllocatorManager'

        self.supports_reconnect = True
        self.connect_vnc = True
        self.connect_rewarder = True

        if address_type is None: address_type = 'public'
        if address_type not in ['public', 'pod', 'private']:
            raise error.Error('Bad address type specified: {}. Must be public, pod, or private.'.format(address_type))

        self.client_id = client_id
        self.address_type = address_type

        if start_timeout is None:
            start_timeout = 20 * 60
        self.start_timeout = start_timeout
        self.params = params
        self.placement = placement
        self.use_recorder_ports = use_recorder_ports

#         if base_url is None:
#             base_url = scoreboard.api_base
#         if base_url is None:
#             base_url = gym_base_url
#         if api_key is None:
#             api_key = scoreboard.api_key
#         if api_key is None:
#             raise gym.error.AuthenticationError("""You must provide an OpenAI Gym API key.

# (HINT: Set your API key using "gym.scoreboard.api_key = .." or "export OPENAI_GYM_API_KEY=..."). You can find your API key in the OpenAI Gym web interface: https://gym.openai.com/settings/profile.""")

        if api_key is None:
            api_key = _api_key
        self._requestor = AllocatorClient(self.label, api_key, base_url=base_url)
        self.base_url = base_url

        # These could be overridden on a per-allocation basis, if you
        # want heterogeneous envs. We don't support those currently
        # in the higher layers, but this layer could support it
        # easily.
        self.runtime_id = runtime_id

        self.pending = {}

        self.error_buffer = utils.ErrorBuffer()
        self.requests = queue.Queue()
        self.ready = queue.Queue()

        self._reconnect_history = {}
        self._sleep = 1 
Author: openai | Project: universe | Lines: 58 | Source: allocator_remote.py

Example 12: reset

# Required module: import gym
# Or: from gym import utils
def reset(self):
        del self.client
        self.init_airsim_client()
        try:
            # def simSetVehiclePose(self, pose, ignore_collison, vehicle_name=''):
            # def simGetVehiclePose(self, vehicle_name=''):
            # def simGetObjectPose(self, object_name):
            self.client.enableApiControl(True)
            self.client.reset()

            # position = airsim.Vector3r(self.env_start_pos[0], self.env_start_pos[1], self.env_start_pos[2])
            # heading = airsim.utils.to_quaternion(0, 0, 0)
            # pose = airsim.Pose(position, heading)
            # self.client.simSetVehiclePose(pose, ignore_collison=True)
            if(self.first_run == 1):
                # self.client.simSetVehiclePose(pose, ignore_collison=False)
                self.first_run = 0
            self.client.enableApiControl(True)
            self.client.armDisarm(True)

            # self.client.simSetVehiclePose(pose=self.air_sim_vehicle_pose, ignore_collison=1)
            self.client.enableApiControl(True)
            self.client.takeoffAsync(1)
            self.client.hoverAsync()
            self.get_state()
            self.need_replan = 1

            # self.client.simPause()
        except Exception as e:
            print(e)
            print(colorize("===== Error in reset =====","red"))
            self.init_airsim_client()
            self.reset()

        if (self.if_log):
            self.plot_log()
        self.log_idx = 0
        self.step_count = 0
        self.sum_reward = 0.0
        self.sim_times = self.sim_times + 1
        self.client.moveByAngleThrottleAsync(0,0,0.6,0,3e8)
        time.sleep(1)
        return np.array(self.state) 
Author: hku-mars | Project: crossgap_il_rl | Lines: 45 | Source: cross_grap_env.py

Example 13: reset

# Required module: import gym
# Or: from gym import utils
def reset(self):
        del self.client
        self.init_airsim_client()
        try:
            # def simSetVehiclePose(self, pose, ignore_collison, vehicle_name=''):
            # def simGetVehiclePose(self, vehicle_name=''):
            # def simGetObjectPose(self, object_name):
            self.client.enableApiControl(True)
            self.client.reset()

            # position = airsim.Vector3r(self.env_start_pos[0], self.env_start_pos[1], self.env_start_pos[2])
            # heading = airsim.utils.to_quaternion(0, 0, 0)
            # pose = airsim.Pose(position, heading)
            # self.client.simSetVehiclePose(pose, ignore_collison=True)
            if(self.first_run == 1):
                # self.client.simSetVehiclePose(pose, ignore_collison=False)
                self.first_run = 0
            self.client.enableApiControl(True)
            self.client.armDisarm(True)

            # self.client.simSetVehiclePose(pose=self.air_sim_vehicle_pose, ignore_collison=1)
            self.client.enableApiControl(True)
            self.client.takeoffAsync(0.01)
            self.client.hoverAsync()
            self.get_state()

            self.need_replan = 1
            self.first_reward = 1

            # self.client.simPause()
        except Exception as e:
            print(e)
            print(colorize("===== Error in reset =====","red"))
            self.init_airsim_client()
            self.reset()

        if (self.if_log):
            self.plot_log()
        self.log_idx = 0
        self.step_count = 0
        self.sum_reward = 0.0
        self.sim_times = self.sim_times + 1
        self.client.moveByAngleThrottleAsync(0,0,0.6,0,3e8)
        time.sleep(1)
        return np.array(self.state) 
Author: hku-mars | Project: crossgap_il_rl | Lines: 47 | Source: cross_grap_env_v2.py

Example 14: __init__

# Required module: import gym
# Or: from gym import utils
def __init__(
            self,
            game='pong',
            mode=None,
            difficulty=None,
            obs_type='ram',
            frameskip=(2, 5),
            repeat_action_probability=0.,
            full_action_space=False):
        """Frameskip should be either a tuple (indicating a random range to
        choose from, with the top value excluded), or an int."""

        utils.EzPickle.__init__(
                self,
                game,
                mode,
                difficulty,
                obs_type,
                frameskip,
                repeat_action_probability)
        assert obs_type in ('ram', 'image')

        self.game = game
        self.game_path = atari_py.get_game_path(game)
        self.game_mode = mode
        self.game_difficulty = difficulty

        if not os.path.exists(self.game_path):
            msg = 'You asked for game %s but path %s does not exist'
            raise IOError(msg % (game, self.game_path))
        self._obs_type = obs_type
        self.frameskip = frameskip
        self.ale = atari_py.ALEInterface()
        self.viewer = None

        # Tune (or disable) ALE's action repeat:
        # https://github.com/openai/gym/issues/349
        assert isinstance(repeat_action_probability, (float, int)), \
                "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
        self.ale.setFloat(
                'repeat_action_probability'.encode('utf-8'),
                repeat_action_probability)

        self.seed()

        self._action_set = (self.ale.getLegalActionSet() if full_action_space
                            else self.ale.getMinimalActionSet())
        self.action_space = spaces.Discrete(len(self._action_set))

        (screen_width, screen_height) = self.ale.getScreenDims()
        if self._obs_type == 'ram':
            self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
        elif self._obs_type == 'image':
            self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
        else:
            raise error.Error('Unrecognized observation type: {}'.format(self._obs_type)) 
Author: hust512 | Project: DQN-DDPG_Stock_Trading | Lines: 58 | Source: atari_env.py
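The full_action_space flag is what distinguishes this variant from Examples 5 and 8: getLegalActionSet() exposes all 18 ALE actions, while getMinimalActionSet() keeps only those the game actually uses. A hedged check, assuming the Pong ROM is installed and a gym version whose gym.make forwards extra keyword arguments to the registered Atari env:

import gym

minimal = gym.make('Pong-v4')
full = gym.make('Pong-v4', full_action_space=True)
# Pong's minimal action set has 6 actions; the full ALE legal set has 18.
print(minimal.action_space.n, full.action_space.n)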


Note: The gym.utils examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's License before distributing or reusing the code, and do not republish without permission.