

Python spaces.Box Code Examples

This article collects typical usage examples of gym.spaces.Box in Python. If you are struggling with questions such as: how exactly is spaces.Box used? How do I call spaces.Box? Where can I find real examples of spaces.Box? then the curated code examples below may help. You can also explore further usage examples from the gym.spaces module that Box belongs to.


The following presents 15 code examples of spaces.Box, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
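Before diving into the examples, here is a minimal, self-contained sketch of what a Box space is: a continuous space in which every component lies between the corresponding entries of low and high. The bounds and shape below are illustrative only and do not come from any of the projects cited later.

import numpy as np
from gym import spaces

# A 4-dimensional continuous space with every component in [-1, 1]
obs_space = spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)

sample = obs_space.sample()        # draw a random point inside the box
assert obs_space.contains(sample)  # membership test
print(obs_space.shape, obs_space.low, obs_space.high)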

Example 1: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self, renders=True):
    # start the bullet physics server
    self._renders = renders
    if (renders):
        p.connect(p.GUI)
    else:
        p.connect(p.DIRECT)

    observation_high = np.array([
          np.finfo(np.float32).max,
          np.finfo(np.float32).max,
          np.finfo(np.float32).max,
          np.finfo(np.float32).max])
    action_high = np.array([0.1])

    self.action_space = spaces.Discrete(9)
    self.observation_space = spaces.Box(-observation_high, observation_high)

    self.theta_threshold_radians = 1
    self.x_threshold = 2.4
    self._seed()
#    self.reset()
    self.viewer = None
    self._configure() 
Author: utra-robosoccer, Project: soccer-matlab, Lines: 26, Source: cartpole_bullet.py
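Note that spaces.Box(-observation_high, observation_high) uses the older positional form without an explicit dtype; recent gym versions emit a warning and autodetect the dtype in that case. A hedged, standalone equivalent with the dtype spelled out (my addition, not part of the soccer-matlab project) could look like this:

import numpy as np
from gym import spaces

observation_high = np.array([np.finfo(np.float32).max] * 4, dtype=np.float32)
# Same box as above, with the dtype made explicit to avoid the autodetection warning
observation_space = spaces.Box(low=-observation_high, high=observation_high, dtype=np.float32)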

Example 2: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self):
        self._seed()
        self.viewer = None

        self.world = Box2D.b2World()
        self.moon = None
        self.lander = None
        self.particles = []

        self.prev_reward = None

        high = np.array([np.inf]*N_OBS_DIM)  # useful range is -1 .. +1, but spikes can be higher
        self.observation_space = spaces.Box(-high, high)

        self.action_space = spaces.Discrete(N_ACT_DIM)

        self.curr_step = None

        self._reset() 
Author: xuwd11, Project: cs294-112_hws, Lines: 21, Source: lunar_lander.py

Example 3: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self, env, keys=None):
        """
        Initializes the Gym wrapper.

        Args:
            env (MujocoEnv instance): The environment to wrap.
            keys (list of strings): If provided, each observation will
                consist of concatenated keys from the wrapped environment's
                observation dictionary. Defaults to robot-state and object-state.
        """
        self.env = env

        if keys is None:
            assert self.env.use_object_obs, "Object observations need to be enabled."
            keys = ["robot-state", "object-state"]
        self.keys = keys

        # set up observation and action spaces
        flat_ob = self._flatten_obs(self.env.reset(), verbose=True)
        self.obs_dim = flat_ob.size
        high = np.inf * np.ones(self.obs_dim)
        low = -high
        self.observation_space = spaces.Box(low=low, high=high)
        low, high = self.env.action_spec
        self.action_space = spaces.Box(low=low, high=high) 
Author: StanfordVL, Project: robosuite, Lines: 27, Source: gym_wrapper.py

Example 4: observation_placeholder

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def observation_placeholder(ob_space, batch_size=None, name='Ob'):
    ''' 
    Create placeholder to feed observations into of the size appropriate to the observation space
    
    Parameters:
    ----------

    ob_space: gym.Space     observation space
    
    batch_size: int         size of the batch to be fed into input. Can be left None in most cases. 

    name: str               name of the placeholder

    Returns:
    -------

    tensorflow placeholder tensor
    '''

    assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box), \
        'Can only deal with Discrete and Box observation spaces for now'

    return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=ob_space.dtype, name=name) 
Author: MaxSobolMark, Project: HardRLWithYoutube, Lines: 25, Source: input.py

Example 5: encode_observation

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def encode_observation(ob_space, placeholder):
    '''
    Encode input in the way that is appropriate to the observation space

    Parameters:
    ----------
    
    ob_space: gym.Space             observation space
    
    placeholder: tf.placeholder     observation input placeholder
    '''
    if isinstance(ob_space, Discrete):
        return tf.to_float(tf.one_hot(placeholder, ob_space.n))

    elif isinstance(ob_space, Box):
        return tf.to_float(placeholder)
    else:
        raise NotImplementedError 
Author: MaxSobolMark, Project: HardRLWithYoutube, Lines: 20, Source: input.py
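Examples 4 and 5 are normally used together when building a TensorFlow 1.x model: the placeholder receives raw observations and encode_observation turns them into a float tensor for the network. A rough usage sketch, assuming both functions from input.py above are in scope and TensorFlow 1.x is installed; the hand-built Box space below is illustrative only:

import numpy as np
import tensorflow as tf
from gym.spaces import Box

ob_space = Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)

X = observation_placeholder(ob_space)        # tf.placeholder of shape (None, 4)
encoded = encode_observation(ob_space, X)    # for Box spaces this is just a cast to float

with tf.Session() as sess:
    batch = np.random.uniform(-1.0, 1.0, size=(8, 4)).astype(np.float32)
    print(sess.run(encoded, feed_dict={X: batch}).shape)   # (8, 4)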

Example 6: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(
            self,
            seed=0,
            episode_len=None,
            no_images=None
    ):
        from tensorflow.examples.tutorials.mnist import input_data
        # we could use temporary directory for this with a context manager and
        # TemporaryDirectory, but then each test that uses mnist would re-download the data
        # this way the data is not cleaned up, but we only download it once per machine
        mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data')
        with filelock.FileLock(mnist_path + '.lock'):
            self.mnist = input_data.read_data_sets(mnist_path)

        self.np_random = np.random.RandomState()
        self.np_random.seed(seed)

        self.observation_space = Box(low=0.0, high=1.0, shape=(28,28,1))
        self.action_space = Discrete(10)
        self.episode_len = episode_len
        self.time = 0
        self.no_images = no_images

        self.train_mode()
        self.reset() 
Author: MaxSobolMark, Project: HardRLWithYoutube, Lines: 27, Source: mnist_env.py

Example 7: get_action_type

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def get_action_type(action_space):
    '''Method to get the action type to choose prob. dist. to sample actions from NN logits output'''
    if isinstance(action_space, spaces.Box):
        shape = action_space.shape
        assert len(shape) == 1
        if shape[0] == 1:
            return 'continuous'
        else:
            return 'multi_continuous'
    elif isinstance(action_space, spaces.Discrete):
        return 'discrete'
    elif isinstance(action_space, spaces.MultiDiscrete):
        return 'multi_discrete'
    elif isinstance(action_space, spaces.MultiBinary):
        return 'multi_binary'
    else:
        raise NotImplementedError


# action_policy base methods 
Author: ConvLab, Project: ConvLab, Lines: 22, Source: policy_util.py
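To see what Example 7 returns in practice, here is a short sketch with hand-built spaces rather than real environments, assuming get_action_type from the example above is in scope:

import numpy as np
from gym import spaces

print(get_action_type(spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)))  # 'continuous'
print(get_action_type(spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)))  # 'multi_continuous'
print(get_action_type(spaces.Discrete(4)))                                            # 'discrete'
print(get_action_type(spaces.MultiBinary(3)))                                         # 'multi_binary'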

Example 8: set_gym_space_attr

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def set_gym_space_attr(gym_space):
    '''Set missing gym space attributes for standardization'''
    if isinstance(gym_space, spaces.Box):
        setattr(gym_space, 'is_discrete', False)
    elif isinstance(gym_space, spaces.Discrete):
        setattr(gym_space, 'is_discrete', True)
        setattr(gym_space, 'low', 0)
        setattr(gym_space, 'high', gym_space.n)
    elif isinstance(gym_space, spaces.MultiBinary):
        setattr(gym_space, 'is_discrete', True)
        setattr(gym_space, 'low', np.full(gym_space.n, 0))
        setattr(gym_space, 'high', np.full(gym_space.n, 2))
    elif isinstance(gym_space, spaces.MultiDiscrete):
        setattr(gym_space, 'is_discrete', True)
        setattr(gym_space, 'low', np.zeros_like(gym_space.nvec))
        setattr(gym_space, 'high', np.array(gym_space.nvec))
    else:
        raise ValueError('gym_space not recognized') 
Author: ConvLab, Project: ConvLab, Lines: 20, Source: base.py
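A short sketch of the attributes Example 8 attaches, again using hand-built spaces and assuming set_gym_space_attr from the example above is in scope:

import numpy as np
from gym import spaces

disc = spaces.Discrete(5)
set_gym_space_attr(disc)
print(disc.is_discrete, disc.low, disc.high)   # True 0 5

box = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
set_gym_space_attr(box)
print(box.is_discrete)                         # False; a Box already carries low/high arrays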

Example 9: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self, size=2, discrete=True, partially_observable=False,
                 episodic=True, deterministic=False):
        self.size = size
        self.terminal_state = size
        self.episodic = episodic
        self.partially_observable = partially_observable
        self.deterministic = deterministic
        self.n_max_offset = 1
        # (s_0, ..., s_N) + terminal state + offset
        self.n_dim_obs = self.size + 1 + self.n_max_offset
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf,
            shape=(self.n_dim_obs,), dtype=np.float32,
        )
        if discrete:
            self.action_space = spaces.Discrete(self.size)
        else:
            self.action_space = spaces.Box(
                low=-1.0, high=1.0,
                shape=(self.size,), dtype=np.float32,
            ) 
Author: chainer, Project: chainerrl, Lines: 23, Source: abc.py

Example 10: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self, env, channel_order='hwc'):
        """Warp frames to 84x84 as done in the Nature paper and later work.

        To use this wrapper, OpenCV-Python is required.
        """
        if not _is_cv2_available:
            raise RuntimeError('Cannot import cv2 module. Please install OpenCV-Python to use WarpFrame.')  # NOQA
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        shape = {
            'hwc': (self.height, self.width, 1),
            'chw': (1, self.height, self.width),
        }
        self.observation_space = spaces.Box(
            low=0, high=255,
            shape=shape[channel_order], dtype=np.uint8) 
Author: chainer, Project: chainerrl, Lines: 19, Source: atari_wrappers.py
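Judging by the docstring, this is the WarpFrame wrapper from chainerrl's atari_wrappers. A hedged usage sketch, assuming gym's Atari extras and OpenCV are installed, an older gym where reset() returns only the observation, and that the import path guessed below is correct:

import gym
from chainerrl.wrappers.atari_wrappers import WarpFrame  # assumed import path

env = WarpFrame(gym.make('PongNoFrameskip-v4'), channel_order='hwc')
print(env.observation_space)   # an 84x84x1 uint8 Box
obs = env.reset()
print(obs.shape)               # (84, 84, 1)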

Example 11: observation_placeholder

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def observation_placeholder(ob_space, batch_size=None, name='Ob'):
    '''
    Create placeholder to feed observations into of the size appropriate to the observation space

    Parameters:
    ----------

    ob_space: gym.Space     observation space

    batch_size: int         size of the batch to be fed into input. Can be left None in most cases.

    name: str               name of the placeholder

    Returns:
    -------

    tensorflow placeholder tensor
    '''

    assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box), \
        'Can only deal with Discrete and Box observation spaces for now'

    return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=ob_space.dtype, name=name) 
Author: quantumiracle, Project: Reinforcement_Learning_for_Traffic_Light_Control, Lines: 25, Source: input.py

Example 12: encode_observation

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def encode_observation(ob_space, placeholder):
    '''
    Encode input in the way that is appropriate to the observation space

    Parameters:
    ----------

    ob_space: gym.Space             observation space

    placeholder: tf.placeholder     observation input placeholder
    '''
    if isinstance(ob_space, Discrete):
        return tf.to_float(tf.one_hot(placeholder, ob_space.n))

    elif isinstance(ob_space, Box):
        return tf.to_float(placeholder)
    else:
        raise NotImplementedError 
Author: quantumiracle, Project: Reinforcement_Learning_for_Traffic_Light_Control, Lines: 20, Source: input.py

Example 13: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self, file_name, batch_size=128, n_step=1):
        # create an offline_env to do fake interaction with agent
        self.num_epoch = 0
        self.num_record = 0
        self._offset = 0

        # how many records to read from the table at a time
        self.batch_size = batch_size
        # number of steps reserved for n-step DQN
        self.n_step = n_step

        # define the shapes of the observation and action
        # we follow the definition of gym.spaces:
        # `Box` for continuous spaces, `Discrete` for discrete spaces and `Dict` for multiple inputs
        # the low/high limits are not actually used by the agent but are required by gym.spaces
        self.observation_space = Box(low=-np.inf, high=np.inf, shape=(4,))
        self.action_space = Discrete(n=2)

        fr = open(file_name)
        self.data = fr.readlines()
        self.num_record = len(self.data)
        fr.close() 
Author: alibaba, Project: EasyRL, Lines: 24, Source: run_bcq_on_batchdata.py

Example 14: __init__

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def __init__(self, game='pong', obs_type='image', buf_size=4, gray=True,
                 frameskip=4, repeat_action_probability=0.):
        super(MultiFrameAtariEnv, self).__init__(game=game, obs_type=obs_type,
                                                 frameskip=frameskip,
                                                 repeat_action_probability=repeat_action_probability)
        self._cur_st = None
        self._nx_st = None
        self._img_buf = deque(maxlen=buf_size)
        self._gray = gray
        self._shape = (84, 84)
        if self._gray:
            self.observation_space = spaces.Box(low=0, high=255,
                                                shape=(self._shape[0], self._shape[1], buf_size),
                                                dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(low=0, high=255,
                                                shape=(self._shape[0], self._shape[1], 3, buf_size),
                                                dtype=np.uint8)
        self._initialize() 
Author: neka-nat, Project: distributed_rl, Lines: 21, Source: wrapped_env.py

Example 15: wrap_adv_noise_ball

# Module to import: from gym import spaces [as alias]
# Or: from gym.spaces import Box [as alias]
def wrap_adv_noise_ball(env_name, our_idx, multi_venv, adv_noise_params, deterministic):
    adv_noise_agent_val = adv_noise_params["noise_val"]
    base_policy_path = adv_noise_params["base_path"]
    base_policy_type = adv_noise_params["base_type"]
    base_policy = load_policy(
        policy_path=base_policy_path,
        policy_type=base_policy_type,
        env=multi_venv,
        env_name=env_name,
        index=our_idx,
    )

    base_action_space = multi_venv.action_space.spaces[our_idx]
    adv_noise_action_space = Box(
        low=adv_noise_agent_val * base_action_space.low,
        high=adv_noise_agent_val * base_action_space.high,
    )
    multi_venv = MergeAgentVecEnv(
        venv=multi_venv,
        policy=base_policy,
        replace_action_space=adv_noise_action_space,
        merge_agent_idx=our_idx,
        deterministic=deterministic,
    )
    return multi_venv 
Author: HumanCompatibleAI, Project: adversarial-policies, Lines: 27, Source: train.py
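The Box-specific step in Example 15 is shrinking the base action space by a scalar noise level, so the adversarial noise agent can only act inside a small ball around zero. A standalone sketch of just that scaling, using a hand-built base action space instead of the multi-agent vectorized environment:

import numpy as np
from gym.spaces import Box

base_action_space = Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
noise_val = 0.1

adv_noise_action_space = Box(
    low=noise_val * base_action_space.low,    # [-0.1, -0.1, -0.1]
    high=noise_val * base_action_space.high,  # [ 0.1,  0.1,  0.1]
)
print(adv_noise_action_space.low, adv_noise_action_space.high)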


Note: The gym.spaces.Box examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright on the source code remains with the original authors; please consult the corresponding project's license before distributing or using the code. Do not reproduce this article without permission.