This article collects typical usage examples of gym.envs.mujoco.mujoco_env.MujocoEnv in Python. If you are wondering what mujoco_env.MujocoEnv is for, how to use it, or want to see it in real code, the curated examples below may help. You can also explore the containing module, gym.envs.mujoco.mujoco_env, for more context.
The following 15 code examples of mujoco_env.MujocoEnv are shown, sorted by popularity by default.
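All 15 examples follow the same basic pattern: a subclass of mujoco_env.MujocoEnv calls mujoco_env.MujocoEnv.__init__(self, model_path, frame_skip) from its own __init__, usually together with utils.EzPickle.__init__. The minimal sketch below illustrates that pattern against the older mujoco_py-based gym API; the class name MyAntEnv and the trivial reward are illustrative placeholders, not code from any example on this page.

import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env


class MyAntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Minimal custom MuJoCo environment (hypothetical sketch)."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        # model_path may be a bundled asset name (e.g. 'ant.xml') or an
        # absolute path to a custom XML; the second argument is frame_skip.
        mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)

    def step(self, action):
        self.do_simulation(action, self.frame_skip)
        obs = self._get_obs()
        reward = 0.0        # illustrative: no task-specific reward here
        done = False
        return obs, reward, done, {}

    def _get_obs(self):
        # joint positions and velocities, flattened
        return np.concatenate([self.sim.data.qpos.flat, self.sim.data.qvel.flat])

    def reset_model(self):
        # restart from the initial state with a little uniform noise
        qpos = self.init_qpos + self.np_random.uniform(-0.01, 0.01, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.uniform(-0.01, 0.01, size=self.model.nv)
        self.set_state(qpos, qvel)
        return self._get_obs()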
Example 1: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(
    self,
    direction=1,
    maze_length=0.6,
    sparse_reward=False,
    no_reward=False,
    include_vel=False,
    episode_length=100,
):
    utils.EzPickle.__init__(self)
    self.sparse_reward = sparse_reward
    self.no_reward = no_reward
    self.include_vel = include_vel
    self.max_episode_length = episode_length
    self.direction = direction
    self.length = maze_length
    self.episode_length = 0
    model = point_mass_maze(direction=self.direction, length=self.length)
    with model.asfile() as f:
        mujoco_env.MujocoEnv.__init__(self, f.name, 5)
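Since only __init__ is shown, the surrounding class is not visible here; assuming it is exposed under a name such as PointMassMazeEnv (hypothetical), the environment is driven like any other gym MujocoEnv:

# Hypothetical usage of Example 1; the class name is assumed, not shown above.
env = PointMassMazeEnv(direction=1, maze_length=0.6, sparse_reward=True)
obs = env.reset()
for _ in range(env.max_episode_length):
    action = env.action_space.sample()              # random policy
    obs, reward, done, info = env.step(action)
    if done:
        break
env.close()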
Example 2: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self, direction=1, maze_length=0.6,
             sparse_reward=False, no_reward=False, episode_length=100, grayscale=True,
             width=64, height=64):
    utils.EzPickle.__init__(self)
    self.sparse_reward = sparse_reward
    self.no_reward = no_reward
    self.max_episode_length = episode_length
    self.direction = direction
    self.length = maze_length
    self.width = width
    self.height = height
    self.grayscale = grayscale
    self.episode_length = 0
    model = point_mass_maze(direction=self.direction, length=self.length, borders=False)
    with model.asfile() as f:
        mujoco_env.MujocoEnv.__init__(self, f.name, 5)
    if self.grayscale:
        self.observation_space = Box(0, 1, shape=(width, height))
    else:
        self.observation_space = Box(0, 1, shape=(width, height, 3))
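Example 2 replaces observation_space with an image-shaped Box (the Box import from gym.spaces is implied). As a loose sketch of how such an environment might produce pixel observations, one could render a frame and optionally convert it to grayscale; the _get_obs below is an assumption for illustration, not code from the example, and the render keyword arguments follow the older mujoco_py-based gym API.

# Assumed pixel-observation sketch for Example 2 (not taken from the source).
def _get_obs(self):
    # render an RGB frame at the requested resolution
    frame = self.render(mode='rgb_array', width=self.width, height=self.height)
    frame = frame.astype(np.float32) / 255.0                 # scale to [0, 1]
    if self.grayscale:
        # luminance-weighted average over the RGB channels
        frame = frame @ np.array([0.299, 0.587, 0.114], dtype=np.float32)
    return frame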
Example 3: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    utils.EzPickle.__init__(self)
    mujoco_env.MujocoEnv.__init__(self, 'reacher.xml', 2)

# def _step(self, a):
#     # x = self._get_obs()[None]
#     # assert np.allclose(self.get_body_com("fingertip")[:2], get_fingertips(x)), \
#     #     str(self.get_body_com("fingertip")) + " " + str(get_fingertips(x))
#     vec = self.get_body_com("fingertip") - self.get_body_com("target")
#     reward_dist = -np.linalg.norm(vec[:2])
#     reward_ctrl = -np.square(a).sum() * 0.01
#     reward = reward_dist + reward_ctrl
#     self.do_simulation(a, self.frame_skip)
#     ob = self._get_obs()
#     done = False
#     return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
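The commented-out _step above already contains a full reward computation for the reacher. Rewritten as a runnable method under the old four-tuple gym step signature (a reconstruction of those comments, not code confirmed elsewhere in the source), it reads:

def step(self, a):
    # planar distance between the fingertip and the target
    vec = self.get_body_com("fingertip") - self.get_body_com("target")
    reward_dist = -np.linalg.norm(vec[:2])
    reward_ctrl = -np.square(a).sum() * 0.01      # small control penalty
    reward = reward_dist + reward_ctrl
    self.do_simulation(a, self.frame_skip)
    ob = self._get_obs()
    done = False
    return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)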
Example 4: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self, CentipedeLegNum=4, is_crippled=False):
    # get the path of the environment XML
    if is_crippled:
        xml_name = 'CpCentipede' + self.get_env_num_str(CentipedeLegNum) + \
            '.xml'
    else:
        xml_name = 'Centipede' + self.get_env_num_str(CentipedeLegNum) + \
            '.xml'
    xml_path = os.path.join(init_path.get_base_dir(),
                            'environments', 'assets',
                            xml_name)
    xml_path = str(os.path.abspath(xml_path))
    self.num_body = int(np.ceil(CentipedeLegNum / 2.0))
    self._control_cost_coeff = .5 * 4 / CentipedeLegNum
    self._contact_cost_coeff = 0.5 * 1e-3 * 4 / CentipedeLegNum
    self.torso_geom_id = 1 + np.array(range(self.num_body)) * 5
    # make sure the centipede is not spawned in an already-terminated state
    self.body_qpos_id = 6 + 6 + np.array(range(self.num_body)) * 6
    self.body_qpos_id[-1] = 5
    mujoco_env.MujocoEnv.__init__(self, xml_path, 5)
    utils.EzPickle.__init__(self)
# 需要导入模块: from gym.envs.mujoco import mujoco_env [as 别名]
# 或者: from gym.envs.mujoco.mujoco_env import MujocoEnv [as 别名]
def __init__(self, pod_number=3, is_crippled=False):
# get the path of the environments
if is_crippled:
xml_name = 'CrippledSnake' + self.get_env_num_str(pod_number) + \
'.xml'
else:
xml_name = 'Snake' + self.get_env_num_str(pod_number) + '.xml'
xml_path = os.path.join(os.path.join(init_path.get_base_dir(),
'environments', 'assets', xml_name))
xml_path = str(os.path.abspath(xml_path))
self.num_body = pod_number
self._direction = 0
self.ctrl_cost_coeff = 0.0001 / pod_number * 3
mujoco_env.MujocoEnv.__init__(self, xml_path, 4)
utils.EzPickle.__init__(self)
Example 6: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self, pod_number=2):
    # get the path of the environment XML
    xml_name = 'Reacher' + self.get_env_num_str(pod_number) + '.xml'
    xml_path = os.path.join(init_path.get_base_dir(),
                            'environments', 'assets', xml_name)
    xml_path = str(os.path.abspath(xml_path))
    # the environment coefficients
    self.num_body = pod_number + 1
    self._task_indicator = -1.0
    self._ctrl_coeff = 2.0 / (self.num_body / 2 + 1)
    # normalize the max penalty to 1; the max norm is self.num_body * 0.1 * 2
    self._dist_coeff = 2.0 / self.num_body
    mujoco_env.MujocoEnv.__init__(self, xml_path, 2)
    utils.EzPickle.__init__(self)
Example 7: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self,
             xml_file='swimmer.xml',
             forward_reward_weight=1.0,
             ctrl_cost_weight=1e-4,
             reset_noise_scale=0.1,
             exclude_current_positions_from_observation=True):
    utils.EzPickle.__init__(**locals())
    self._forward_reward_weight = forward_reward_weight
    self._ctrl_cost_weight = ctrl_cost_weight
    self._reset_noise_scale = reset_noise_scale
    self._exclude_current_positions_from_observation = (
        exclude_current_positions_from_observation)
    mujoco_env.MujocoEnv.__init__(self, xml_file, 4)
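Example 7 mirrors the constructor of gym's Swimmer-v3 environment, where the stored weights are later combined into a forward reward minus a control cost. A hedged sketch of that use, modeled on the upstream gym swimmer rather than taken from this example, could look like:

def control_cost(self, action):
    # quadratic penalty on the applied torques
    return self._ctrl_cost_weight * np.sum(np.square(action))

def step(self, action):
    x_before = self.sim.data.qpos[0]
    self.do_simulation(action, self.frame_skip)
    x_after = self.sim.data.qpos[0]
    x_velocity = (x_after - x_before) / self.dt   # forward speed over the step

    forward_reward = self._forward_reward_weight * x_velocity
    reward = forward_reward - self.control_cost(action)
    return self._get_obs(), reward, False, {'x_velocity': x_velocity}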
Example 8: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 1)
    utils.EzPickle.__init__(self)
Example 9: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 5)
    utils.EzPickle.__init__(self)
Example 10: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self, max_timesteps=1000, disabled=False, gear=150):
    self.timesteps = 0
    self.max_timesteps = max_timesteps
    if disabled:
        model = angry_ant_crippled(gear=gear)
    else:
        model = ant_env(gear=gear)
    with model.asfile() as f:
        mujoco_env.MujocoEnv.__init__(self, f.name, 5)
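Example 10 initializes a self.timesteps counter next to max_timesteps, which suggests time-based termination inside step. A sketch of that bookkeeping is shown below; it is an assumption, since the example only shows __init__, and the zero reward is a placeholder.

def step(self, action):
    self.do_simulation(action, self.frame_skip)
    self.timesteps += 1
    obs = self._get_obs()
    reward = 0.0                                    # placeholder task reward
    done = self.timesteps >= self.max_timesteps     # terminate on the time limit
    return obs, reward, done, {}

def reset_model(self):
    self.timesteps = 0
    self.set_state(self.init_qpos, self.init_qvel)
    return self._get_obs()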
Example 11: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self, sparse_reward=False, no_reward=False, episode_length=200):
    utils.EzPickle.__init__(self)
    self.sparse_reward = sparse_reward
    self.no_reward = no_reward
    self.max_episode_length = episode_length
    self.goal_pos = np.asarray([0.0, 0.0])
    self.episode_length = 0
    model = pusher(goal_pos=[self.goal_pos[0], self.goal_pos[1], -0.323])
    with model.asfile() as f:
        mujoco_env.MujocoEnv.__init__(self, f.name, 5)
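Example 11 stores goal_pos together with sparse_reward and no_reward flags, which are presumably folded into the reward. A hedged sketch of such a reward helper follows; it is purely an assumption, since only __init__ appears above, and the 0.05 m success threshold is illustrative.

def compute_reward(self, object_xy):
    # planar distance between the pushed object and the goal
    dist = np.linalg.norm(np.asarray(object_xy) - self.goal_pos)
    if self.no_reward:
        return 0.0
    if self.sparse_reward:
        return 1.0 if dist < 0.05 else 0.0
    return -dist          # dense shaping: negative distance to the goal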
Example 12: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    mujoco_env.MujocoEnv.__init__(self, 'inverted_double_pendulum.xml', 5)
    utils.EzPickle.__init__(self)
Example 13: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    mujoco_env.MujocoEnv.__init__(self, 'humanoidstandup.xml', 5)
    utils.EzPickle.__init__(self)
Example 14: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)
    utils.EzPickle.__init__(self)
Example 15: __init__
# Required import: from gym.envs.mujoco import mujoco_env [as alias]
# Or: from gym.envs.mujoco.mujoco_env import MujocoEnv [as alias]
def __init__(self):
    utils.EzPickle.__init__(self)
    mujoco_env.MujocoEnv.__init__(self, 'pusher.xml', 5)