本文整理汇总了Python中atari_py.get_game_path方法的典型用法代码示例。如果您正苦于以下问题:Python atari_py.get_game_path方法的具体用法?Python atari_py.get_game_path怎么用?Python atari_py.get_game_path使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块atari_py的用法示例。
在下文中一共展示了atari_py.get_game_path方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, args, process_ind=0, num_envs_per_process=1):
    """Single-env-per-process Atari environment driven by a raw ALEInterface."""
    super(AtariEnv, self).__init__(args, process_ind, num_envs_per_process)
    # env_params for this env
    assert self.num_envs_per_process == 1
    # NOTE(review): offset mixes num_envs_per_actor (set by the base class)
    # with the per-process seed — confirm this is the intended formula.
    self.seed = self.seed + self.process_ind * self.num_envs_per_actor  # NOTE: check again
    # Configure ALE; every option must be set before the ROM is loaded.
    self.ale = atari_py.ALEInterface()
    self.ale.setInt('random_seed', self.seed)
    self.ale.setInt('max_num_frames', self.early_stop)
    self.ale.setFloat('repeat_action_probability', 0)  # Disable sticky actions
    self.ale.setInt('frame_skip', 0)
    self.ale.setBool('color_averaging', False)
    rom_path = atari_py.get_game_path(self.game)
    print(rom_path)
    self.ale.loadROM(rom_path)  # ROM loading must come after the options above
    minimal_actions = self.ale.getMinimalActionSet()
    self.actions = {idx: act for idx, act in enumerate(minimal_actions)}
    self.lives = 0  # life counter (used in DeepMind training)
    self.just_died = False  # lost one life, but the game is not over yet
    # Frame stack holding the last state_cha observations.
    self.exp_state1 = deque(maxlen=self.state_cha)
    self._reset_experience()
示例2: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, args):
    """Set up a DeepMind-style Atari environment over a raw ALEInterface."""
    self.device = args.device
    self.ale = atari_py.ALEInterface()
    self.ale.setInt('random_seed', args.seed)
    self.ale.setInt('max_num_frames_per_episode', args.max_episode_length)
    # Sticky actions off; frame skipping / colour averaging are done manually.
    self.ale.setFloat('repeat_action_probability', 0)
    self.ale.setInt('frame_skip', 0)
    self.ale.setBool('color_averaging', False)
    # ROM loading must happen only after all ALE options are set.
    self.ale.loadROM(atari_py.get_game_path(args.game))
    minimal_actions = self.ale.getMinimalActionSet()
    self.actions = {index: action for index, action in enumerate(minimal_actions)}
    self.lives = 0  # Life counter (used in DeepMind training)
    self.life_termination = False  # True while resetting only from a lost life
    self.window = args.history_length  # Number of frames to concatenate
    self.state_buffer = deque([], maxlen=args.history_length)
    self.training = True  # Consistent with model training mode
示例3: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, args):
    """Build the ALE-backed Atari env and expose a Discrete action space."""
    self.device = args.device
    # ALE configuration — every option below must precede loadROM().
    ale = atari_py.ALEInterface()
    ale.setInt("random_seed", args.seed)
    ale.setInt("max_num_frames_per_episode", args.max_episode_length)
    ale.setFloat("repeat_action_probability", 0)  # Disable sticky actions
    ale.setInt("frame_skip", 0)
    ale.setBool("color_averaging", False)
    ale.loadROM(atari_py.get_game_path(args.game))
    self.ale = ale
    available = self.ale.getMinimalActionSet()
    self.actions = {pos: a for pos, a in enumerate(available)}
    self.action_space = spaces.Discrete(len(self.actions))
    self.lives = 0  # Life counter (used in DeepMind training)
    self.life_termination = False  # True while resetting only from a lost life
    self.window = args.history_length  # Number of frames to concatenate
    self.state_buffer = deque([], maxlen=args.history_length)
    self.training = True  # Consistent with model training mode
示例4: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):
    """Construct an ALE-backed gym Atari environment.

    ``frameskip`` is either an int or a (low, high) tuple sampled per step
    (high end excluded). ``obs_type`` selects 128-byte RAM or RGB image
    observations.
    """
    utils.EzPickle.__init__(self, game, obs_type)
    assert obs_type in ('ram', 'image')
    self.game_path = atari_py.get_game_path(game)
    if not os.path.exists(self.game_path):
        raise IOError('You asked for game %s but path %s does not exist' % (game, self.game_path))
    self._obs_type = obs_type
    self.frameskip = frameskip
    self.viewer = None
    self.ale = atari_py.ALEInterface()
    # Tune (or disable) ALE's action repeat:
    # https://github.com/openai/gym/issues/349
    assert isinstance(repeat_action_probability, (float, int)), \
        "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
    self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)
    self.seed()
    self._action_set = self.ale.getMinimalActionSet()
    self.action_space = spaces.Discrete(len(self._action_set))
    width, height = self.ale.getScreenDims()
    if self._obs_type == 'ram':
        self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
    elif self._obs_type == 'image':
        self.observation_space = spaces.Box(low=0, high=255, shape=(height, width, 3), dtype=np.uint8)
    else:
        raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))
示例5: get_num_actions
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def get_num_actions(rom_path, rom_name):
    """Load the named ROM into a fresh ALEInterface and return its minimal action set.

    NOTE(review): despite the name, this returns the action *set* itself,
    not its length — callers apparently take len() themselves. ``rom_path``
    is unused; the path comes from atari_py.get_game_path(rom_name).
    """
    ale = atari_py.ALEInterface()
    ale.loadROM(atari_py.get_game_path(rom_name))
    return ale.getMinimalActionSet()
示例6: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):
    """Initialize an Atari env; frameskip is an int or a (low, high) range
    with the high end excluded."""
    utils.EzPickle.__init__(self, game, obs_type, frameskip, repeat_action_probability)
    assert obs_type in ('ram', 'image')
    rom_path = atari_py.get_game_path(game)
    self.game_path = rom_path
    if not os.path.exists(rom_path):
        raise IOError('You asked for game %s but path %s does not exist' % (game, rom_path))
    self._obs_type = obs_type
    self.frameskip = frameskip
    self.ale = atari_py.ALEInterface()
    self.viewer = None
    # Tune (or disable) ALE's action repeat:
    # https://github.com/openai/gym/issues/349
    assert isinstance(repeat_action_probability, (float, int)), \
        "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
    self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)
    self.seed()
    self._action_set = self.ale.getMinimalActionSet()
    self.action_space = spaces.Discrete(len(self._action_set))
    (screen_w, screen_h) = self.ale.getScreenDims()
    if self._obs_type == 'ram':
        # ALE exposes exactly 128 bytes of console RAM.
        self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
    elif self._obs_type == 'image':
        self.observation_space = spaces.Box(low=0, high=255, shape=(screen_h, screen_w, 3), dtype=np.uint8)
    else:
        raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))
示例7: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self,
             game="pong",
             frame_skip=4,  # Frames per step (>=1).
             num_img_obs=4,  # Number of (past) frames in observation (>=1).
             clip_reward=True,
             episodic_lives=True,
             fire_on_reset=False,
             max_start_noops=30,
             repeat_action_probability=0.,
             horizon=27000,
             ):
    """Atari environment wrapper over a raw ALEInterface.

    Saves every constructor argument as an underscore-prefixed attribute,
    loads the ROM, builds action/observation spaces and reusable frame
    buffers, then performs an initial reset.
    """
    # Captures the argument locals() as self._<name> attributes; kept as the
    # first statement so no extra locals leak into the saved set.
    save__init__args(locals(), underscore=True)
    # ALE
    game_path = atari_py.get_game_path(game)
    if not os.path.exists(game_path):
        raise IOError("You asked for game {} but path {} does not "
                      " exist".format(game, game_path))
    self.ale = atari_py.ALEInterface()
    # 0. disables sticky actions (fully deterministic stepping).
    self.ale.setFloat(b'repeat_action_probability', repeat_action_probability)
    self.ale.loadROM(game_path)
    # Spaces
    self._action_set = self.ale.getMinimalActionSet()
    self._action_space = IntBox(low=0, high=len(self._action_set))
    # H, W are presumably module-level frame height/width constants defined
    # outside this block — confirm against the enclosing module.
    obs_shape = (num_img_obs, H, W)
    self._observation_space = IntBox(low=0, high=255, shape=obs_shape,
                                     dtype="uint8")
    # Grayscale buffers reused across steps to avoid per-step allocation.
    self._max_frame = self.ale.getScreenGrayscale()
    self._raw_frame_1 = self._max_frame.copy()
    self._raw_frame_2 = self._max_frame.copy()
    self._obs = np.zeros(shape=obs_shape, dtype="uint8")
    # Settings
    self._has_fire = "FIRE" in self.get_action_meanings()
    self._has_up = "UP" in self.get_action_meanings()
    self._horizon = int(horizon)
    self.reset()
示例8: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, game, seed=None, use_sdl=False, n_last_screens=4,
             frame_skip=4, treat_life_lost_as_terminal=True,
             crop_or_scale='scale', max_start_nullops=30,
             record_screen_dir=None):
    """Build an ALE-backed Atari env with a Tuple-of-frames observation space.

    Seeds and configures the ALE, optionally enables SDL display/recording,
    loads the ROM, then derives the legal action set and gym-style
    action/observation spaces. Raises RuntimeError if atari_py is missing
    or if use_sdl=True without a DISPLAY.
    """
    assert crop_or_scale in ['crop', 'scale']
    assert frame_skip >= 1
    self.n_last_screens = n_last_screens
    self.treat_life_lost_as_terminal = treat_life_lost_as_terminal
    self.crop_or_scale = crop_or_scale
    self.max_start_nullops = max_start_nullops
    # atari_py is used only to provide rom files. atari_py has its own
    # ale_python_interface, but it is obsolete.
    if not atari_py_available:
        raise RuntimeError(
            'You need to install atari_py>=0.1.1 to use ALE.')
    game_path = atari_py.get_game_path(game)
    ale = atari_py.ALEInterface()
    if seed is not None:
        # ALE only accepts 31-bit non-negative seeds.
        assert seed >= 0 and seed < 2 ** 31, \
            "ALE's random seed must be in [0, 2 ** 31)."
    else:
        # Use numpy's random state
        seed = np.random.randint(0, 2 ** 31)
    ale.setInt(b'random_seed', seed)
    # Sticky actions disabled; colour averaging off (raw frames).
    ale.setFloat(b'repeat_action_probability', 0.0)
    ale.setBool(b'color_averaging', False)
    if record_screen_dir is not None:
        ale.setString(b'record_screen_dir',
                      str.encode(str(record_screen_dir)))
    self.frame_skip = frame_skip
    if use_sdl:
        if 'DISPLAY' not in os.environ:
            raise RuntimeError(
                'Please set DISPLAY environment variable for use_sdl=True')
        # SDL settings below are from the ALE python example
        if sys.platform == 'darwin':
            import pygame
            pygame.init()
            ale.setBool(b'sound', False)  # Sound doesn't work on OSX
        elif sys.platform.startswith('linux'):
            ale.setBool(b'sound', True)
        ale.setBool(b'display_screen', True)
    # ROM loading must come after all the options above.
    ale.loadROM(str.encode(str(game_path)))
    assert ale.getFrameNumber() == 0
    self.ale = ale
    self.legal_actions = ale.getMinimalActionSet()
    self.initialize()
    self.action_space = spaces.Discrete(len(self.legal_actions))
    # Observation is a tuple of the last n_last_screens 84x84 gray frames.
    one_screen_observation_space = spaces.Box(
        low=0, high=255,
        shape=(84, 84), dtype=np.uint8,
    )
    self.observation_space = spaces.Tuple(
        [one_screen_observation_space] * n_last_screens)
示例9: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(self, rom_path, rom_name, visualize, actor_id, rseed, single_life_episode = False):
    """Per-actor Atari emulator with ALE-level frame skipping.

    Seeds the ALE per actor (rseed * (actor_id + 1)), loads the ROM, and
    pre-allocates raw/processed screen buffers. rom_path is accepted but
    unused here; the path comes from atari_py.get_game_path(rom_name).
    """
    self.ale = atari_py.ALEInterface()
    # Distinct seed per actor so parallel actors do not replay identical episodes.
    self.ale.setInt("random_seed", rseed * (actor_id +1))
    # For fuller control on explicit action repeat (>= ALE 0.5.0)
    self.ale.setFloat("repeat_action_probability", 0.0)
    # See: http://is.gd/tYzVpj
    self.ale.setInt("frame_skip", 4)
    #self.ale.setBool("color_averaging", False)
    # ROM loading must come after the option calls above.
    self.ale.loadROM(atari_py.get_game_path(rom_name))
    self.legal_actions = self.ale.getMinimalActionSet()
    self.single_life_episode = single_life_episode
    self.initial_lives = self.ale.lives()
    # Processed frames that will be fed in to the network
    # (i.e., four 84x84 images)
    # IMG_SIZE_X/Y and NR_IMAGES are presumably module-level constants —
    # confirm against the enclosing module.
    self.processed_imgs = np.zeros((IMG_SIZE_X, IMG_SIZE_Y,
                                    NR_IMAGES), dtype=np.uint8)
    self.screen_width,self.screen_height = self.ale.getScreenDims()
    # Raw screen buffers reused by ALE getters (RGBA and grayscale).
    self.rgb_screen = np.zeros((self.screen_height,self.screen_width, 4), dtype=np.uint8)
    self.gray_screen = np.zeros((self.screen_height,self.screen_width,1), dtype=np.uint8)
    self.visualize = visualize
    self.visualize_processed = False
    rendering_imported = False
    # Viewer setup intentionally disabled below (kept for reference).
    # if self.visualize:
    #     from gym.envs.classic_control import rendering
    #     rendering_imported = True
    #     logger.debug("Opening emulator window...")
    #     self.viewer = rendering.SimpleImageViewer()
    #     self.render()
    #     logger.debug("Emulator window opened")
    #
    # if self.visualize_processed:
    #     if not rendering_imported:
    #         from gym.envs.classic_control import rendering
    #     logger.debug("Opening emulator window...")
    #     self.viewer2 = rendering.SimpleImageViewer()
    #     self.render()
    #     logger.debug("Emulator window opened")
示例10: __init__
# 需要导入模块: import atari_py [as 别名]
# 或者: from atari_py import get_game_path [as 别名]
def __init__(
        self,
        game='pong',
        mode=None,
        difficulty=None,
        obs_type='ram',
        frameskip=(2, 5),
        repeat_action_probability=0.,
        full_action_space=False):
    """Construct a gym Atari env with optional game mode/difficulty.

    ``frameskip`` is either an int or a (low, high) tuple sampled per step
    (high end excluded).
    """
    utils.EzPickle.__init__(
        self,
        game,
        mode,
        difficulty,
        obs_type,
        frameskip,
        repeat_action_probability)
    assert obs_type in ('ram', 'image')
    self.game = game
    self.game_path = atari_py.get_game_path(game)
    self.game_mode = mode
    self.game_difficulty = difficulty
    if not os.path.exists(self.game_path):
        raise IOError(
            'You asked for game %s but path %s does not exist'
            % (game, self.game_path))
    self._obs_type = obs_type
    self.frameskip = frameskip
    self.ale = atari_py.ALEInterface()
    self.viewer = None
    # Tune (or disable) ALE's action repeat:
    # https://github.com/openai/gym/issues/349
    assert isinstance(repeat_action_probability, (float, int)), \
        "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
    self.ale.setFloat(
        'repeat_action_probability'.encode('utf-8'),
        repeat_action_probability)
    self.seed()
    if full_action_space:
        self._action_set = self.ale.getLegalActionSet()
    else:
        self._action_set = self.ale.getMinimalActionSet()
    self.action_space = spaces.Discrete(len(self._action_set))
    screen_width, screen_height = self.ale.getScreenDims()
    if self._obs_type == 'ram':
        # ALE exposes exactly 128 bytes of console RAM.
        self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
    elif self._obs_type == 'image':
        self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
    else:
        raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))