This article collects typical usage examples of the Python method simulator.Simulator.setup_reward_function. If you are wondering what Simulator.setup_reward_function does or how to call it, the curated examples below should help. You can also read more about the containing class, simulator.Simulator.
The following presents 3 code examples of Simulator.setup_reward_function, sorted by popularity by default. You can upvote examples you like or find useful; your ratings help the system recommend better Python code examples.
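Before the full examples, here is a minimal sketch of the call pattern they all share. The argument values below are illustrative placeholders (they do not come from the examples' config files); the positional order matches the calls shown further down.
from simulator import Simulator

sim = Simulator()
# Positional order as used in the examples below:
# (discount_factor, step_penalty, illegal_move_penalty, exit_reward)
sim.setup_reward_function(0.999, 1.0, 50.0, 1000.0)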
Example 1: __init__
# Required import: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import setup_reward_function [as alias]
def __init__(self, show_scene, deserialize, append_paths):
self.abs_path = os.path.dirname(os.path.abspath(__file__))
""" Reading the config """
warnings.filterwarnings("ignore")
self.init_serializer()
config = self.serializer.read_config("config_lqg.yaml", path=self.abs_path)
self.set_params(config)
if self.seed < 0:
"""
Generate a random seed that will be stored
"""
self.seed = np.random.randint(0, 4294967295)
np.random.seed(self.seed)
logging_level = logging.WARN
if config['verbose']:
logging_level = logging.DEBUG
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level)
np.set_printoptions(precision=16)
dir = self.abs_path + "/stats/lqg"
tmp_dir = self.abs_path + "/tmp/lqg"
self.utils = Utils()
if not self.init_robot(self.robot_file):
logging.error("LQG: Couldn't initialize robot")
return
if not self.setup_scene(self.environment_file, self.robot):
return
#self.show_state_distribution(urdf_model_file, environment_file)
#self.check_continuous_collide(self.robot_file, self.environment_file)
if show_scene:
self.test_sensor(self.robot_file, self.environment_file, self.abs_path + "/model/sensor.xml")
self.run_viewer(self.robot_file, self.environment_file)
self.clear_stats(dir)
logging.info("Start up simulator")
sim = Simulator()
path_evaluator = PathEvaluator()
path_planner = PathPlanningInterface()
logging.info("LQG: Generating goal states...")
goal_states = get_goal_states("lqg",
self.abs_path,
self.serializer,
self.obstacles,
self.robot,
self.max_velocity,
self.delta_t,
self.start_state,
self.goal_position,
self.goal_radius,
self.planning_algortihm,
self.path_timeout,
self.num_generated_goal_states,
self.continuous_collision,
self.environment_file,
self.num_cores)
if len(goal_states) == 0:
logging.error("LQG: Couldn't generate any goal states. Problem seems to be infeasible")
return
logging.info("LQG: Generated " + str(len(goal_states)) + " goal states")
sim.setup_reward_function(self.discount_factor, self.step_penalty, self.illegal_move_penalty, self.exit_reward)
path_planner.setup(self.robot,
self.obstacles,
self.max_velocity,
self.delta_t,
self.use_linear_path,
self.planning_algortihm,
self.path_timeout,
self.continuous_collision,
self.num_cores)
if self.dynamic_problem:
path_planner.setup_dynamic_problem(self.robot_file,
self.environment_file,
self.simulation_step_size,
self.num_control_samples,
self.min_control_duration,
self.max_control_duration,
self.add_intermediate_states,
self.rrt_goal_bias,
self.control_sampler)
path_planner.set_start_and_goal(self.start_state, goal_states, self.goal_position, self.goal_radius)
A, H, B, V, W, C, D, M_base, N_base = self.problem_setup(self.delta_t, self.robot_dof)
time_to_generate_paths = 0.0
if check_positive_definite([C, D]):
m_covs = None
if self.inc_covariance == "process":
m_covs = np.linspace(self.min_process_covariance,
self.max_process_covariance,
self.covariance_steps)
elif self.inc_covariance == "observation":
m_covs = np.linspace(self.min_observation_covariance,
self.max_observation_covariance,
self.covariance_steps)
emds = []
mean_planning_times = []
time_to_generate_paths = 0.0
paths = []
if ((not append_paths) and deserialize):
paths = self.serializer.deserialize_paths(self.abs_path + "/paths.txt", self.robot_dof)
#......... remaining code omitted here .........
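The Simulator internals are not part of this excerpt, so the sketch below is only a guess at a per-step reward that would be consistent with the four parameters passed to setup_reward_function above; the function name and logic are assumptions, not the library's actual code.
# Hypothetical sketch (not from the source): a per-step reward consistent
# with (discount_factor, step_penalty, illegal_move_penalty, exit_reward).
def step_reward(step, collided, reached_goal,
                discount_factor, step_penalty, illegal_move_penalty, exit_reward):
    if reached_goal:
        reward = exit_reward            # terminal reward for reaching the goal region
    elif collided:
        reward = -illegal_move_penalty  # penalty for an illegal (colliding) move
    else:
        reward = -step_penalty          # running cost per simulation step
    return (discount_factor ** step) * reward  # discount by elapsed steps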
Example 2: __init__
# Required import: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import setup_reward_function [as alias]
def __init__(self, plot):
self.abs_path = os.path.dirname(os.path.abspath(__file__))
""" Reading the config """
warnings.filterwarnings("ignore")
self.init_serializer()
config = self.serializer.read_config("config_mpc.yaml", path=self.abs_path)
self.set_params(config)
if self.seed < 0:
"""
Generate a random seed that will be stored
"""
self.seed = np.random.randint(0, 4294967295)
np.random.seed(self.seed)
logging_level = logging.WARN
if config['verbose']:
logging_level = logging.DEBUG
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level)
np.set_printoptions(precision=16)
dir = self.abs_path + "/stats/mpc"
tmp_dir = self.abs_path + "/tmp/mpc"
self.utils = Utils()
if not self.init_robot(self.robot_file):
logging.error("MPC: Couldn't initialize robot")
return
if not self.setup_scene(self.environment_file, self.robot):
return
#self.run_viewer(self.robot_file, self.environment_file)
self.clear_stats(dir)
logging.info("Start up simulator")
sim = Simulator()
path_evaluator = PathEvaluator()
path_planner = PathPlanningInterface()
logging.info("MPC: Generating goal states...")
goal_states = get_goal_states("mpc",
self.abs_path,
self.serializer,
self.obstacles,
self.robot,
self.max_velocity,
self.delta_t,
self.start_state,
self.goal_position,
self.goal_radius,
self.planning_algortihm,
self.path_timeout,
self.num_generated_goal_states,
self.continuous_collision,
self.environment_file,
self.num_cores)
if len(goal_states) == 0:
logging.error("MPC: Couldn't generate any goal states. Problem seems to be infeasible")
return
logging.info("MPC: Generated " + str(len(goal_states)) + " goal states")
sim.setup_reward_function(self.discount_factor, self.step_penalty, self.illegal_move_penalty, self.exit_reward)
path_planner.setup(self.robot,
self.obstacles,
self.max_velocity,
self.delta_t,
self.use_linear_path,
self.planning_algortihm,
self.path_timeout,
self.continuous_collision,
self.num_cores)
if self.dynamic_problem:
path_planner.setup_dynamic_problem(self.robot_file,
self.environment_file,
self.simulation_step_size,
self.num_control_samples,
self.min_control_duration,
self.max_control_duration,
self.add_intermediate_states,
self.rrt_goal_bias,
self.control_sampler)
A, H, B, V, W, C, D, M_base, N_base = self.problem_setup(self.delta_t, self.robot_dof)
time_to_generate_paths = 0.0
if check_positive_definite([C, D]):
m_covs = None
if self.inc_covariance == "process":
m_covs = np.linspace(self.min_process_covariance,
self.max_process_covariance,
self.covariance_steps)
elif self.inc_covariance == "observation":
m_covs = np.linspace(self.min_observation_covariance,
self.max_observation_covariance,
self.covariance_steps)
for j in xrange(len(m_covs)):
M = None
N = None
if self.inc_covariance == "process":
""" The process noise covariance matrix """
M = self.calc_covariance_value(self.robot, m_covs[j], M_base)
#M = m_covs[j] * M_base
""" The observation error covariance matrix """
N = self.calc_covariance_value(self.robot,
self.min_observation_covariance,
N_base,
#......... remaining code omitted here .........
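The commented-out line "M = m_covs[j] * M_base" in this example hints at what calc_covariance_value computes: the np.linspace sweep yields evenly spaced scaling factors for a base covariance matrix. A standalone illustration with placeholder values (not taken from any config file):
import numpy as np

# Placeholder sweep bounds; the real values come from config_mpc.yaml.
min_process_covariance = 0.001
max_process_covariance = 0.1
covariance_steps = 4
M_base = np.eye(3)  # placeholder base process-noise covariance

m_covs = np.linspace(min_process_covariance, max_process_covariance, covariance_steps)
for m_cov in m_covs:
    M = m_cov * M_base  # mirrors the commented-out scaling above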
Example 3: __init__
# Required import: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import setup_reward_function [as alias]
def __init__(self):
self.abs_path = os.path.dirname(os.path.abspath(__file__))
cmd = "rm -rf " + self.abs_path + "/tmp"
popen = subprocess.Popen(cmd, cwd=self.abs_path, shell=True)
popen.wait()
""" Reading the config """
warnings.filterwarnings("ignore")
self.init_serializer()
config = self.serializer.read_config("config_hrf.yaml", path=self.abs_path)
self.set_params(config)
if self.seed < 0:
"""
Generate a random seed that will be stored
"""
self.seed = np.random.randint(0, 4294967295)
np.random.seed(self.seed)
logging_level = logging.WARN
if config['verbose']:
logging_level = logging.DEBUG
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level)
np.set_printoptions(precision=16)
dir = self.abs_path + "/stats/hrf"
tmp_dir = self.abs_path + "/tmp/hrf"
self.utils = Utils()
if not self.init_robot(self.robot_file):
logging.error("HRF: Couldn't initialize robot")
return
if not self.setup_scene(self.environment_file, self.robot):
return
self.clear_stats(dir)
logging.info("Start up simulator")
sim = Simulator()
plan_adjuster = PlanAdjuster()
path_evaluator = PathEvaluator()
path_planner = PathPlanningInterface()
if self.show_viewer_simulation:
self.robot.setupViewer(self.robot_file, self.environment_file)
logging.info("HRF: Generating goal states...")
problem_feasible = False
while not problem_feasible:
print "Creating random scene..."
#self.create_random_obstacles(20)
goal_states = get_goal_states("hrf",
self.abs_path,
self.serializer,
self.obstacles,
self.robot,
self.max_velocity,
self.delta_t,
self.start_state,
self.goal_position,
self.goal_radius,
self.planning_algortihm,
self.path_timeout,
self.num_generated_goal_states,
self.continuous_collision,
self.environment_file,
self.num_cores)
if len(goal_states) == 0:
logging.error("HRF: Couldn't generate any goal states. Problem seems to be infeasible")
else:
problem_feasible = True
'''obst = v_obstacle()
obst[:] = self.obstacles
self.robot.addObstacles(obst)'''
logging.info("HRF: Generated " + str(len(goal_states)) + " goal states")
sim.setup_reward_function(self.discount_factor, self.step_penalty, self.illegal_move_penalty, self.exit_reward)
path_planner.setup(self.robot,
self.obstacles,
self.max_velocity,
self.delta_t,
self.use_linear_path,
self.planning_algortihm,
self.path_timeout,
self.continuous_collision,
self.num_cores)
if self.dynamic_problem:
path_planner.setup_dynamic_problem(self.robot_file,
self.environment_file,
self.simulation_step_size,
self.num_control_samples,
self.min_control_duration,
self.max_control_duration,
self.add_intermediate_states,
self.rrt_goal_bias,
self.control_sampler)
A, H, B, V, W, C, D, M_base, N_base = self.problem_setup(self.delta_t, self.robot_dof)
time_to_generate_paths = 0.0
if check_positive_definite([C, D]):
m_covs = None
if self.inc_covariance == "process":
m_covs = np.linspace(self.min_process_covariance,
self.max_process_covariance,
self.covariance_steps)
elif self.inc_covariance == "observation":
#......... remaining code omitted here .........
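All three examples gate the covariance sweep on check_positive_definite([C, D]). That helper is not defined in these excerpts; a plausible stand-in (an assumption, not the original implementation) tests whether a Cholesky factorization succeeds:
import numpy as np

def check_positive_definite(matrices):
    # Assumed implementation: a symmetric matrix is positive definite
    # exactly when its Cholesky factorization succeeds.
    for matrix in matrices:
        try:
            np.linalg.cholesky(matrix)
        except np.linalg.LinAlgError:
            return False
    return True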