This article collects typical usage examples of the Simulator.simulate_n_steps method from the Python module simulator. If you are wondering what Simulator.simulate_n_steps does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, simulator.Simulator.
The following shows 3 code examples of the Simulator.simulate_n_steps method, sorted by popularity by default.
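Before the full examples, here is a minimal sketch of the call pattern. It mirrors the invocation in Example 2 below; the inputs (the planned states xs, controls us, observations zs, their durations, and the current state estimate) are assumptions taken from the excerpts, not a documented API, so this is not runnable as-is.

import numpy as np
from simulator import Simulator

sim = Simulator()
# ... problem setup as in Example 1 (sim.setup_problem(...)) ...
# xs, us, zs: nominal states, controls and observations of a planned path
# x_true, x_tilde, x_tilde_linear, x_estimate, P_t: current state, deviations,
# state estimate and estimate covariance (all assumed to come from a planner)
(x_true, x_tilde, x_tilde_linear, x_estimate, P_t,
 current_step, total_reward, terminal,
 estimated_s, estimated_c,
 history_entries) = sim.simulate_n_steps(xs, us, zs,
                                         control_durations,
                                         x_true, x_tilde, x_tilde_linear,
                                         x_estimate, P_t,
                                         total_reward, current_step,
                                         n_steps,
                                         0.0, 0.0,
                                         max_num_steps=max_num_steps)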
Example 1: __init__
# Required import: from simulator import Simulator [as alias]
# Alternatively: from simulator.Simulator import simulate_n_steps [as alias]
#......... part of the code omitted here .........
sim.setup_problem(A, B, C, D, H, V, W, M, N,
                  self.robot,
                  self.enforce_control_constraints,
                  self.obstacles,
                  self.goal_position,
                  self.goal_radius,
                  self.max_velocity,
                  self.show_viewer_simulation,
                  self.robot_file,
                  self.environment_file)
if self.dynamic_problem:
    sim.setup_dynamic_problem(self.simulation_step_size)
# Statistics accumulated over all simulation runs
successes = 0
num_collisions = 0
rewards_cov = []
final_states = []
num_steps = 0
collided_num = 0
print "LQG: Running " + str(self.num_simulation_runs) + " simulations..."
for k in xrange(self.num_simulation_runs):
    self.serializer.write_line("log.log", tmp_dir, "RUN #" + str(k + 1) + " \n")
    print "simulation run: " + str(k)
    # Simulate the full path (len(xs) - 1 steps), starting at xs[0]
    # with zero state deviation and a zero initial covariance matrix
    (x_true,
     x_tilde,
     x_tilde_linear,
     x_estimate,
     P_t,
     current_step,
     total_reward,
     terminal,
     estimated_s,
     estimated_c,
     history_entries) = sim.simulate_n_steps(xs, us, zs,
                                             control_durations,
                                             xs[0],
                                             np.array([0.0 for i in xrange(2 * self.robot_dof)]),
                                             np.array([0.0 for i in xrange(2 * self.robot_dof)]),
                                             xs[0],
                                             np.array([[0.0 for k in xrange(2 * self.robot_dof)] for l in xrange(2 * self.robot_dof)]),
                                             0.0,
                                             0,
                                             len(xs) - 1,
                                             deviation_covariances,
                                             estimated_deviation_covariances)
    if terminal:
        successes += 1
    rewards_cov.append(total_reward)
    #n, min_max, mean, var, skew, kurt = scipy.stats.describe(np.array(rewards_cov))
    collided = False
    for l in xrange(len(history_entries)):
        history_entries[l].set_estimated_covariance(state_covariances[l])
        history_entries[l].serialize(tmp_dir, "log.log")
        if history_entries[l].collided:
            num_collisions += 1
            collided = True
    if collided:
        collided_num += 1
    num_steps += history_entries[-1].t
    final_states.append(history_entries[-1].x_true)
    self.serializer.write_line("log.log", tmp_dir, "Reward: " + str(total_reward) + " \n")
    self.serializer.write_line("log.log", tmp_dir, "\n")
""" Calculate the distance to goal area
"""
ee_position_distances = []
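Example 1 collects one total reward per run in rewards_cov, and its commented-out line hints at summarizing those rewards with scipy.stats.describe. A minimal, self-contained sketch of that summary step (the reward values here are made up for illustration):

import numpy as np
import scipy.stats

rewards_cov = [12.0, 9.5, 11.25]  # hypothetical per-run total rewards
# describe() returns: number of observations, (min, max), mean, variance,
# skewness and kurtosis of the sample
n, min_max, mean, var, skew, kurt = scipy.stats.describe(np.array(rewards_cov))
print "runs: " + str(n) + ", mean reward: " + str(mean) + ", variance: " + str(var)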
Example 2: __init__
# Required import: from simulator import Simulator [as alias]
# Alternatively: from simulator.Simulator import simulate_n_steps [as alias]
#......... part of the code omitted here .........
                                 self.evaluation_horizon,
                                 P_t,
                                 deviation_covariance,
                                 estimated_deviation_covariance,
                                 self.timeout)
mean_planning_time += time.time() - t0
mean_number_planning_steps += 1.0
num_generated_paths_run += num_generated_paths
if len(xs) == 0:
    logging.error("MPC: Couldn't find any paths from start state " +
                  str(x_estimate) +
                  " to goal states")
    total_reward = np.array([-self.illegal_move_penalty])[0]
    current_step += 1
    break
x_tilde = np.array([0.0 for i in xrange(2 * self.robot_dof)])
# Execute at most num_execution_steps steps, clamped to the remaining
# path length and the overall step budget
n_steps = self.num_execution_steps
if n_steps > len(xs) - 1:
    n_steps = len(xs) - 1
if current_step + n_steps > self.max_num_steps:
    n_steps = self.max_num_steps - current_step
(x_true,
 x_tilde,
 x_tilde_linear,
 x_estimate,
 P_t,
 current_step,
 total_reward,
 terminal,
 estimated_s,
 estimated_c,
 history_entries) = sim.simulate_n_steps(xs, us, zs,
                                         control_durations,
                                         x_true,
                                         x_tilde,
                                         x_tilde_linear,
                                         x_estimate,
                                         P_t,
                                         total_reward,
                                         current_step,
                                         n_steps,
                                         0.0,
                                         0.0,
                                         max_num_steps=self.max_num_steps)
#print "len(hist) " + str(len(history_entries))
#print "len(deviation_covariances) " + str(len(deviation_covariances))
#print "len(estimated_deviation_covariances) " + str(len(estimated_deviation_covariances))
deviation_covariance = deviation_covariances[len(history_entries) - 1]
estimated_deviation_covariance = estimated_deviation_covariances[len(history_entries) - 1]
if (current_step == self.max_num_steps) or terminal:
    final_states.append(history_entries[-1].x_true)
    print "len " + str(len(history_entries))
    print "t " + str(history_entries[-1].t)
    if terminal:
        successful_runs += 1
history_entries[0].set_replanning(True)
for l in xrange(len(history_entries)):
    history_entries[l].set_estimated_covariance(state_covariances[l])
    history_entries[l].serialize(tmp_dir, "log.log")
    if history_entries[l].collided:
        num_collisions += 1
        collided = True
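The three-way clamp on n_steps in Example 2 can also be expressed as a single min(). The helper below is hypothetical (not part of the excerpted code), but equivalent to the excerpt's logic under the same variable meanings:

def clamp_execution_steps(num_execution_steps, path_length, max_num_steps, current_step):
    # Execute at most num_execution_steps steps, but never beyond the end of
    # the planned path (path_length - 1 steps) or the overall step budget.
    return min(num_execution_steps, path_length - 1, max_num_steps - current_step)

# e.g. n_steps = clamp_execution_steps(self.num_execution_steps, len(xs),
#                                      self.max_num_steps, current_step)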
Example 3: __init__
# Required import: from simulator import Simulator [as alias]
# Alternatively: from simulator.Simulator import simulate_n_steps [as alias]
#......... part of the code omitted here .........
                       Ms[0],
                       P_ext_t,
                       self.dynamic_problem)
""" Make sure x_predicted fulfills the constraints """
if self.enforce_constraints:
    x_predicted_temp = sim.check_constraints(x_predicted_temp)
predicted_collided = True
if not sim.is_in_collision([], x_predicted_temp)[0]:
    x_predicted = x_predicted_temp
    predicted_collided = False
else:
    print "X_PREDICTED COLLIDES!"
    # Fall back to the current estimate and zero out its velocity components
    x_predicted = x_estimated
    for l in xrange(len(x_predicted) / 2, len(x_predicted)):
        x_predicted[l] = 0
last_x_true = np.array([x_true[k] for k in xrange(len(x_true))])
"""
Execute path for 1 time step
"""
(x_true,
 x_tilde,
 x_tilde_linear,
 x_estimated_dash,
 z,
 P_t,
 current_step,
 total_reward,
 terminal,
 estimated_s,
 estimated_c,
 history_entries) = sim.simulate_n_steps(xs, us, zs,
                                         control_durations,
                                         x_true,
                                         x_estimated,
                                         P_t,
                                         total_reward,
                                         current_step,
                                         1,
                                         0.0,
                                         0.0,
                                         max_num_steps=self.max_num_steps)
history_entries[-1].set_best_reward(objective)
"""
Process history entries
"""
try:
    deviation_covariance = deviation_covariances[len(history_entries) - 1]
    estimated_deviation_covariance = estimated_deviation_covariances[len(history_entries) - 1]
except:
    print "what: len(deviation_covariances) " + str(len(deviation_covariances))
    print "len(history_entries) " + str(len(history_entries))
    print "len(xs) " + str(len(xs))
history_entries[0].set_replanning(True)
for l in xrange(len(history_entries)):
    try:
        history_entries[l].set_estimated_covariance(state_covariances[l])
    except:
        print "l " + str(l)
        print "len(state_covariances) " + str(len(state_covariances))