This article collects typical usage examples of the Python method Session.Session.add_step_to_action. If you are wondering how Session.add_step_to_action is used in practice, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the containing class, Session.Session.
Two code examples of the Session.add_step_to_action method are shown below, sorted by popularity by default.
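Before the full examples, the snippet below is a minimal sketch of the calling pattern that both examples share: build an ActionStep message, then pass it to Session.add_step_to_action together with the current frame list. Only the add_step_to_action(step, frame_list) call itself is taken from the examples on this page; the pr2_pbd_interaction.msg import path, the helper name, and the pre-existing session and world objects are assumptions made for illustration.

# Sketch only: the import path and the already-initialized `session` and
# `world` objects are assumptions; this page does not show how they are built.
from pr2_pbd_interaction.msg import ActionStep, ArmTarget, GripperAction

def append_arm_target_step(session, world, arm_states, gripper_states):
    '''Builds an ARM_TARGET step and appends it to the current action.'''
    step = ActionStep()
    step.type = ActionStep.ARM_TARGET
    # 0.2 / 0.2 are the arm velocities used in Example 1 below.
    step.armTarget = ArmTarget(arm_states[0], arm_states[1], 0.2, 0.2)
    step.gripperAction = GripperAction(gripper_states[0], gripper_states[1])
    # Both examples pass the new step together with the current frame list.
    session.add_step_to_action(step, world.get_frame_list())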
Example 1: __init__
# Required import: from Session import Session [as alias]
# Or: from Session.Session import add_step_to_action [as alias]
#......... part of the code is omitted here .........
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def _resume_all_steps(self):
        '''Resumes all steps after clearing'''
        self.session.undo_clear()
        return [RobotSpeech.ALL_POSES_RESUMED, GazeGoal.NOD]

    def _resume_last_step(self):
        '''Resumes last step after deleting'''
        self.session.resume_deleted_step()
        return [RobotSpeech.POSE_RESUMED, GazeGoal.NOD]

    def stop_execution(self, dummy=None):
        '''Stops ongoing execution'''
        if (self.arms.is_executing()):
            self.arms.stop_execution()
            return [RobotSpeech.STOPPING_EXECUTION, GazeGoal.NOD]
        else:
            return [RobotSpeech.ERROR_NO_EXECUTION, GazeGoal.SHAKE]

    def save_gripper_step(self, arm_index, gripper_state):
        '''Saves an action step that involves a gripper state change'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                states = self._get_arm_states()
                step = ActionStep()
                step.type = ActionStep.ARM_TARGET
                step.armTarget = ArmTarget(states[0], states[1], 0.2, 0.2)
                actions = [self.arms.get_gripper_state(0),
                           self.arms.get_gripper_state(1)]
                actions[arm_index] = gripper_state
                step.gripperAction = GripperAction(actions[0], actions[1])
                self.session.add_step_to_action(step,
                                                self.world.get_frame_list())

    def start_recording(self, dummy=None):
        '''Starts recording continuous motion'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                if (not Interaction._is_recording_motion):
                    Interaction._is_recording_motion = True
                    Interaction._arm_trajectory = ArmTrajectory()
                    Interaction._trajectory_start_time = rospy.Time.now()
                    if self.session.n_frames() > 0:
                        self.session.clear_current_action()
                    return [RobotSpeech.STARTED_RECORDING_MOTION,
                            GazeGoal.NOD]
                else:
                    return [RobotSpeech.ALREADY_RECORDING_MOTION,
                            GazeGoal.SHAKE]
            else:
                return ['Action ' + str(self.session.current_action_index) +
                        RobotSpeech.ERROR_NOT_IN_EDIT, GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def stop_recording(self, dummy=None):
        '''Stops recording continuous motion'''
        if (Interaction._is_recording_motion):
            Interaction._is_recording_motion = False
            traj_step = ActionStep()
            traj_step.type = ActionStep.ARM_TRAJECTORY
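In Example 1, add_step_to_action is reached from save_gripper_step: when at least one action exists and the system is in programming mode, the current arm states are wrapped in an ARM_TARGET step with a GripperAction, and the step is appended along with self.world.get_frame_list(), presumably so it can be stored relative to the currently known reference frames. Example 2 below uses the same call to append an ARM_TRAJECTORY step when a recorded demonstration ends.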
Example 2: __init__
# Required import: from Session import Session [as alias]
# Or: from Session.Session import add_step_to_action [as alias]
#......... part of the code is omitted here .........
        rospy.loginfo('Current state: ' + self._demo_state)
        self.busy = False

    def stop_recording(self, dummy=None):
        '''Stops recording continuous motion'''
        self.busy = True
        if (self._demo_state == DemoState.RECORDING_DEMO):
            traj_step = ActionStep()
            traj_step.type = ActionStep.ARM_TRAJECTORY

            waited_time = Interaction._arm_trajectory.timing[0]
            for i in range(len(Interaction._arm_trajectory.timing)):
                Interaction._arm_trajectory.timing[i] -= waited_time
                Interaction._arm_trajectory.timing[i] += rospy.Duration(0.1)

            # If motion was relative, record transformed pose
            traj_step.armTrajectory = ArmTrajectory(
                Interaction._arm_trajectory.rArm[:],
                Interaction._arm_trajectory.lArm[:],
                Interaction._arm_trajectory.timing[:],
                Interaction._arm_trajectory.rRefFrame,
                Interaction._arm_trajectory.lRefFrame,
                Interaction._arm_trajectory.rRefFrameObject,
                Interaction._arm_trajectory.lRefFrameObject)
            traj_step.gripperAction = GripperAction(
                self.arms.get_gripper_state(0),
                self.arms.get_gripper_state(1))
            self.session.add_step_to_action(traj_step,
                                            self.world.get_frame_list())
            Interaction._arm_trajectory = None
            Interaction._trajectory_start_time = None
            self.session.save_current_action()
            self.freeze_arm(0)
            self._demo_state = DemoState.HAS_RECORDED_DEMO
            Response.say(RobotSpeech.STOPPED_RECORDING)
            Response.perform_gaze_action(GazeGoal.NOD)
        else:
            Response.say(RobotSpeech.ERROR_NOT_RECORDING)
            Response.perform_gaze_action(GazeGoal.SHAKE)
        rospy.loginfo('Current state: ' + self._demo_state)
        self.busy = False

    def replay_demonstration(self, dummy=None):
        '''Starts the execution of the current demonstration'''
        self.busy = True
        execution_z_offset = 0.00
        if (self._demo_state == DemoState.HAS_RECORDED_DEMO):
            self.session.save_current_action()
            action = self.session.get_current_action()
            self.arms.start_execution(action, execution_z_offset)
            Response.say(RobotSpeech.STARTED_REPLAY)
            self._demo_state = DemoState.PLAYING_DEMO
        else: