This page collects typical usage examples of the Python method Session.Session.n_frames. If you are wondering what Session.n_frames does, how to call it, or what real uses look like, the hand-picked code examples below should help. You can also explore the containing class, Session.Session, for further usage examples.
Two code examples of Session.n_frames are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better Python code samples.
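Before the full examples, here is a minimal sketch of the pattern both examples rely on: n_frames() reports how many steps (frames) the current action holds, and the examples check it before clearing the action. The helper function and its name below are hypothetical, added only for illustration; only session.n_frames() and session.clear_current_action() are taken from the examples on this page, and the required import is shown in each example's header comments.

    # Hypothetical helper (not part of the original examples): guard a
    # destructive operation on whether the current action has recorded frames.
    def clear_if_nonempty(session):
        '''Clear the current action only when it has at least one recorded frame.'''
        if session.n_frames() > 0:
            session.clear_current_action()
            return True
        return False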
Example 1: __init__
# Required import: from Session import Session [as alias]
# Or: from Session.Session import n_frames [as alias]
#......... part of the code is omitted here .........
        self.session.new_action()
        Interaction._is_programming = True
        return [RobotSpeech.SKILL_CREATED + ' ' +
                str(self.session.current_action_index), GazeGoal.NOD]

    def next_action(self, dummy=None):
        '''Switches to next action'''
        if (self.session.n_actions() > 0):
            if self.session.next_action(self.world.get_frame_list()):
                return [RobotSpeech.SWITCH_SKILL + ' ' +
                        str(self.session.current_action_index), GazeGoal.NOD]
            else:
                return [RobotSpeech.ERROR_NEXT_SKILL + ' ' +
                        str(self.session.current_action_index), GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def previous_action(self, dummy=None):
        '''Switches to previous action'''
        if (self.session.n_actions() > 0):
            if self.session.previous_action(self.world.get_frame_list()):
                return [RobotSpeech.SWITCH_SKILL + ' ' +
                        str(self.session.current_action_index), GazeGoal.NOD]
            else:
                return [RobotSpeech.ERROR_PREV_SKILL + ' ' +
                        str(self.session.current_action_index), GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def delete_all_steps(self, dummy=None):
        '''Deletes all steps in the current action'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                if self.session.n_frames() > 0:
                    self.session.clear_current_action()
                    self._undo_function = self._resume_all_steps
                    return [RobotSpeech.SKILL_CLEARED, GazeGoal.NOD]
                else:
                    return [RobotSpeech.SKILL_EMPTY, None]
            else:
                return ['Action ' + str(self.session.current_action_index) +
                        RobotSpeech.ERROR_NOT_IN_EDIT, GazeGoal.SHAKE]
        else:
            return [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE]

    def _resume_all_steps(self):
        '''Resumes all steps after clearing'''
        self.session.undo_clear()
        return [RobotSpeech.ALL_POSES_RESUMED, GazeGoal.NOD]

    def _resume_last_step(self):
        '''Resumes last step after deleting'''
        self.session.resume_deleted_step()
        return [RobotSpeech.POSE_RESUMED, GazeGoal.NOD]

    def stop_execution(self, dummy=None):
        '''Stops ongoing execution'''
        if (self.arms.is_executing()):
            self.arms.stop_execution()
            return [RobotSpeech.STOPPING_EXECUTION, GazeGoal.NOD]
        else:
            return [RobotSpeech.ERROR_NO_EXECUTION, GazeGoal.SHAKE]

    def save_gripper_step(self, arm_index, gripper_state):
        '''Saves an action step that involves a gripper state change'''
        if (self.session.n_actions() > 0):
Example 2: __init__
# Required import: from Session import Session [as alias]
# Or: from Session.Session import n_frames [as alias]
#......... part of the code is omitted here .........
        Response.say(RobotSpeech.SURFACE_DETECTED)
        self._move_to_arm_pose('ready', arm_index, wait=True)
        Response.say(RobotSpeech.READY_FOR_DEMO)
        self._demo_state = DemoState.READY_FOR_DEMO
        self._is_busy = False
    def release_tool(self, arm_index):
        self.busy = True
        if (self._demo_state != DemoState.READY_TO_TAKE and
                self._demo_state != DemoState.PLAYING_DEMO and
                self._demo_state != DemoState.RECORDING_DEMO):
            self._move_to_arm_pose('take', 0, wait=True)
            self.arms.set_gripper_state(arm_index, GripperState.OPEN, wait=True)
            Response.say(RobotSpeech.TOOL_RELEASED)
            Response.perform_gaze_action(GazeGoal.GLANCE_RIGHT_EE)
            self._demo_state = DemoState.READY_TO_TAKE
        else:
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_NOT_IN_RELEASE_STATE)
            rospy.loginfo('Current state: ' + self._demo_state)
        self.busy = False
    def start_recording(self, dummy=None):
        '''Starts recording continuous motion'''
        self.busy = True
        if (self._demo_state == DemoState.READY_FOR_DEMO or
                self._demo_state == DemoState.HAS_RECORDED_DEMO):
            Interaction._arm_trajectory = ArmTrajectory()
            Interaction._trajectory_start_time = rospy.Time.now()
            if self.session.n_frames() > 0:
                self.session.clear_current_action()
            self.relax_arm(0)
            self._demo_state = DemoState.RECORDING_DEMO
            Response.say(RobotSpeech.STARTED_RECORDING)
            Response.perform_gaze_action(GazeGoal.NOD)
        elif (self._demo_state == DemoState.RECORDING_DEMO):
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_ALREADY_RECORDING)
        else:
            Response.perform_gaze_action(GazeGoal.SHAKE)
            Response.say(RobotSpeech.ERROR_NOT_READY_TO_RECORD)
            rospy.loginfo('Current state: ' + self._demo_state)
        self.busy = False
    def stop_recording(self, dummy=None):
        '''Stops recording continuous motion'''
        self.busy = True
        if (self._demo_state == DemoState.RECORDING_DEMO):
            traj_step = ActionStep()
            traj_step.type = ActionStep.ARM_TRAJECTORY

            # Normalize recorded timestamps: shift everything so the
            # trajectory starts 0.1 s after execution begins.
            waited_time = Interaction._arm_trajectory.timing[0]
            for i in range(len(Interaction._arm_trajectory.timing)):
                Interaction._arm_trajectory.timing[i] -= waited_time
                Interaction._arm_trajectory.timing[i] += rospy.Duration(0.1)

            '''If motion was relative, record transformed pose'''