本文整理汇总了Python中World.World.get_frame_list方法的典型用法代码示例。如果您正苦于以下问题:Python World.get_frame_list方法的具体用法?Python World.get_frame_list怎么用?Python World.get_frame_list使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类World.World
的用法示例。
在下文中一共展示了World.get_frame_list方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from World import World [as 别名]
# 或者: from World.World import get_frame_list [as 别名]
class Interaction:
'''Finite state machine for the human interaction'''
_is_programming = True
_is_recording_motion = False
_arm_trajectory = None
_trajectory_start_time = None
def __init__(self):
    '''Sets up the interaction machine: arm control, world model,
    session bookkeeping, ROS pub/sub wiring, and the speech-command
    dispatch table.  Blocks until both end-effectors report state,
    then moves both arms to the 'initial' pose.'''
    self.arms = Arms()
    self.world = World()
    # Session is seeded with the frames currently known to the world model.
    self.session = Session(object_list=self.world.get_frame_list(),
                           is_debug=True)
    self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                          MarkerArray)
    # Speech and GUI commands both funnel into this state machine.
    rospy.Subscriber('recognized_command', Command, self.speech_command_cb)
    rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
    self._undo_function = None
    # Maps each spoken command to its handler plus a canned
    # speech/gaze response (second Response argument is the handler's
    # fixed parameter, e.g. an arm index, or None).
    self.responses = {
        Command.TEST_MICROPHONE: Response(Interaction.empty_response,
            [RobotSpeech.TEST_RESPONSE, GazeGoal.NOD]),
        Command.NEW_DEMONSTRATION: Response(self.create_action, None),
        Command.TAKE_TOOL: Response(self.take_tool, 0),
        Command.RELEASE_TOOL: Response(self.open_hand, 0),
        Command.DETECT_SURFACE: Response(self.record_object_pose, None),
        Command.START_RECORDING: Response(self.start_recording, None),
        Command.STOP_RECORDING: Response(self.stop_recording, None),
        Command.REPLAY_DEMONSTRATION: Response(self.execute_action, None),
        Command.SAVE_ARM_POSE: Response(self.save_arm_pose, None)
    }
    rospy.loginfo('Will wait until arms ready to respond.')
    # Busy-wait until both arms (indices 0 and 1) publish an
    # end-effector state; the arms cannot be commanded before that.
    while ((self.arms.get_ee_state(0) is None) or
           (self.arms.get_ee_state(1) is None)):
        time.sleep(0.1)
    rospy.loginfo('Starting to move to the initial pose.')
    self._move_to_arm_pose('initial', 0)
    self._move_to_arm_pose('initial', 1)
    rospy.loginfo('Interaction initialized.')
def load_known_arm_poses(self):
    '''Loads important poses from the hard drive.

    Currently a stub: no poses are read yet.
    '''
    # TODO: implement persistence of known arm poses.
    pass
def open_hand(self, arm_index):
    '''Opens gripper on the indicated side.

    Returns a [speech, gaze-action] pair; when in programming mode
    with an active action, the open is also recorded as a step.
    '''
    glance = Response.glance_actions[arm_index]
    if not self.arms.set_gripper_state(arm_index, GripperState.OPEN):
        # Gripper was already open: report that, change nothing.
        return [Response.already_open_responses[arm_index], glance]
    speech = Response.open_responses[arm_index]
    if Interaction._is_programming and self.session.n_actions() > 0:
        # Record the gripper change as a step of the current action.
        self.save_gripper_step(arm_index, GripperState.OPEN)
        speech = speech + ' ' + RobotSpeech.STEP_RECORDED
    return [speech, glance]
def take_tool(self, arm_index):
    '''Grasps the tool with the indicated arm, then returns that
    arm to its initial pose.'''
    # Close the gripper around the tool first, then move away.
    self.close_hand(arm_index)
    self._move_to_arm_pose('initial', arm_index)
def close_hand(self, arm_index):
    '''Closes gripper on the indicated side.

    Returns a [speech, gaze-action] pair, mirroring open_hand().
    '''
    # CONSISTENCY FIX: call through the instance handle (self.arms)
    # like open_hand/relax_arm/freeze_arm do, instead of through the
    # Arms class.  If set_gripper_state is a staticmethod this is
    # behaviorally identical; if it is an instance method, the
    # class-level call was passing arm_index as self and was broken.
    if self.arms.set_gripper_state(arm_index, GripperState.CLOSED):
        speech_response = Response.close_responses[arm_index]
        return [speech_response, Response.glance_actions[arm_index]]
    else:
        # Gripper was already closed.
        return [Response.already_closed_responses[arm_index],
                Response.glance_actions[arm_index]]
def relax_arm(self, arm_index):
    '''Relaxes arm on the indicated side.

    Returns a [speech, gaze-action] pair depending on whether the
    mode actually changed.
    '''
    released = self.arms.set_arm_mode(arm_index, ArmMode.RELEASE)
    if released:
        speech = Response.release_responses[arm_index]
    else:
        # Arm was already in release mode.
        speech = Response.already_released_responses[arm_index]
    return [speech, Response.glance_actions[arm_index]]
def freeze_arm(self, arm_index):
    '''Stiffens arm on the indicated side.

    Returns a [speech, gaze-action] pair depending on whether the
    mode actually changed.
    '''
    held = self.arms.set_arm_mode(arm_index, ArmMode.HOLD)
    speech = (Response.hold_responses[arm_index] if held
              else Response.already_holding_responses[arm_index])
    return [speech, Response.glance_actions[arm_index]]
def edit_action(self, dummy=None):
'''Goes back to edit mode'''
if (self.session.n_actions() > 0):
if (Interaction._is_programming):
return [RobotSpeech.ALREADY_EDITING, GazeGoal.SHAKE]
#.........这里部分代码省略.........
示例2: __init__
# 需要导入模块: from World import World [as 别名]
# 或者: from World.World import get_frame_list [as 别名]
class Interaction:
'''Finite state machine for the human interaction'''
_arm_trajectory = None
_trajectory_start_time = None
def __init__(self):
    '''Sets up the demonstration-interaction machine: arm control,
    world model, session bookkeeping, ROS pub/sub wiring, and the
    speech-command dispatch table.  Blocks until both end-effectors
    report state, then moves to the tool-handoff pose and asks the
    user for the tool.'''
    self.arms = Arms()
    self.world = World()
    # Session is seeded with the frames currently known to the world model.
    self.session = Session(object_list=self.world.get_frame_list(),
                           is_debug=True)
    self._viz_publisher = rospy.Publisher('visualization_marker_array', MarkerArray)
    self._demo_state = None
    # Busy flag guards against re-entrant command handling during init.
    self._is_busy = True
    rospy.Subscriber('recognized_command', Command, self.speech_command_cb)
    rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
    self.responses = {
        Command.TEST_MICROPHONE: Response(self.default_response,
            [RobotSpeech.TEST_RESPONSE, GazeGoal.NOD]),
        Command.TAKE_TOOL: Response(self.take_tool, 0),
        Command.RELEASE_TOOL: Response(self.release_tool, 0),
        Command.START_RECORDING: Response(self.start_recording, None),
        Command.STOP_RECORDING: Response(self.stop_recording, None),
        # BUG FIX: the comma after this entry was missing, which made
        # the dict literal a SyntaxError.
        Command.REPLAY_DEMONSTRATION: Response(self.replay_demonstration, None),
        Command.DETECT_SURFACE: Response(self.detect_surface, None)
    }
    rospy.loginfo('Will wait until arms ready to respond.')
    # Busy-wait until both arms (indices 0 and 1) publish an
    # end-effector state; the arms cannot be commanded before that.
    while ((self.arms.get_ee_state(0) is None) or
           (self.arms.get_ee_state(1) is None)):
        time.sleep(0.1)
    rospy.loginfo('Starting to move to the initial pose.')
    # TODO: Make it possible to take with either hand
    self._move_to_arm_pose('take', 0)
    self._move_to_arm_pose('away', 1)
    self._wait_for_arms()
    self._demo_state = DemoState.READY_TO_TAKE
    Response.say(RobotSpeech.HAND_TOOL_REQUEST)
    Response.perform_gaze_action(GazeGoal.GLANCE_RIGHT_EE)
    self._is_busy = False
    rospy.loginfo('Interaction initialized.')
def take_tool(self, arm_index):
    '''Robot's response to TAKE_TOOL.

    Closes the gripper on the offered tool, inspects it with the
    camera, and either starts a new action for the recognized tool
    (then detects the surface) or hands the tool back on failure.
    Only valid in the READY_TO_TAKE state.
    '''
    self._is_busy = True
    if self._demo_state == DemoState.READY_TO_TAKE:
        ## Robot closes the hand
        Arms.set_gripper_state(arm_index, GripperState.CLOSED, wait=True)
        ## Robot moves the hand near the camera to take a look at the tool
        self._move_to_arm_pose('look', arm_index, wait=True)
        self.tool_id = self.world.get_tool_id()
        if self.tool_id is None:
            ## Robot moves the arm back so the person can take the tool
            self._move_to_arm_pose('take', 0, wait=True)
            Response.say(RobotSpeech.ERROR_TOOL_NOT_RECOGNIZED)
            Response.perform_gaze_action(GazeGoal.SHAKE)
            self._demo_state = DemoState.NO_TOOL_NO_SURFACE
        else:
            self.session.new_action(self.tool_id)
            Response.say(RobotSpeech.RECOGNIZED_TOOL + str(self.tool_id))
            self._demo_state = DemoState.HAS_TOOL_NO_SURFACE
            self.detect_surface()
    else:
        Response.say(RobotSpeech.ERROR_NOT_IN_TAKE_STATE)
        # BUG FIX: wrap the state in str() — '+' raises TypeError if
        # DemoState values are not plain strings (no-op if they are).
        rospy.loginfo('Current state: ' + str(self._demo_state))
    self._is_busy = False
def detect_surface(self):
    '''Scans for the demonstration surface.

    Moves the arm out of view, queries the world model for the
    surface, and on success moves the arm to the ready pose and
    advances the state machine to READY_FOR_DEMO.  Only acts in the
    HAS_TOOL_NO_SURFACE state.
    '''
    self._is_busy = True
    if self._demo_state == DemoState.HAS_TOOL_NO_SURFACE:
        ## Robot moves the arm away and looks at the surface
        self._move_to_arm_pose('away', 0, wait=True)
        self.surface = self.world.get_surface()
        if self.surface is None:
            Response.say(RobotSpeech.ERROR_NO_SURFACE)
            Response.perform_gaze_action(GazeGoal.SHAKE)
        else:
            Response.say(RobotSpeech.SURFACE_DETECTED)
            # BUG FIX: the original referenced arm_index, which is not
            # a parameter of this method (NameError on this path).
            # Use arm 0, matching the hard-coded arm moved above.
            self._move_to_arm_pose('ready', 0, wait=True)
            Response.say(RobotSpeech.READY_FOR_DEMO)
            self._demo_state = DemoState.READY_FOR_DEMO
    self._is_busy = False
def release_tool(self, arm_index):
self.busy = True
if (self._demo_state != DemoState.READY_TO_TAKE and
self._demo_state != DemoState.PLAYING_DEMO and
self._demo_state != DemoState.RECORDING_DEMO):
self._move_to_arm_pose('take', 0, wait=True)
self.arms.set_gripper_state(arm_index, GripperState.OPEN, wait=True)
#.........这里部分代码省略.........