This article collects and summarizes typical usage examples of the Session.Session.n_actions method in Python. If you are trying to work out what Python's Session.n_actions does, how to call it, or what real uses of it look like, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, Session.Session.
The following shows 2 code examples of the Session.n_actions method, sorted by popularity by default.
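Both examples use n_actions() in the same way: as a guard that checks whether the current Session already holds any recorded actions before one is switched to, edited, or visualized. The short sketch below illustrates that pattern on its own; it is only a sketch, and the empty object_list, the is_debug value, and the print fallback are illustrative assumptions rather than code taken from the examples.

from Session import Session

# Hypothetical, minimal setup mirroring Example 1 (there, object_list
# comes from World.get_frame_list()).
session = Session(object_list=[], is_debug=True)

# The examples only ever branch on whether any actions exist yet.
if session.n_actions() > 0:
    action = session.get_current_action()  # as in Example 2's update()
    action.update_viz()
else:
    print('No skills recorded yet.')  # illustrative fallback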
Example 1: __init__
# Required import: from Session import Session [as alias]
# Or: from Session.Session import n_actions [as alias]
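# Note: besides Session, the snippet below also relies on rospy, time,
# MarkerArray, and several PbD helper classes (Arms, World, Response,
# RobotSpeech, GazeGoal, Command, GuiCommand, GripperState, ArmMode).
# Only the imports written out here have well-known paths; the helper
# classes' import paths are not shown in the original example.
import time
import rospy
from visualization_msgs.msg import MarkerArray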
class Interaction:
    '''Finite state machine for the human interaction'''
    _is_programming = True
    _is_recording_motion = False
    _arm_trajectory = None
    _trajectory_start_time = None

    def __init__(self):
        self.arms = Arms()
        self.world = World()
        self.session = Session(object_list=self.world.get_frame_list(),
                               is_debug=True)
        self._viz_publisher = rospy.Publisher('visualization_marker_array',
                                              MarkerArray)
        rospy.Subscriber('recognized_command', Command, self.speech_command_cb)
        rospy.Subscriber('gui_command', GuiCommand, self.gui_command_cb)
        self._undo_function = None
        self.responses = {
            Command.TEST_MICROPHONE: Response(Interaction.empty_response,
                [RobotSpeech.TEST_RESPONSE, GazeGoal.NOD]),
            Command.NEW_DEMONSTRATION: Response(self.create_action, None),
            Command.TAKE_TOOL: Response(self.take_tool, 0),
            Command.RELEASE_TOOL: Response(self.open_hand, 0),
            Command.DETECT_SURFACE: Response(self.record_object_pose, None),
            Command.START_RECORDING: Response(self.start_recording, None),
            Command.STOP_RECORDING: Response(self.stop_recording, None),
            Command.REPLAY_DEMONSTRATION: Response(self.execute_action, None),
            Command.SAVE_ARM_POSE: Response(self.save_arm_pose, None)
        }
        rospy.loginfo('Will wait until arms ready to respond.')
        while ((self.arms.get_ee_state(0) is None) or
               (self.arms.get_ee_state(1) is None)):
            time.sleep(0.1)
        rospy.loginfo('Starting to move to the initial pose.')
        self._move_to_arm_pose('initial', 0)
        self._move_to_arm_pose('initial', 1)
        rospy.loginfo('Interaction initialized.')

    def load_known_arm_poses(self):
        '''This loads important poses from the hard drive'''
        # TODO
        pass

    def open_hand(self, arm_index):
        '''Opens gripper on the indicated side'''
        if self.arms.set_gripper_state(arm_index, GripperState.OPEN):
            speech_response = Response.open_responses[arm_index]
            if (Interaction._is_programming and self.session.n_actions() > 0):
                self.save_gripper_step(arm_index, GripperState.OPEN)
                speech_response = (speech_response + ' ' +
                                   RobotSpeech.STEP_RECORDED)
            return [speech_response, Response.glance_actions[arm_index]]
        else:
            return [Response.already_open_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def take_tool(self, arm_index):
        self.close_hand(arm_index)
        self._move_to_arm_pose('initial', arm_index)

    def close_hand(self, arm_index):
        '''Closes gripper on the indicated side'''
        if self.arms.set_gripper_state(arm_index, GripperState.CLOSED):
            speech_response = Response.close_responses[arm_index]
            return [speech_response, Response.glance_actions[arm_index]]
        else:
            return [Response.already_closed_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def relax_arm(self, arm_index):
        '''Relaxes arm on the indicated side'''
        if self.arms.set_arm_mode(arm_index, ArmMode.RELEASE):
            return [Response.release_responses[arm_index],
                    Response.glance_actions[arm_index]]
        else:
            return [Response.already_released_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def freeze_arm(self, arm_index):
        '''Stiffens arm on the indicated side'''
        if self.arms.set_arm_mode(arm_index, ArmMode.HOLD):
            return [Response.hold_responses[arm_index],
                    Response.glance_actions[arm_index]]
        else:
            return [Response.already_holding_responses[arm_index],
                    Response.glance_actions[arm_index]]

    def edit_action(self, dummy=None):
        '''Goes back to edit mode'''
        if (self.session.n_actions() > 0):
            if (Interaction._is_programming):
                return [RobotSpeech.ALREADY_EDITING, GazeGoal.SHAKE]
#......... some of the code is omitted here .........
Example 2: __init__
# Required import: from Session import Session [as alias]
# Or: from Session.Session import n_actions [as alias]
#......... some of the code is omitted here .........
                       Arms.get_joint_state(1)]
        states = [None, None]
        for arm_index in [0, 1]:
            nearest_obj = self.world.get_nearest_object(
                abs_ee_poses[arm_index])
            if (nearest_obj is None):
                states[arm_index] = ArmState(ArmState.ROBOT_BASE,
                                             abs_ee_poses[arm_index],
                                             joint_poses[arm_index], Object())
            else:
                # Relative
                rel_ee_pose = World.transform(
                    abs_ee_poses[arm_index],
                    'base_link', nearest_obj.name)
                states[arm_index] = ArmState(ArmState.OBJECT,
                                             rel_ee_pose,
                                             joint_poses[arm_index], nearest_obj)
        return states
    def speech_command_cb(self, command):
        '''Callback for when a speech command is received'''
        if command.command in self.responses.keys():
            rospy.loginfo('\033[32m Calling response for command ' +
                          command.command + '\033[0m')
            response = self.responses[command.command]
            if (not self.arms.is_executing() and not self._is_busy):
                response.respond()
            else:
                if command.command == Command.STOP_EXECUTION:
                    response.respond()
                else:
                    rospy.logwarn('Ignoring speech command during execution or busy: '
                                  + command.command)
        else:
            switch_command = 'SWITCH_TO_ACTION'
            if (switch_command in command.command):
                action_no = command.command[
                    len(switch_command):len(command.command)]
                action_no = int(action_no)
                if (self.session.n_actions() > 0):
                    self.session.switch_to_action(action_no,
                                                  self.world.get_frame_list())
                    response = Response(self.default_response,
                                        [RobotSpeech.SWITCH_SKILL + str(action_no),
                                         GazeGoal.NOD])
                else:
                    response = Response(self.default_response,
                                        [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE])
                response.respond()
            else:
                rospy.logwarn('\033[32m This command (' + command.command
                              + ') is unknown. \033[0m')
    def gui_command_cb(self, command):
        '''Callback for when a GUI command is received'''
        if (not self.arms.is_executing()):
            if (self.session.n_actions() > 0):
                if (command.command == GuiCommand.SWITCH_TO_ACTION):
                    action_no = command.param
                    self.session.switch_to_action(action_no,
                                                  self.world.get_frame_list())
                    response = Response(self.default_response,
                                        [RobotSpeech.SWITCH_SKILL + str(action_no),
                                         GazeGoal.NOD])
                    response.respond()
                elif (command.command == GuiCommand.SELECT_ACTION_STEP):
                    step_no = command.param
                    self.session.select_action_step(step_no)
                    rospy.loginfo('Selected action step ' + str(step_no))
                else:
                    rospy.logwarn('\033[32m This command (' + command.command
                                  + ') is unknown. \033[0m')
            else:
                response = Response(self.default_response,
                                    [RobotSpeech.ERROR_NO_SKILLS, GazeGoal.SHAKE])
                response.respond()
        else:
            rospy.logwarn('Ignoring GUI command during execution: ' +
                          command.command)
    def update(self):
        '''General update for the main loop'''
        self.arms.update()
        if (self.arms.status != ExecutionStatus.NOT_EXECUTING):
            if (self.arms.status != ExecutionStatus.EXECUTING):
                self._end_replay()
        if (self._demo_state == DemoState.RECORDING_DEMO):
            self._save_arm_to_trajectory()
        if (self.session.n_actions() > 0):
            action = self.session.get_current_action()
            action.update_viz()
        time.sleep(0.1)