本文整理汇总了Python中geometry_msgs.msg.Pose.orientation方法的典型用法代码示例。如果您正苦于以下问题:Python Pose.orientation方法的具体用法?Python Pose.orientation怎么用?Python Pose.orientation使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类geometry_msgs.msg.Pose
的用法示例。
在下文中一共展示了Pose.orientation方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: move_to_box
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def move_to_box(objcolorl):
    """Move the left arm to the drop-off box matching the detected object colour.

    Publishes a success image to Baxter's head display, then requests an IK
    move of the left arm to the selected box pose.

    Args:
        objcolorl (int): 0 selects the green box; any other value selects the
            blue box.

    Relies on module-level ``request_pose`` and ``left_group``.
    """
    pose = Pose()
    # Same fixed orientation for both drop-off poses.
    pose.orientation = Quaternion(1.00, 0.0, 0.00, 0.00)
    if objcolorl == 0:
        # Green box drop-off location.
        pose.position = Point(0.570, -0.176, 0.283)
        path = '/home/ynazon1/Pictures/green_success.png'
    else:
        # Blue box drop-off location.
        # NOTE(review): the image shown here is red_success.png although the
        # original comment said "blue box" -- confirm the asset name.
        pose.position = Point(0.708, -0.153, 0.258)
        path = '/home/ynazon1/Pictures/red_success.png'
    _display_image(path)
    request_pose(pose, "left", left_group)


def _display_image(path):
    """Publish the image at *path* to Baxter's head display (latched topic)."""
    img = cv2.imread(path)
    msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
    pubpic = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)
    pubpic.publish(msg)
    # Sleep to allow for image to be published.
    rospy.sleep(1)
示例2: calculate_mk2_ik
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def calculate_mk2_ik(self):
    """Send the arm through two hard-coded Cartesian waypoints via MoveIt.

    Each waypoint becomes a MoveGroup pose goal for end link "link_7" of the
    "manipulator" group; we wait up to 10 s for each result before sending
    the next. The original duplicated the whole send/wait sequence per
    waypoint; it is now a single loop.
    """
    #goal_point = Point(0.298, -0.249, -0.890) home position
    waypoints = [
        Point(0.298, -0.249, -0.890),  # Goto position 1
        Point(0.305, -0.249, -1.05),   # Goto position 2
    ]
    for goal_point in waypoints:
        goal_pose = Pose()
        goal_pose.position = goal_point
        quat = quaternion_from_euler(0.0, 0.0, 0.0)  # roll, pitch, yaw
        goal_pose.orientation = Quaternion(*quat.tolist())
        moveit_goal = self.create_move_group_pose_goal(goal_pose, group="manipulator", end_link_name="link_7", plan_only=False)
        rospy.loginfo("Sending goal...")
        self.moveit_ac.send_goal(moveit_goal)
        rospy.loginfo("Waiting for result...")
        self.moveit_ac.wait_for_result(rospy.Duration(10.0))
        # NOTE: result is fetched but not inspected, matching the original.
        moveit_result = self.moveit_ac.get_result()
示例3: update
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def update(self):
    """Convert camera pixel detections into robot base_link coordinates.

    Reads ``self.in_CameraDetections`` (PoseArray in pixel coordinates) and
    writes ``self.out_CameraDetections`` (PoseArray in robot base_link
    coordinates, metres). Returns early when no detection is available.
    """
    # protected region updateCode on begin #
    detections = copy.deepcopy(self.in_CameraDetections)
    # check if detection is available
    if not detections.poses:
        return
    # Step 1: pixel coords -> camera_base_link coords in metres, with the
    # image centre as origin.
    half_x = self.config_ResolutionX / 2.0
    half_y = self.config_ResolutionY / 2.0
    camera_frame = PoseArray()
    camera_frame.header = detections.header
    for detection in detections.poses:
        converted = Pose()
        converted.position.x = (detection.position.x - half_x) * self.config_MeterPerPixel
        converted.position.y = (detection.position.y - half_y) * self.config_MeterPerPixel
        converted.position.z = 0.0
        converted.orientation = detection.orientation
        camera_frame.poses.append(converted)
    # Step 2: camera_base_link -> robot base_link (fixed offset, y mirrored).
    robot_frame = PoseArray()
    robot_frame.header = camera_frame.header
    for detection in camera_frame.poses:
        shifted = Pose()
        shifted.position.x = self.camera_base_link_offset_X + detection.position.x
        shifted.position.y = self.camera_base_link_offset_Y - detection.position.y
        shifted.position.z = 0.0
        shifted.orientation = detection.orientation  # TODO: rotate 180deg around x-axis
        robot_frame.poses.append(shifted)
    self.out_CameraDetections = robot_frame
    # protected region updateCode end #
    pass
示例4: execute
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def execute(self, userdata):
    """Compute a move_base goal that places the robot in front of the handle.

    Closed door (a handle side was detected): stand 0.4 m back and 0.2 m to
    the side of the handle, facing the door. Open door: aim 1.0 m past the
    detected door position to cross it. The goal is stored in
    ``userdata.door_handle_pose_goal``.
    """
    rospy.loginfo("Creating goal to put robot in front of handle")
    goal = Pose()
    door_info = userdata.door_detection_data_in_base_link
    if door_info.handle_side in ("left", "right"):  # closed door
        handle_pose = door_info.door_handle.pose
        (r, p, theta) = euler_from_quaternion((handle_pose.orientation.x,
                                               handle_pose.orientation.y,
                                               handle_pose.orientation.z,
                                               handle_pose.orientation.w))  # gives back r, p, y
        theta += 3.1416  # the door faces the robot; we need the inverse yaw
        goal.orientation = Quaternion(*quaternion_from_euler(0, 0, theta))  # look parallel to the door
        goal.position.x = handle_pose.position.x - 0.4  # align the shoulder with the handle
        goal.position.y = handle_pose.position.y + 0.2  # refer to upper comment
        goal.position.z = 0.0  # we dont need the Z for moving
        userdata.door_handle_pose_goal = goal
    else:  # open door
        # if it's open... just cross it?
        door_pose = door_info.door_position.pose
        (r, p, theta) = euler_from_quaternion((door_pose.orientation.x,
                                               door_pose.orientation.y,
                                               door_pose.orientation.z,
                                               door_pose.orientation.w))  # gives back r, p, y
        theta += 3.1416  # the door faces the robot; we need the inverse yaw
        goal.orientation = Quaternion(*quaternion_from_euler(0, 0, theta))  # look parallel to the door
        goal.position.x = door_pose.position.x + 1.0  # enter the door
        goal.position.y = door_pose.position.y
        userdata.door_handle_pose_goal = goal
    rospy.loginfo("Move base goal: \n" + str(goal))
    return succeeded
示例5: update_particle_cloud
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def update_particle_cloud(self, scan):
#remove NaN and inf values
scan.ranges = scan_filter(scan.ranges, self.sensor_model.scan_range_max)
if len(scan.ranges) > 0:
weights = [self.sensor_model.get_weight(scan, pose) for pose in self.particlecloud.poses]
else:
print "Error: Scan ranges null"
return
# used for finding how confident you are in the weights - decay the weight average
w_avg = mean(weights)
self.slow_decay += 0.001 * (w_avg - self.slow_decay)
self.fast_decay += 0.1 * (w_avg - self.fast_decay)
breakpoints=cumsum(weights)
maximum = max(breakpoints)
# the probability of the weights being okay
prob = max(0.0, 1.0 - (self.fast_decay/self.slow_decay))
if not prob == 0:
loops = int(len(self.particlecloud.poses) * prob)
else:
loops = 0
# Update particlecloud, given map and laser scan
pose_arr = PoseArray()
for i in range(0,len(self.particlecloud.poses)):
new_pose = Pose()
# add completely random noise to re-localise
if i < loops:
# 33.1 and 31.95 being the x and y sizes of the map respectively
new_pose.position.x = uniform(0, 33.1)
new_pose.position.y = uniform(0,31.95)
new_pose.orientation = rotateQuaternion(Quaternion(w=1), uniform(0, math.pi*2))
# otherwise use roulette wheel resampling to resample
else:
# make a random pick
pick = uniform(0, maximum)
# an nlogn implementation of the roulette wheel search - by splitting sorted list in half repeatedly
i = bisect.bisect(breakpoints, pick)
choice = self.particlecloud.poses[i]
position = choice.position
orientation = choice.orientation
# add resampling noise to cope with changes in odometry
new_pose.position.x = gauss(position.x, 0.05)
new_pose.position.y = gauss(position.y, 0.05)
rotation = gauss(0, 0.125)
new_pose.orientation = rotateQuaternion(orientation, rotation)
new_pose.orientation = orientation
pose_arr.poses.append(new_pose)
self.particlecloud = pose_arr
示例6: particle_to_pose
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def particle_to_pose(particle):
    ''' Converts a particle in the form [x, y, theta] into a Pose object '''
    x, y, theta = particle[0], particle[1], particle[2]
    result = Pose()
    result.position.x = x
    result.position.y = y
    result.orientation = angle_to_quaternion(theta)
    return result
示例7: get_grasp_pose
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def get_grasp_pose(g_decision_making_state, object_grasp_pose, object_grasp_pose_set):
    """Fill *object_grasp_pose_set* with candidate grasp poses for the object.

    Loads the pre-computed grasp set for the current object from
    ``../grasp_data/<object_name>.npz`` and, for each stored grasp, keeps the
    detected object position and combines the detected orientation with the
    stored grasp orientation via ``rotate_orientation``.

    Args:
        g_decision_making_state: state providing ``object_name`` and ``bin_id``.
        object_grasp_pose: detected pose of the object.
        object_grasp_pose_set: indexable container filled with Pose objects.

    Returns:
        The filled ``object_grasp_pose_set``.
    """
    # load pre-computed grasps
    # loaded_file = numpy.load('../grasp_data/' + g_decision_making_state.object_name + '.npz' )
    # loaded_grasp = loaded_file['grasp'][0]
    # print len(loaded_grasp[g_decision_making_state.object_name])
    loaded_file = numpy.load('../grasp_data/' + g_decision_making_state.object_name + '.npz' )
    loaded_grasp = loaded_file['grasp'][0]
    num_of_grasp = len(loaded_grasp[g_decision_making_state.object_name])
    temp = object_grasp_pose
    for i in range(0,num_of_grasp):
        print i
        print_angles(loaded_grasp[g_decision_making_state.object_name][i])
        print_angles(temp)
        # NOTE(review): this is an alias, not a copy -- object_grasp_pose_temp,
        # temp and object_grasp_pose all refer to the same Pose. Confirm that a
        # deep copy was not intended.
        object_grasp_pose_temp = temp
        print_angles(temp)
        object_grasp_pose_set[i] = Pose()
        object_grasp_pose_set[i].position = temp.position
        object_grasp_pose_set[i].orientation = rotate_orientation(object_grasp_pose_temp.orientation, loaded_grasp[g_decision_making_state.object_name][i].orientation)
        # NOTE(review): the motion is planned towards object_grasp_pose rather
        # than the candidate object_grasp_pose_set[i], and move_ok is never
        # checked -- verify this is intentional.
        move_ok = plan_and_move_end_effector_to(object_grasp_pose, 0.3,
            g_bin_positions[g_decision_making_state.bin_id]['map_robot_position_2_ik_seed'])
        print_angles(object_grasp_pose_set[i])
        # raw_input("press Enter to continue...")
    return object_grasp_pose_set
示例8: execute
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def execute(self, userdata):
    """Populate userdata with the hard-coded drink orders and guest location."""
    userdata.orders = [DrinkOrder('david', 'coke'), DrinkOrder('michael', 'milk')]
    guests_pose = Pose()
    guests_pose.position = Point(0.059, 6.26, 0.0)
    guests_pose.orientation = Quaternion(0, 0, -0.59, 0.8)
    userdata.guests_location = guests_pose
    return succeeded
示例9: obj_response_cb
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def obj_response_cb(userdata, response):
if response.exists:
pose = Pose()
#Wont there be problems mixing float32 with 64? TODO
pose.position = Point(response.base_coordinates.x, response.base_coordinates.y, 0)
#TODO, this may give problems
pose.orientation = Quaternion(*quaternion_from_euler(0, 0, response.base_coordinates.z))
if MANTAIN_DISTANCE_FROM_OBJECT:
pose = pose_at_distance(pose, DISTANCE_FROM_OBJECT)
userdata.object_location = pose
release_pose = ObjectPose()
release_pose.pose = response.arm_coordinates
userdata.object_release_location = release_pose
print "\n\n printing pose for move to object"
print pose
print "\n\n printing pose for releasing the object"
print release_pose
print "\n that was the pose of the object\n\n"
print userdata.object_name
print "is the object name"
return succeeded
else:
userdata.object_location = None
return aborted
示例10: transform_pose
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def transform_pose(pose_in, transform):
    '''
    Transforms a pose

    Takes a pose defined in the frame defined by transform (i.e. the frame in
    which transform is the origin) and returns it in the frame in which
    transform is defined. Calling this with the origin pose will return
    transform.

    This returns
    pose.point = transform_point(pose_in.point, transform)
    pose.orientation = transform_orientation(pose_in.orientation, transform)

    **Args:**
        **pose_in (geometry_msgs.msg.Pose):** Pose to transform
        **transform (geometry_msgs.msg.Pose):** The transform

    **Returns:**
        A geometry_msgs.msg.Pose
    '''
    transformed = Pose()
    transformed.position = transform_point(pose_in.position, transform)
    transformed.orientation = transform_quaternion(pose_in.orientation, transform)
    return transformed
示例11: find_joint_pose
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def find_joint_pose(self, pose, targ_x=0.0, targ_y=0.0, targ_z=0.0,
                    targ_ox=0.0, targ_oy=0.0, targ_oz=0.0):
    '''
    Find the arm joint positions for *pose* shifted by the given offsets.

    The positional and angular offsets are combined with the calibrated
    Baxter offsets (self.baxter_off) before querying the right-arm IK
    service. Returns a dict mapping joint names to joint positions.
    '''
    ik_service_name = "ExternalTools/right/PositionKinematicsNode/IKService"
    ik_proxy = rospy.ServiceProxy(ik_service_name, SolvePositionIK)
    request = SolvePositionIKRequest()
    target = deepcopy(pose)
    offsets = self.baxter_off
    # Apply linear offsets on top of the requested deltas.
    target['position'] = Point(x=targ_x + offsets.linear.x,
                               y=targ_y + offsets.linear.y,
                               z=targ_z + offsets.linear.z)
    # Build the target orientation from the combined Euler angles.
    quat = tf.transformations.quaternion_from_euler(
        targ_ox + offsets.angular.x,
        targ_oy + offsets.angular.y,
        targ_oz + offsets.angular.z)
    target['orientation'] = Quaternion(x=quat[0], y=quat[1],
                                       z=quat[2], w=quat[3])
    approach = Pose()
    approach.position = target['position']
    approach.orientation = target['orientation']
    header = Header(stamp=rospy.Time.now(), frame_id='base')
    request.pose_stamp.append(PoseStamped(header=header, pose=approach))
    response = ik_proxy(request)
    return dict(zip(response.joints[0].name, response.joints[0].position))
示例12: homogeneous_product_pose_transform
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def homogeneous_product_pose_transform(pose, transform):
    """
    Compose *pose* with *transform* using homogeneous matrices.

    pose should be Pose() msg; transform is Transform(). The transform is
    applied in the pose's frame: H_result = H_pose . H_transform. Returns a
    Pose() whose position is a geometry_msgs Vector3 (as in the original).
    """
    quaternion_matrix = tf.transformations.quaternion_matrix
    # 1. Homogeneous matrix of the transform.
    Ht = quaternion_matrix([transform.rotation.x, transform.rotation.y,
                            transform.rotation.z, transform.rotation.w])
    Ht[:3, 3] = [transform.translation.x, transform.translation.y, transform.translation.z]
    # 2. Homogeneous matrix of the original pose.
    Hp = quaternion_matrix([pose.orientation.x, pose.orientation.y,
                            pose.orientation.z, pose.orientation.w])
    Hp[:3, 3] = [pose.position.x, pose.position.y, pose.position.z]
    # Compose, then read position and orientation back out of the matrix.
    H = numpy.dot(Hp, Ht)
    translation = H[:3, 3].T
    quat = tf.transformations.quaternion_from_matrix(H)
    result = Pose()
    result.position = geometry_msgs.msg.Vector3(translation[0], translation[1], translation[2])
    result.orientation = geometry_msgs.msg.Quaternion(quat[0], quat[1], quat[2], quat[3])
    return result
示例13: __init__
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def __init__(self):
    """Build the cloth-hanging sub-state-machine.

    Sequence: announce the intention, move to the hanger room (retrying the
    move on abort), then run the hanger approach sub-machine.
    """
    smach.StateMachine.__init__(self, [succeeded, preempted, aborted])
    with self:
        intro_text = "Good!, I will take it to the hanger."
        smach.StateMachine.add('INTRO',
                               SpeakActionState(intro_text),
                               transitions={succeeded: 'MOVE_TO_HANGER'})
        # move to hanger
        self.userdata.hanger = HANGER_NAME
        smach.StateMachine.add('MOVE_TO_HANGER',
                               MoveToRoomStateMachine(announcement=None),
                               transitions={succeeded: 'APPROACH_TO_HANGER',
                                            preempted: preempted,
                                            aborted: 'MOVE_TO_HANGER'},
                               remapping={'room_name': 'hanger'})
        # Rotate 180 degrees.
        # NOTE(review): 3.6 rad is about 206 degrees, not 180 -- confirm the
        # intended yaw. pose_turn is currently unused because the
        # MoveActionState below is commented out.
        pose_turn = Pose()
        pose_turn.orientation = Quaternion(*quaternion_from_euler(0,0,3.6))
        #smach.StateMachine.add('MOVE_TO_HANGER',
        #MoveActionState(move_base_action_name='/move_by/move_base',pose=pose_turn),
        #transitions={succeeded: 'APPROACH_TO_HANGER',
        #preempted: preempted,
        #aborted: 'MOVE_TO_HANGER'},
        #remapping={'room_name': 'hanger'})
        smach.StateMachine.add('APPROACH_TO_HANGER',
                               SMClothHangingApproachHangerPartStateMachine(),
                               transitions={succeeded: succeeded,
                                            preempted: preempted,
                                            aborted: aborted})
示例14: project
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def project(self, p, radius, width, height):
    """Back-project a detected ball into a camera-frame Pose.

    Uses a pinhole model with hard-coded FOV (63.38 x 48.25 degrees) to form
    a unit ray through pixel *p*, then scales it by the distance estimate
    ``self.pixel_radius / radius``. Returns a Pose with identity orientation.
    """
    horizontal_fov = 63.38
    vertical_fov = 48.25
    cx = width / 2
    cy = height / 2
    # Focal lengths derived from the field of view and the image centre.
    fx = cx / np.tan((horizontal_fov / 2) * np.pi / 180)
    fy = cy / np.tan((vertical_fov / 2) * np.pi / 180)
    # Ray through the pixel (image y axis points down, hence the negation).
    ray = np.zeros(3)
    ray[0] = (p[0] - cx) / fx
    ray[1] = -(p[1] - cy) / fy
    ray[2] = 1
    # normalize so we can then multiply by distance
    ray = ray / np.linalg.norm(ray)
    distance = self.pixel_radius / radius
    ray = ray * distance
    ball_pose = Pose()
    ball_pose.position = Point(ray[0], ray[1], ray[2])
    ball_pose.orientation = Quaternion(0, 0, 0, 1)
    return ball_pose
示例15: process_collision_geometry_for_cluster
# 需要导入模块: from geometry_msgs.msg import Pose [as 别名]
# 或者: from geometry_msgs.msg.Pose import orientation [as 别名]
def process_collision_geometry_for_cluster(self, cluster):
    """Add a point cluster to the collision map as a set of tiny boxes.

    Roughly 1% of the cluster's points are sampled at random; each becomes a
    5 mm cube at the point's location. Publishes one CollisionObject and
    returns its generated name.
    """
    rospy.loginfo("adding cluster with %d points to collision map"%len(cluster.points))
    boxes = CollisionObject()
    boxes.operation.operation = CollisionObjectOperation.ADD
    boxes.header = cluster.header
    boxes.header.stamp = rospy.Time.now()
    # Sample about one point in a hundred.
    sample_size = int(len(cluster.points)/100.0)
    candidate_indices = range(len(cluster.points))
    scipy.random.shuffle(candidate_indices)
    for index in candidate_indices[0:sample_size]:
        box = Shape()
        box.type = Shape.BOX
        box.dimensions = [.005]*3
        box_pose = Pose()
        point = cluster.points[index]
        box_pose.position.x = point.x
        box_pose.position.y = point.y
        box_pose.position.z = point.z
        box_pose.orientation = Quaternion(*[0,0,0,1])
        boxes.shapes.append(box)
        boxes.poses.append(box_pose)
    collision_name = self.get_next_object_name()
    boxes.id = collision_name
    self.object_in_map_pub.publish(boxes)
    return collision_name