This page collects typical usage examples of the Python function tf.transformations.quaternion_from_euler. If you are wondering what quaternion_from_euler does, how to call it, or where it is typically used, the hand-picked examples below should help.
The following shows 15 code examples of quaternion_from_euler, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
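Before the examples, here is a minimal standalone sketch (not taken from any example below, assuming a ROS 1 environment with the tf package installed) showing the basic call convention: quaternion_from_euler takes roll, pitch and yaw in radians, plus an optional axes convention (default 'sxyz'), and returns a numpy array in (x, y, z, w) order, which is typically unpacked into a geometry_msgs Quaternion.

# Minimal usage sketch (standalone; assumes a ROS 1 install providing tf and geometry_msgs).
import math
from geometry_msgs.msg import Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion

# Roll, pitch, yaw in radians; default axes convention is 'sxyz'.
q = quaternion_from_euler(0.0, 0.0, math.pi / 2)  # pure 90-degree yaw
print(q)  # approx. [0, 0, 0.7071, 0.7071] in (x, y, z, w) order

# Unpack into a geometry_msgs Quaternion, e.g. for a Pose orientation.
orientation = Quaternion(*q)

# Round-trip back to Euler angles as a sanity check.
roll, pitch, yaw = euler_from_quaternion(q)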
Example 1: execute
def execute(self, userdata):
    rospy.loginfo("Creating goal to put robot in front of handle")
    pose_handle = Pose()
    if userdata.door_detection_data_in_base_link.handle_side == "left" or userdata.door_detection_data_in_base_link.handle_side == "right":  # closed door
        (r, p, theta) = euler_from_quaternion((userdata.door_detection_data_in_base_link.door_handle.pose.orientation.x,
                                               userdata.door_detection_data_in_base_link.door_handle.pose.orientation.y,
                                               userdata.door_detection_data_in_base_link.door_handle.pose.orientation.z,
                                               userdata.door_detection_data_in_base_link.door_handle.pose.orientation.w))  # gives back r, p, y
        theta += 3.1416  # the orientation of the door is looking towards the robot, we need the inverse
        pose_handle.orientation = Quaternion(*quaternion_from_euler(0, 0, theta))  # orientation to look parallel to the door
        pose_handle.position.x = userdata.door_detection_data_in_base_link.door_handle.pose.position.x - 0.4  # to align the shoulder with the handle
        pose_handle.position.y = userdata.door_detection_data_in_base_link.door_handle.pose.position.y + 0.2  # refer to upper comment
        pose_handle.position.z = 0.0  # we don't need the Z for moving
        userdata.door_handle_pose_goal = pose_handle
    else:  # open door
        # if it's open... just cross it?
        (r, p, theta) = euler_from_quaternion((userdata.door_detection_data_in_base_link.door_position.pose.orientation.x,
                                               userdata.door_detection_data_in_base_link.door_position.pose.orientation.y,
                                               userdata.door_detection_data_in_base_link.door_position.pose.orientation.z,
                                               userdata.door_detection_data_in_base_link.door_position.pose.orientation.w))  # gives back r, p, y
        theta += 3.1416  # the orientation of the door is looking towards the robot, we need the inverse
        pose_handle.orientation = Quaternion(*quaternion_from_euler(0, 0, theta))  # orientation to look parallel to the door
        pose_handle.position.x = userdata.door_detection_data_in_base_link.door_position.pose.position.x + 1.0  # enter the door
        pose_handle.position.y = userdata.door_detection_data_in_base_link.door_position.pose.position.y
        userdata.door_handle_pose_goal = pose_handle
    rospy.loginfo("Move base goal: \n" + str(pose_handle))
    return succeeded
Example 2: create_axes_markers
def create_axes_markers(self, msg):
    x_marker = Marker()
    x_marker.type = Marker.CYLINDER
    x_marker.scale.x = msg.scale * 0.05
    x_marker.scale.y = msg.scale * 0.05
    x_marker.scale.z = msg.scale * 0.25
    x_marker.color.a = 1.0
    y_marker = copy.deepcopy(x_marker)
    z_marker = copy.deepcopy(x_marker)
    x_marker.pose.position.x += x_marker.scale.z * 0.5
    y_marker.pose.position.y += y_marker.scale.z * 0.5
    z_marker.pose.position.z += z_marker.scale.z * 0.5
    x_marker.color.r = 1.0
    y_marker.color.g = 1.0
    z_marker.color.b = 1.0
    # Cylinder markers point along Z by default; rotate them to align with the X and Y axes.
    x_quat = tr.quaternion_from_euler(0, np.deg2rad(90), 0)
    x_marker.pose.orientation.x = x_quat[0]
    x_marker.pose.orientation.y = x_quat[1]
    x_marker.pose.orientation.z = x_quat[2]
    x_marker.pose.orientation.w = x_quat[3]
    y_quat = tr.quaternion_from_euler(np.deg2rad(-90), 0, 0)
    y_marker.pose.orientation.x = y_quat[0]
    y_marker.pose.orientation.y = y_quat[1]
    y_marker.pose.orientation.z = y_quat[2]
    y_marker.pose.orientation.w = y_quat[3]
    y_marker.id = 1
    z_marker.id = 2
    return x_marker, y_marker, z_marker
Example 3: calculate_mk2_ik
def calculate_mk2_ik(self):
    #goal_point = Point(0.298, -0.249, -0.890)  # home position
    goal_pose = Pose()
    # Goto position 1
    goal_point = Point(0.298, -0.249, -0.890)
    #goal_point = Point(0.298, -0.5, -1.0)
    goal_pose.position = goal_point
    quat = quaternion_from_euler(0.0, 0.0, 0.0)  # roll, pitch, yaw
    goal_pose.orientation = Quaternion(*quat.tolist())
    moveit_goal = self.create_move_group_pose_goal(goal_pose, group="manipulator", end_link_name="link_7", plan_only=False)
    rospy.loginfo("Sending goal...")
    self.moveit_ac.send_goal(moveit_goal)
    rospy.loginfo("Waiting for result...")
    self.moveit_ac.wait_for_result(rospy.Duration(10.0))
    moveit_result = self.moveit_ac.get_result()
    # Goto position 2
    goal_point = Point(0.305, -0.249, -1.05)
    goal_pose.position = goal_point
    quat = quaternion_from_euler(0.0, 0.0, 0.0)  # roll, pitch, yaw
    goal_pose.orientation = Quaternion(*quat.tolist())
    moveit_goal = self.create_move_group_pose_goal(goal_pose, group="manipulator", end_link_name="link_7", plan_only=False)
    rospy.loginfo("Sending goal...")
    self.moveit_ac.send_goal(moveit_goal)
    rospy.loginfo("Waiting for result...")
    self.moveit_ac.wait_for_result(rospy.Duration(10.0))
    moveit_result = self.moveit_ac.get_result()
Example 4: listen_imu
def listen_imu(self, imu):
    # node can sleep for 16 out of 20 ms without dropping any messages
    # rospy.sleep(16.0/1000.0)
    # self.NN += 1
    # if rospy.get_time() - self.t0 > 1:
    #     rospy.logfatal("Published {} messages in 1 s, {} Hz".format(self.NN, self.NN))
    #     self.NN, self.t0 = 0, rospy.get_time()
    current_orientation = np.array([imu.orientation.x,
                                    imu.orientation.y,
                                    imu.orientation.z,
                                    imu.orientation.w])
    current_angular_speed = np.array([imu.angular_velocity.x,
                                      imu.angular_velocity.y,
                                      0.0])
    current_rpy = list(transformations.euler_from_quaternion(current_orientation))
    # We don't care about yaw, but it must be close to zero
    current_rpy[-1] = 0
    desired_rpy = list(transformations.euler_from_quaternion(self.desired_orientation))
    desired_rpy[-1] = 0
    corrected = self.rp_pid(desired_rpy, current_rpy, current_angular_speed)
    corrected_orientation = transformations.quaternion_multiply(
        transformations.quaternion_from_euler(*corrected),
        transformations.quaternion_from_euler(*current_rpy))
    setpoint_pose = Pose(self.desired_position, corrected_orientation)
    servo_angles = self.platform.ik(setpoint_pose)
    rospy.logdebug(
        "Servo angles (deg): {}".format(np.rad2deg(servo_angles)))
    self.publish_servo_angles(servo_angles)
Example 5: follow_pose_with_admitance_by_ft
def follow_pose_with_admitance_by_ft(self):
    fx, fy, fz = self.get_force_movement()
    rospy.loginfo(
        "fx, fy, fz: " + str((round(fx, 3), round(fy, 3), round(fz, 3))))
    ps = Pose()
    if abs(fx) > self.fx_deadband:
        ps.position.x = (fx * self.fx_scaling) * self.frame_fixer.fx
    if abs(fy) > self.fy_deadband:
        ps.position.y = (fy * self.fy_scaling) * self.frame_fixer.fy
    if abs(fz) > self.fz_deadband:
        ps.position.z = (fz * self.fz_scaling) * self.frame_fixer.fz
    tx, ty, tz = self.get_torque_movement()
    rospy.loginfo(
        "tx, ty, tz: " + str((round(tx, 3), round(ty, 3), round(tz, 3))))
    roll = pitch = yaw = 0.0
    if abs(tx) > self.tx_deadband:
        roll += (tx * self.tx_scaling) * self.frame_fixer.tx
    if abs(ty) > self.ty_deadband:
        pitch += (ty * self.ty_scaling) * self.frame_fixer.ty
    if abs(tz) > self.tz_deadband:
        yaw += (tz * self.tz_scaling) * self.frame_fixer.tz
    q = quaternion_from_euler(roll, pitch, yaw)
    ps.orientation = Quaternion(*q)
    o = self.last_pose_to_follow.pose.orientation
    r_lastp, p_lastp, y_lastp = euler_from_quaternion([o.x, o.y, o.z, o.w])
    final_roll = r_lastp + roll
    final_pitch = p_lastp + pitch
    final_yaw = y_lastp + yaw
    self.current_pose.pose.orientation = Quaternion(*quaternion_from_euler(final_roll,
                                                                           final_pitch,
                                                                           final_yaw))
    self.current_pose.pose.position.x = self.last_pose_to_follow.pose.position.x + \
        ps.position.x
    self.current_pose.pose.position.y = self.last_pose_to_follow.pose.position.y + \
        ps.position.y
    self.current_pose.pose.position.z = self.last_pose_to_follow.pose.position.z + \
        ps.position.z
    self.current_pose.pose.position.x = self.sanitize(self.current_pose.pose.position.x,
                                                      self.min_x,
                                                      self.max_x)
    self.current_pose.pose.position.y = self.sanitize(self.current_pose.pose.position.y,
                                                      self.min_y,
                                                      self.max_y)
    self.current_pose.pose.position.z = self.sanitize(self.current_pose.pose.position.z,
                                                      self.min_z,
                                                      self.max_z)
    self.current_pose.header.stamp = rospy.Time.now()
    if self.send_goals:  # send MODIFIED GOALS
        self.pose_pub.publish(self.current_pose)
    else:
        self.last_pose_to_follow.header.stamp = rospy.Time.now()
        self.pose_pub.publish(self.last_pose_to_follow)
    self.debug_pose_pub.publish(self.current_pose)
Example 6: in_odom_callback
def in_odom_callback(self, in_odom_msg):
    q = np.array([in_odom_msg.pose.pose.orientation.x,
                  in_odom_msg.pose.pose.orientation.y,
                  in_odom_msg.pose.pose.orientation.z,
                  in_odom_msg.pose.pose.orientation.w])
    p = np.array([in_odom_msg.pose.pose.position.x,
                  in_odom_msg.pose.pose.position.y,
                  in_odom_msg.pose.pose.position.z])
    e = tfs.euler_from_quaternion(q, 'rzyx')
    wqb = tfs.quaternion_from_euler(e[0], e[1], e[2], 'rzyx')
    wqc = tfs.quaternion_from_euler(e[0], 0.0, 0.0, 'rzyx')
    #### odom ####
    odom_msg = in_odom_msg
    assert(in_odom_msg.header.frame_id == self.frame_id_in)
    odom_msg.header.frame_id = self.frame_id_out
    odom_msg.child_frame_id = ""
    self.out_odom_pub.publish(odom_msg)
    #### tf ####
    if self.broadcast_tf and self.tf_pub_flag:
        self.tf_pub_flag = False
        if not self.frame_id_in == self.frame_id_out:
            br.sendTransform((0.0, 0.0, 0.0),
                             tfs.quaternion_from_euler(0.0, 0.0, 0.0, 'rzyx'),
                             odom_msg.header.stamp,
                             self.frame_id_in,
                             self.frame_id_out)
        if not self.world_frame_id == self.frame_id_out:
            br.sendTransform((0.0, 0.0, 0.0),
                             tfs.quaternion_from_euler(0.0, 0.0, 0.0, 'rzyx'),
                             odom_msg.header.stamp,
                             self.world_frame_id,
                             self.frame_id_out)
        br.sendTransform((p[0], p[1], p[2]),
                         wqb,
                         odom_msg.header.stamp,
                         self.body_frame_id,
                         self.world_frame_id)
        br.sendTransform((p[0], p[1], p[2]),
                         wqc,
                         odom_msg.header.stamp,
                         self.intermediate_frame_id,
                         self.world_frame_id)
    #### path ####
    pose = PoseStamped()
    pose.header = odom_msg.header
    pose.pose.position.x = p[0]
    pose.pose.position.y = p[1]
    pose.pose.position.z = p[2]
    pose.pose.orientation.x = q[0]
    pose.pose.orientation.y = q[1]
    pose.pose.orientation.z = q[2]
    pose.pose.orientation.w = q[3]
    self.path.append(pose)
Example 7: publish_gaussians
def publish_gaussians():
    # Use TF to calculate relative area definitions
    # Gaussians depending on edges of furniture and object/furniture positions
    br.sendTransform((0.205067, -0.0136504, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "table_gauss_mean",
                     "plate_edge")
    br.sendTransform((0.366749, 0.088665, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "drawer55_gauss_mean",
                     "drawer55_edge")
    br.sendTransform((0.1462250, 0.1426716, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "stove_gauss_mean",
                     "placemat_edge")
    # TODO: the Y offset is manually set to a negative value here, but it should
    # actually be calculated based on the location of the door of the cupboard
    br.sendTransform((0.42658, -0.107175, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "cupboard_gauss_mean",
                     "drawer115_edge")
Example 8: publish_gaussians
def publish_gaussians():
    # Use TF to calculate relative area definitions
    br.sendTransform((0.9718, 2.6195, 0),
                     quaternion_from_euler(0, 0, 3.1415),
                     rospy.Time.now(),
                     "drawer_gaussian_mean",
                     "map")
    br.sendTransform((2.17495, 2.6431, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "table_gaussian_mean",
                     "map")
    br.sendTransform((0.76371, 2.17398, 0),
                     quaternion_from_euler(0, 0, 3.1415),
                     rospy.Time.now(),
                     "stove_gaussian_mean",
                     "map")
    br.sendTransform((0.77843, 3.21516, 0),
                     quaternion_from_euler(0, 0, 3.1415),
                     rospy.Time.now(),
                     "cupboard_gaussian_mean",
                     "map")
Example 9: publish_gaussians
def publish_gaussians():
    # Gaussians depending on edges of furniture and object/furniture positions
    br.sendTransform((0.205067, -0.0136504, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "table_gauss_mean",
                     "plate_edge")
    br.sendTransform((0.366749, 0.088665, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "drawer_gauss_mean",
                     "drawer_edge")
    br.sendTransform((0.1462250, 0.1426716, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "stove_gauss_mean",
                     "placemat_edge")
    br.sendTransform((0.42658, 0.107175, 0),
                     quaternion_from_euler(0, 0, 0),
                     rospy.Time.now(),
                     "cupboard_gauss_mean",
                     "cupboard_edge")
Example 10: test_avg_tag_localization
def test_avg_tag_localization(self):
    self.setup()
    # Publish a matching apriltag observation (from the static publisher in the test file)
    msg_tag = AprilTags()
    tag_100 = TagDetection()
    tag_100.id = 100
    trans = tag_100.transform.translation
    rot = tag_100.transform.rotation
    (trans.x, trans.y, trans.z) = (1, -1, 0)
    (rot.x, rot.y, rot.z, rot.w) = (0, 0, 1, 0)
    msg_tag.detections.append(tag_100)
    tag_101 = TagDetection()
    tag_101.id = 101
    trans = tag_101.transform.translation
    rot = tag_101.transform.rotation
    (trans.x, trans.y, trans.z) = (1, 0, 0)
    (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, np.pi/2)
    msg_tag.detections.append(tag_101)
    self.pub_tag.publish(msg_tag)
    # Wait for the node to publish a robot transform
    tfbuf = tf2_ros.Buffer()
    tfl = tf2_ros.TransformListener(tfbuf)
    try:
        Tr_w = tfbuf.lookup_transform("world", "duckiebot", rospy.Time(), rospy.Duration(5))
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
        self.assertFalse(True, "Test timed out waiting for the transform to be broadcast.")
    trans = (Tr_w.transform.translation.x, Tr_w.transform.translation.y, Tr_w.transform.translation.z)
    rot = (Tr_w.transform.rotation.x, Tr_w.transform.rotation.y, Tr_w.transform.rotation.z, Tr_w.transform.rotation.w)
    numpy.testing.assert_array_almost_equal(trans, (2, 0.5, 0))
    numpy.testing.assert_array_almost_equal(rot, tr.quaternion_from_euler(0, 0, 3*np.pi/4))
Example 11: plan_path_cb
def plan_path_cb(self, req):
    rospy.loginfo('Received [plan_path] request.')
    p1, p2 = (req.start.x, req.start.y), (req.end.x, req.end.y)
    path = self.planner.plan(p1, p2)
    try:
        self.visualizer.visualize(self.planner.graph)
    except:
        # If the student code in RandomizedRoadmapPlanner does not have a
        # graph object, or it is not compatible with GraphVisualizer,
        # silently skip visualization
        pass
    if not len(path):
        rospy.logwarn(self.__class__.__name__ + ': Failed to find a path.')
        return None
    # Convert the path output by the planner to a ROS message
    path_msg = Path()
    path_msg.header.frame_id = 'odom'
    path_msg.header.stamp = rospy.Time.now()
    q_start = quaternion_from_euler(0, 0, req.start.theta)
    q_end = quaternion_from_euler(0, 0, req.end.theta)
    for pt in path:
        p = PoseStamped()
        p.pose.position = Point(pt[0], pt[1], 0.0)
        p.pose.orientation = Quaternion(*q_start)
        path_msg.poses.append(p)
    # The randomized roadmap planner does not consider orientation, thus
    # we manually add a final path segment which rotates the robot to the
    # desired orientation
    last = path_msg.poses[-1]
    last.pose.orientation = Quaternion(*q_end)
    path_msg.poses.append(last)
    return path_msg
Example 12: drawer
def drawer(self, point):
    # Prepare
    GRIPPER_OPEN = .08
    GRIPPER_CLOSE = .003
    MAX_HANDLE_SIZE = .03
    linear_movement = self.behaviors.movement
    gripper = self.robot.left_gripper
    #gripper.open(True, position=GRIPPER_OPEN)
    #linear_movement.gripper_open()
    self.open_gripper()
    linear_movement.move_absolute((self.start_location_drawer[0],
                                   #np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))),
                                   np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))),
                                  stop='pressure_accel', pressure=1000)
    # Reach
    success, reason, touchloc_bl = self.behaviors.reach(point, 300,  #np.matrix([0.0, 0, 0]).T,
                                                        reach_direction=np.matrix([0.1, 0, 0]).T,
                                                        orientation=np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0)))
    # Error recovery
    if not success:
        error_msg = 'Reach failed due to "%s"' % reason
        rospy.loginfo(error_msg)
        rospy.loginfo('Failure recovery: moving back')
        try:
            linear_movement.move_relative_gripper(np.matrix([-.25, 0, 0]).T, stop='pressure_accel', pressure=300)
        except lm.RobotSafetyError as e:
            rospy.loginfo('robot safety error %s' % str(e))
        self.behaviors.movement.move_absolute(self.start_location_drawer, stop='accel', pressure=300)
        return False, 'reach failed', point
Example 13: pre_place_pose
def pre_place_pose(ud):
    robot.torso.high()
    goal_pose = PoseStamped()
    goal_pose.header.stamp = rospy.Time.now()
    goal_pose.header.frame_id = 'map'
    goal_pose.pose.position.x = -4.55638664735
    goal_pose.pose.position.y = 4.99959353359
    goal_pose.pose.orientation = Quaternion(*quaternion_from_euler(0, 0, 0.395625992))
    ControlToPose(robot, goal_pose, ControlParameters(0.5, 1.0, 0.3, 0.3, 0.3, 0.01, 0.1)).execute({})
    robot.torso.wait_for_motion_done()
    robot.speech.speak('I am now raising my arm')
    arm.resolve()._send_joint_trajectory([[-1.1, 0.044, 0.80, 0.297, 0.934, -0.95, 0.4]],
                                         timeout=rospy.Duration(0))
    arm.resolve().wait_for_motion_done()
    goal_pose.pose.orientation = Quaternion(*quaternion_from_euler(0, 0, 1.9656259917))
    ControlToPose(robot, goal_pose, ControlParameters(0.5, 1.0, 0.3, 0.3, 0.3, 0.01, 0.1)).execute({})
    arm.resolve().wait_for_motion_done()
    arm.resolve()._send_joint_trajectory([[-0.92, 0.044, 0.80, 0.497, 0.934, -0.95, 0.4]],
                                         timeout=rospy.Duration(0))
    arm.resolve().wait_for_motion_done()
    rospy.sleep(1.0)
    return 'done'
Example 14: __init__
def __init__(self):
    rospy.init_node('move_base', anonymous=True)
    rospy.on_shutdown(self.shutdown)
    quaternions = list()
    square_size = 1.0  # meters
    # The first two target orientations are 90 degrees (horizontal, pointing left)
    q_turn_angle = quaternion_from_euler(0, 0, pi / 2, axes='sxyz')
    q = Quaternion(*q_turn_angle)
    # Append the first turn
    rospy.loginfo(q)
    quaternions.append(q)
    # Append the second turn
    quaternions.append(q)
    # The second two target orientations are 270 degrees (horizontal, pointing right)
    q_turn_angle = quaternion_from_euler(0, 0, 3 * pi / 2, axes='sxyz')
    q = Quaternion(*q_turn_angle)
    # Append the third turn
    quaternions.append(q)
    # Append the fourth turn
    quaternions.append(q)
    waypoints = list()
    waypoints.append(Pose(Point(square_size, 0.0, 0.0), quaternions[0]))
    waypoints.append(Pose(Point(square_size, square_size, 0.0), quaternions[1]))
    waypoints.append(Pose(Point(0.0, square_size, 0.0), quaternions[2]))
    waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
    # Initialize the visualization markers for RViz
    self.init_markers()
    # Set a visualization marker at each waypoint
    for waypoint in waypoints:
        p = Point()
        p = waypoint.position
        self.markers.points.append(p)
    # Publisher to manually control the robot (e.g. to stop it)
    self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)
    # Subscribe to the move_base action server
    self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
    rospy.loginfo("Waiting for move_base action server...")
    self.move_base.wait_for_server(rospy.Duration(60))
    rospy.loginfo("Connected to move base server")
    rospy.loginfo("Starting navigation test")
    for i in range(4):
        self.marker_pub.publish(self.markers)
        self.goal = MoveBaseGoal()
        self.goal.target_pose.pose = waypoints[i]
        self.goal.target_pose.header.frame_id = 'map'
        self.goal.target_pose.header.stamp = rospy.Time.now()
        self.move()
Example 15: _publish_tf
def _publish_tf(self, stamp):
    # check that we know which frames we need to publish from
    if self.map is None or self.base_frame is None:
        rospy.loginfo('Not publishing until map and odometry frames found')
        return
    # calculate the mean position
    x = np.mean([p.x for p in self.particles])
    y = np.mean([p.y for p in self.particles])
    theta = np.mean([p.theta for p in self.particles])  # TODO - wraparound
    # map to base_link
    map2base_frame = PyKDL.Frame(
        PyKDL.Rotation.Quaternion(*transformations.quaternion_from_euler(0, 0, theta)),
        PyKDL.Vector(x, y, 0)
    )
    odom2base_frame = tf2_kdl.transform_to_kdl(self.tf_buffer.lookup_transform(
        target_frame=self.odom_frame,
        source_frame=self.base_frame,
        time=stamp,
        timeout=rospy.Duration(4.0)
    ))
    # derive frame according to REP 105
    map2odom = map2base_frame * odom2base_frame.Inverse()
    br = tf2_ros.TransformBroadcaster()
    t = TransformStamped()
    t.header.stamp = stamp
    t.header.frame_id = self.map.frame
    t.child_frame_id = self.odom_frame
    t.transform.translation = Vector3(*map2odom.p)
    t.transform.rotation = Quaternion(*map2odom.M.GetQuaternion())
    br.sendTransform(t)
    # for debugging
    if False:
        t.header.stamp = stamp
        t.header.frame_id = self.map.frame
        t.child_frame_id = self.base_frame + "_old"
        t.transform.translation = Vector3(*map2base_frame.p)
        t.transform.rotation = Quaternion(*map2base_frame.M.GetQuaternion())
        br.sendTransform(t)
    if self.publish_cloud:
        msg = PoseArray(
            header=Header(stamp=stamp, frame_id=self.map.frame),
            poses=[
                Pose(
                    position=Point(p.x, p.y, 0),
                    orientation=Quaternion(*transformations.quaternion_from_euler(0, 0, p.theta))
                )
                for p in self.particles
            ]
        )
        self.pose_array.publish(msg)