本文整理汇总了Python中tf.transformations.translation_from_matrix函数的典型用法代码示例。如果您正苦于以下问题:Python translation_from_matrix函数的具体用法?Python translation_from_matrix怎么用?Python translation_from_matrix使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了translation_from_matrix函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: wrench_callback
def wrench_callback(self, state):
    """Estimate a force from Cartesian position error and publish it as omni feedback.

    Does not compensate for motion error due to dynamics; the damping term
    is handled inside the real-time omni driver.

    :param state: PointStamped-like message holding the commanded point
    """
    # Force estimate is proportional to the error between the commanded
    # point and the end-effector position from forward kinematics.
    joint_angles = self.robot.get_joint_angles()
    ee_pos, _ = self.robot.kinematics.FK(joint_angles)
    pos_err = np.matrix([state.point.x - ee_pos[0,0],
                         state.point.y - ee_pos[1,0],
                         state.point.z - ee_pos[2,0]]).T
    fb = -1.0 * self.kp * pos_err  # -kd * x_dot_err term lives in the real-time omni driver
    # Rotate the feedback vector from torso_lift_link into the destination frame.
    df_rot = tfu.rotate(self.dest_frame, 'torso_lift_link', self.tflistener)
    fb_mat = tfu.translation_matrix([fb[0,0], fb[1,0], fb[2,0]])
    force_vec = self.force_scaling * np.array(tr.translation_from_matrix(df_rot * fb_mat))
    # Clamp each component into the omni's allowed force range.
    force_vec = np.where(force_vec > self.omni_max_limit, self.omni_max_limit, force_vec)
    force_vec = np.where(force_vec < self.omni_min_limit, self.omni_min_limit, force_vec)
    msg = OmniFeedback()
    msg.force.x = force_vec[0]
    msg.force.y = force_vec[1]
    msg.force.z = force_vec[2]
    if self.enable == True:
        self.omni_fb.publish(msg)
示例2: space_from_joints
def space_from_joints(joint_angles):
    """Map joint angles to a task-space vector [x, y, z, rz2].

    Runs the forward kinematics, chains the link transforms, and appends
    the final 'szyz' Euler rotation component to the translation.
    """
    frames = direct_kinematics(joint_angles)
    T = tfs.concatenate_matrices(*frames)
    # Only the last ZYZ Euler component is used; the first two are discarded.
    _, _, rz2 = tfs.euler_from_matrix(T, 'szyz')
    position = tfs.translation_from_matrix(T)
    return np.append(position, rz2)
示例3: transformPose
def transformPose(self, target_frame, ps):
    """
    :param target_frame: the tf target frame, a string
    :param ps: the geometry_msgs.msg.PoseStamped message
    :return: new geometry_msgs.msg.PoseStamped message, in frame target_frame
    :raises: any of the exceptions that :meth:`~tf.Transformer.lookupTransform` can raise

    Transforms a geometry_msgs PoseStamped message to frame target_frame,
    returns a new PoseStamped message.
    """
    # Frame-to-frame transform as a 4x4 homogeneous matrix.
    frame_mat = self.asMatrix(target_frame, ps.header)
    # The given pose, also as a 4x4.
    pose_mat = numpy.dot(xyz_to_mat44(ps.pose.position), xyzw_to_mat44(ps.pose.orientation))
    # Compose to get the pose expressed in target_frame.
    new_mat = numpy.dot(frame_mat, pose_mat)
    position = tuple(transformations.translation_from_matrix(new_mat))[:3]
    orientation = tuple(transformations.quaternion_from_matrix(new_mat))
    # Assemble the return value PoseStamped.
    result = geometry_msgs.msg.PoseStamped()
    result.header.stamp = ps.header.stamp
    result.header.frame_id = target_frame
    result.pose = geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*position),
                                         geometry_msgs.msg.Quaternion(*orientation))
    return result
示例4: space_from_joints_small
def space_from_joints_small(joint_angles):
    """Map joint angles to a task-space vector [x, y, z, rz, ry].

    Runs the forward kinematics, chains the transforms up to the marker
    frame (Tem), and appends the 'sxyz' Euler rz and ry components to the
    translation.
    """
    T01, T12, T23, T34, T45, T5e, Tec, Tem = direct_kinematics(joint_angles)
    T = tfs.concatenate_matrices(T01, T12, T23, T34, T45, T5e, Tem)
    rx, ry, rz = tfs.euler_from_matrix(T, 'sxyz')
    trans = tfs.translation_from_matrix(T)
    # BUG FIX: both inputs are 1-D, so axis=1 raises AxisError on modern
    # numpy (legacy numpy silently ignored the axis for 1-D arrays).
    # Concatenating along the default axis yields the intended 5-vector.
    S = np.concatenate((trans, [rz, ry]))
    return S
示例5: _makePreGraspPose
def _makePreGraspPose(self, boxMat, axis):
if axis==0: #x axis
alpha = 0
gamma = 0
else: #y axis
alpha = pi/2
gamma = -pi/2
ik_request = PositionIKRequest()
ik_request.ik_link_name = self.toolLinkName
with self._joint_state_lock:
ik_request.ik_seed_state.joint_state = copy.deepcopy(self._joint_states)
ik_request.pose_stamped = PoseStamped()
ik_request.pose_stamped.header.stamp = rospy.Time.now()
ik_request.pose_stamped.header.frame_id = self.frameID
beta = pi/2
while beta < 3*pi/2:
rotationMatrix = transformations.euler_matrix(alpha, beta, gamma, 'rzyz')
distance = self.preGraspDistance + self.gripperFingerLength
preGraspMat = transformations.translation_matrix([0,0,-distance])
fullMat = transformations.concatenate_matrices(boxMat, rotationMatrix, preGraspMat)
p = transformations.translation_from_matrix(fullMat)
q = transformations.quaternion_from_matrix(fullMat)
print "trying {} radians".format(beta)
try:
self._ik_server(ik_request, Constraints(), rospy.Duration(5.0))
except rospy.service.ServiceException:
beta += 0.1
else:
if ik_resp.error_code.val > 0:
return beta
else:
#print ik_resp.error_code.val
beta += 0.01
rospy.logerr('No way to pick this up. All IK solutions failed.')
return 7 * pi / 6
示例6: pack_pose
def pack_pose(time, child, parent, matrix=None, trans=None, quat=None):
    """Build a TransformStamped from either a 4x4 matrix or a trans/quat pair.

    :param time: stamp for the transform header
    :param child: child frame id
    :param parent: parent frame id
    :param matrix: optional 4x4 homogeneous transform
    :param trans: translation (x, y, z); required when matrix is None
    :param quat: quaternion (x, y, z, w); required when matrix is None
    :return: a populated TransformStamped with a normalized quaternion
    :raises ValueError: when neither a matrix nor a full trans/quat pair is
        supplied, or when both forms are supplied at once
    """
    if matrix is not None and (trans is None and quat is None):
        trans = tfs.translation_from_matrix(matrix)
        quat = tfs.quaternion_from_matrix(matrix)
    elif not (matrix is None and trans is not None and quat is not None):
        # BUG FIX: previously this branch only printed 'invalid use' and
        # fell through, crashing later on trans[0] with trans still None.
        # Fail fast with a clear error instead.
        raise ValueError('pack_pose: pass either matrix, or both trans and quat')
    t = TransformStamped()
    t.header.frame_id = parent
    t.header.stamp = time
    t.child_frame_id = child
    t.transform.translation.x = trans[0]
    t.transform.translation.y = trans[1]
    t.transform.translation.z = trans[2]
    # Normalize so downstream tf consumers always see a unit quaternion.
    quat = numpy.array(quat)
    quat = quat / numpy.linalg.norm(quat, ord=2)
    t.transform.rotation.x = quat[0]
    t.transform.rotation.y = quat[1]
    t.transform.rotation.z = quat[2]
    t.transform.rotation.w = quat[3]
    return t
示例7: computeTransformation
def computeTransformation(world, slam, camera):
    """Fuse SLAM and control estimates into one camera-to-slam transform.

    Looks up the camera pose as seen by SLAM and as predicted by control
    (world), inverts both, then mixes selected translation/Euler components
    of each to build a corrected camMslam transform.

    :param world: tf frame name of the control/world frame
    :param slam: tf frame name of the SLAM map frame
    :param camera: tf frame name of the camera frame
    :return: (translation, quaternion, stamp) of the corrected transform
    """
    # Here we do not care about the slamMworld transformation
    # timing as it is constant.
    # Use the older of the two latest-common times so both lookups succeed.
    tWorld = listener.getLatestCommonTime(world, camera)
    tMap = listener.getLatestCommonTime(slam, camera)
    t = min(tWorld, tMap)
    # Current pose given by the SLAM.
    (slamMcam_T, slamMcam_Q) = listener.lookupTransform(camera, slam, t)
    slamMcam = np.matrix(transformer.fromTranslationRotation(slamMcam_T,
                                                             slamMcam_Q))
    # Estimation of the camera position given by control.
    #FIXME: order is weird but it works.
    (worldMcam_T, worldMcam_Q) = listener.lookupTransform(world, camera, t)
    worldMcam = np.matrix(transformer.fromTranslationRotation(worldMcam_T,
                                                             worldMcam_Q))
    (slamMworld_T, slamMworld_Q) = listener.lookupTransform(slam, world, t)
    slamMworld = np.matrix(transformer.fromTranslationRotation(slamMworld_T,
                                                               slamMworld_Q))
    # Chain control estimate through the constant slam<-world transform.
    slamMcamEstimated = slamMworld * worldMcam
    # Inverse frames.
    camMslam = np.linalg.inv(slamMcam)
    camMslamEstimated = np.linalg.inv(slamMcamEstimated)
    # Split.
    camMslam_T = translation_from_matrix(camMslam)
    camMslam_Q = quaternion_from_matrix(camMslam)
    camMslam_E = euler_from_matrix(camMslam)
    camMslamEstimated_T = translation_from_matrix(camMslamEstimated)
    camMslamEstimated_Q = quaternion_from_matrix(camMslamEstimated)
    camMslamEstimated_E = euler_from_matrix(camMslamEstimated)
    # Compute correction: take y translation and roll/yaw from the control
    # estimate, x/z translation and pitch from SLAM.
    camMslamCorrected_T = [camMslam_T[0],
                           camMslamEstimated_T[1],
                           camMslam_T[2]]
    camMslamCorrected_E = [camMslamEstimated_E[0],
                           camMslam_E[1],
                           camMslamEstimated_E[2]]
    camMslamCorrected_Q = quaternion_from_euler(*camMslamCorrected_E)
    return (camMslamCorrected_T, camMslamCorrected_Q, t)
示例8: getUnitTranslationVector
def getUnitTranslationVector(self):
    """Return the translation direction as a unit 3-vector.

    Returns (0, 0, 0) when the translation distance is zero to avoid a
    division by zero.
    """
    dist = self.getPositionDistance()
    if dist == 0:
        return (0, 0, 0)
    tx, ty, tz = translation_from_matrix(self._matrix)
    return (tx / dist, ty / dist, tz / dist)
示例9: _makeGraspPath
def _makeGraspPath(self, preGraspGoal):
    '''Given a pregrasp MoveArmGoal message, generate a MoveArmGoal message to perform the final
    approach to the object to grasp it.

    :param preGraspGoal: MoveArmGoal whose orientation/position constraints
        describe the pre-grasp pose
    :return: MoveArmGoal for the final straight-line approach
    '''
    graspGoal = MoveArmGoal()
    graspGoal.planner_service_name = self.plannerServiceName
    motion_plan_request = MotionPlanRequest()
    motion_plan_request.group_name = self.armGroupName
    motion_plan_request.num_planning_attempts = 25
    motion_plan_request.planner_id = ""
    motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
    #Orientation constraint is the same as for pregrasp
    motion_plan_request.goal_constraints.orientation_constraints = copy.deepcopy(preGraspGoal.motion_plan_request.goal_constraints.orientation_constraints)
    #motion_plan_request.goal_constraints.orientation_constraints[0].orientation = Quaternion(0.656778, 0.261999, 0.648401, -0.282093) #stow orientation for debug
    graspGoal.motion_plan_request = motion_plan_request
    #Translate from pregrasp position to final position in a roughly straight line
    o = motion_plan_request.goal_constraints.orientation_constraints[0].orientation
    p = preGraspGoal.motion_plan_request.goal_constraints.position_constraints[0].position
    preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
    preGraspMat[:3, 3] = [p.x,p.y,p.z]
    distance = self.preGraspDistance * .5 + self.gripperFingerLength * .5
    graspTransMat = transformations.translation_matrix([0,0,distance])
    graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
    p = transformations.translation_from_matrix(graspMat)
    #Publish grasp transform for visualization
    # BUG FIX: the broadcast quaternion repeated o.x in the third slot
    # ((o.x, o.y, o.x, o.w)); use o.z so the visualized orientation matches
    # the constraint orientation.
    self._tf_broadcaster.sendTransform(
        (p[0],p[1],p[2]),
        (o.x, o.y, o.z, o.w),
        motion_plan_request.goal_constraints.orientation_constraints[0].header.stamp,
        "grasp",
        motion_plan_request.goal_constraints.orientation_constraints[0].header.frame_id)
    pos_constraint = PositionConstraint()
    pos_constraint.header = motion_plan_request.goal_constraints.orientation_constraints[0].header
    pos_constraint.link_name = self.toolLinkName
    pos_constraint.position = Point(p[0],p[1],p[2])
    #pos_constraint.position = Point(-0.0644721, 0.609922, 0) #Stow position for debug
    pos_constraint.constraint_region_shape.type = Shape.SPHERE
    pos_constraint.constraint_region_shape.dimensions = [0.01]
    pos_constraint.weight = 1
    motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
    #TODO: Add path constraint to require a (roughly) cartesian move
    #Turn off collision operations between the gripper and all objects
    for collisionName in self.gripperCollisionNames:
        collisionOperation = CollisionOperation(collisionName,
                                                CollisionOperation.COLLISION_SET_ALL,
                                                0.0,
                                                CollisionOperation.DISABLE)
        graspGoal.operations.collision_operations.append(collisionOperation)
    return graspGoal
示例10: get_relative_pose
def get_relative_pose(object_a, object_b):
object_a_mat = get_object_pose_as_matrix(object_a)
object_b_mat = get_object_pose_as_matrix(object_b)
rel_mat = numpy.linalg.inv(object_a_mat).dot(object_b_mat)
trans = transformations.translation_from_matrix(rel_mat)
rot = transformations.quaternion_from_matrix(rel_mat)
rot = [rot[3], rot[0], rot[1], rot[2]]
print "pose: [%f, %f, %f, %f, %f, %f, %f]" % (trans[0], trans[1], trans[2], rot[0], rot[1], rot[2], rot[3])
return (trans, rot)
示例11: _makeMove
def _makeMove(self):
'''Move forward a small distance'''
print "Making move goal"
graspGoal = MoveArmGoal()
graspGoal.planner_service_name = self.plannerServiceName
motion_plan_request = MotionPlanRequest()
motion_plan_request.group_name = self.armGroupName
motion_plan_request.num_planning_attempts = 5
motion_plan_request.planner_id = ""
motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
#Orientation constraint is the same as for previous
#Create Orientation constraint object
o_constraint = OrientationConstraint()
o_constraint.header.frame_id = self.frameID
o_constraint.header.stamp = rospy.Time.now()
o_constraint.link_name = self.toolLinkName
o_constraint.orientation = Quaternion(0.656788, 0.261971, 0.648416, -0.282059)
o_constraint.absolute_roll_tolerance = 0.04
o_constraint.absolute_pitch_tolerance = 0.04
o_constraint.absolute_yaw_tolerance = 0.04
o_constraint.weight = 1
motion_plan_request.goal_constraints.orientation_constraints.append(o_constraint)
#Translate from pregrasp position to final position in a roughly straight line
o = o_constraint.orientation
p = Point(-0.064433, 0.609915, 0)
preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
preGraspMat[:3, 3] = [p.x,p.y,p.z]
distance = .3#self.preGraspDistance + self.gripperFingerLength/2
graspTransMat = transformations.translation_matrix([0,0,distance])
graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
#print preGraspMat
#print graspTransMat
#print graspMat
p = transformations.translation_from_matrix(graspMat)
#Publish grasp transform for visualization
self._tf_broadcaster.sendTransform(
(p[0],p[1],p[2]),
(o.x, o.y, o.x, o.w),
o_constraint.header.stamp,
"grasp",
o_constraint.header.frame_id)
pos_constraint = PositionConstraint()
pos_constraint.header = o_constraint.header
pos_constraint.link_name = self.toolLinkName
pos_constraint.position = Point(p[0],p[1],p[2])
pos_constraint.constraint_region_shape.type = Shape.SPHERE
pos_constraint.constraint_region_shape.dimensions = [0.01]#[0.01, 0.01, 0.01]
pos_constraint.weight = 1
motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
graspGoal.motion_plan_request = motion_plan_request
return graspGoal
示例12: mat2pose
def mat2pose(m):
    """Convert a 4x4 homogeneous transform into a geometry_msgs Pose."""
    xyz = tr.translation_from_matrix(m)
    q = tr.quaternion_from_matrix(m)
    pose = Pose()
    pose.position.x = xyz[0]
    pose.position.y = xyz[1]
    pose.position.z = xyz[2]
    pose.orientation.x = q[0]
    pose.orientation.y = q[1]
    pose.orientation.z = q[2]
    pose.orientation.w = q[3]
    return pose
示例13: transformPose
def transformPose(mat44, ps):
    """Apply a 4x4 frame transform to a PoseStamped and return the new Pose.

    :param mat44: 4x4 frame-to-frame transform
    :param ps: geometry_msgs PoseStamped holding the input pose
    :return: geometry_msgs.msg.Pose in the target frame
    """
    # Express the incoming pose as a 4x4 homogeneous matrix.
    pose_mat = numpy.dot(tf.listener.xyz_to_mat44(ps.pose.position),
                         tf.listener.xyzw_to_mat44(ps.pose.orientation))
    # Compose with the frame transform to get the new pose.
    new_mat = numpy.dot(mat44, pose_mat)
    position = tuple(transformations.translation_from_matrix(new_mat))[:3]
    orientation = tuple(transformations.quaternion_from_matrix(new_mat))
    return geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*position),
                                  geometry_msgs.msg.Quaternion(*orientation))
示例14: calculate_bridge_and_base_pose
def calculate_bridge_and_base_pose(self, table_state, phi):
    """Compute bridge and base poses for a shot at angle phi.

    :param table_state: state object passed to get_cue_ball_pos
    :param phi: shot angle in degrees
    :return: ((bridge_pos, bridge_quat), (base_pos, base_quat))
    """
    # Calculate bridge pose from cue position and phi
    phi_radians = math.pi * phi / 180.0
    cue_x, cue_y = self.get_cue_ball_pos(table_state)
    # NOTE(review): get_bridge_pos receives phi in degrees while the
    # quaternion below uses phi_radians -- confirm this mix is intentional.
    bridge_x, bridge_y = self.get_bridge_pos(cue_x, cue_y, phi)
    (qx, qy, qz, qw) = transformations.quaternion_about_axis(phi_radians, (0, 0, 1))
    bridge_pos = transformations.translation_matrix([bridge_x, bridge_y, 0.0])
    bridge_orient = transformations.quaternion_matrix([qx, qy, qz, qw])
    bridge_pose = transformations.concatenate_matrices(bridge_pos, bridge_orient)
    bridge_pose_pos = transformations.translation_from_matrix(bridge_pose)
    bridge_pose_orient = transformations.quaternion_from_matrix(bridge_pose)
    # Produce base pose via fixed transform from bridge pose
    bridge_to_base_trans = transformations.translation_matrix([Constants.BRIDGE_IN_BASE_X, Constants.BRIDGE_IN_BASE_Y, Constants.BRIDGE_IN_BASE_Z])
    bridge_to_base_rot = transformations.quaternion_matrix([Constants.BRIDGE_IN_BASE_QX, Constants.BRIDGE_IN_BASE_QY, Constants.BRIDGE_IN_BASE_QZ, Constants.BRIDGE_IN_BASE_QW])
    bridge_to_base = transformations.concatenate_matrices(bridge_to_base_trans, bridge_to_base_rot)
    base_to_bridge = transformations.inverse_matrix(bridge_to_base)
    base_pose = transformations.concatenate_matrices(bridge_pose, base_to_bridge) # correct order?
    base_pose_pos = transformations.translation_from_matrix(base_pose)
    base_pose_orient = transformations.quaternion_from_matrix(base_pose)
    print "BASE_POSE", base_pose, "BRIDGE_POSE", bridge_pose
    return ((bridge_pose_pos, bridge_pose_orient), (base_pose_pos, base_pose_orient))
示例15: calENU
def calENU(self):
    """Update the ENU frame from the body pose and heading angle.

    Returns False (no update) when the body pose or heading is missing;
    otherwise rebuilds the ENU transform and returns True.
    """
    # Guard: nothing to do without a body pose and a heading.
    if self.body.isNone() or self.headingAngle is None:
        return False
    body_yaw = self.body.getRotationAroundZ()
    heading_rot = rotation_matrix(body_yaw + self.headingAngle, (0, 0, 1))
    body_trans = translation_from_matrix(self.body.getMatrix())
    trans_mat = translation_matrix(body_trans)
    self.enu.setMatrix(trans_mat).transformByMatrix(heading_rot)
    return True
#eoc
#eoc