本文整理汇总了Python中tf.TransformListener.transformPose方法的典型用法代码示例。如果您正苦于以下问题:Python TransformListener.transformPose方法的具体用法?Python TransformListener.transformPose怎么用?Python TransformListener.transformPose使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tf.TransformListener
的用法示例。
在下文中一共展示了TransformListener.transformPose方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
def main():
rospy.init_node('tf_test')
rospy.loginfo("Node for testing actionlib server")
#from_link = '/head_color_camera_l_link'
#to_link = '/base_link'
from_link = '/base_link'
to_link = '/map'
tfl = TransformListener()
rospy.sleep(5)
t = rospy.Time(0)
mpose = PoseStamped()
mpose.pose.position.x = 1
mpose.pose.position.y = 0
mpose.pose.position.z = 0
mpose.pose.orientation.x = 0
mpose.pose.orientation.y = 0
mpose.pose.orientation.z = 0
mpose.pose.orientation.w = 0
mpose.header.frame_id = from_link
mpose.header.stamp = rospy.Time.now()
rospy.sleep(5)
mpose_transf = None
rospy.loginfo('Waiting for transform for some time...')
tfl.waitForTransform(to_link,from_link,t,rospy.Duration(5))
if tfl.canTransform(to_link,from_link,t):
mpose_transf = tfl.transformPose(to_link,mpose)
print mpose_transf
else:
rospy.logerr('Transformation is not possible!')
sys.exit(0)
示例2: TwistToPoseConverter
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
class TwistToPoseConverter(object):
    """Converts incoming TwistStamped commands into absolute PoseStamped
    goals by adding the twist to the current end-effector pose from tf."""

    def __init__(self):
        self.ee_frame = rospy.get_param('~ee_frame', "/l_gripper_tool_frame")
        self.twist_sub = rospy.Subscriber('/twist_in', TwistStamped, self.twist_cb)
        self.pose_pub = rospy.Publisher('/pose_out', PoseStamped)
        self.tf_listener = TransformListener()

    def get_ee_pose(self, frame='/torso_lift_link', time=None):
        """Get current end effector pose from tf.

        Returns (position, quaternion), or (None, None) on a tf failure.
        """
        try:
            now = rospy.Time.now() if time is None else time
            self.tf_listener.waitForTransform(frame, self.ee_frame, now, rospy.Duration(4.0))
            pos, quat = self.tf_listener.lookupTransform(frame, self.ee_frame, now)
        except (LookupException, ConnectivityException, ExtrapolationException, Exception) as e:
            # NOTE: the trailing Exception makes this a catch-all; kept for
            # behaviour compatibility (the tf-specific types are redundant).
            rospy.logwarn("[%s] TF Failure getting current end-effector pose:\r\n %s"
                          % (rospy.get_name(), e))
            return None, None
        return pos, quat

    def twist_cb(self, ts):
        """Get current end effector pose and augment with twist command."""
        cur_pos, cur_quat = self.get_ee_pose(ts.header.frame_id)
        if cur_pos is None or cur_quat is None:
            # BUGFIX: get_ee_pose returns (None, None) on tf failure;
            # indexing cur_pos below would raise a TypeError.
            rospy.logwarn("[%s] Could not get current end-effector pose; "
                          "dropping twist command." % rospy.get_name())
            return
        ps = PoseStamped()
        ps.header.frame_id = ts.header.frame_id
        ps.header.stamp = ts.header.stamp
        ps.pose.position.x = cur_pos[0] + ts.twist.linear.x
        ps.pose.position.y = cur_pos[1] + ts.twist.linear.y
        ps.pose.position.z = cur_pos[2] + ts.twist.linear.z
        twist_quat = trans.quaternion_from_euler(ts.twist.angular.x,
                                                 ts.twist.angular.y,
                                                 ts.twist.angular.z)
        # Compose the commanded rotation with the current orientation.
        final_quat = trans.quaternion_multiply(twist_quat, cur_quat)
        ps.pose.orientation = Quaternion(*final_quat)
        try:
            self.tf_listener.waitForTransform('/torso_lift_link', ps.header.frame_id,
                                              ps.header.stamp, rospy.Duration(3.0))
            pose_out = self.tf_listener.transformPose('/torso_lift_link', ps)
            self.pose_pub.publish(pose_out)
        except (LookupException, ConnectivityException, ExtrapolationException, Exception) as e:
            rospy.logwarn("[%s] TF Failure transforming goal pose to torso_lift_link:\r\n %s"
                          % (rospy.get_name(), e))
示例3: calculate_goal_pose
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
class calculate_goal_pose(smach.State):
    """SMACH state: compute a goal pose DISTANCE metres in front of a
    detected marker, facing the marker, with z aligned to vertical."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded'],
                             input_keys=['marker_pose', 'goal_pose'],
                             output_keys=['goal_pose'])
        self.tf = TransformListener(True, rospy.Duration(5))
        self.DISTANCE = 0.40  # stand-off distance from the marker (metres)

    def execute(self, userdata):
        userdata.marker_pose.header.stamp = rospy.Time.now()
        pose = self.tf.transformPose('/base_link', userdata.marker_pose)
        # BUGFIX: p must be an ndarray. With a plain Python list,
        # `p += ndarray` extends the list element by element instead of
        # adding vectors, so the stand-off shift was silently never applied.
        p = np.array([pose.pose.position.x, pose.pose.position.y,
                      pose.pose.position.z])
        q = [pose.pose.orientation.x, pose.pose.orientation.y,
             pose.pose.orientation.z, pose.pose.orientation.w]
        rm = tfs.quaternion_matrix(q)
        # assemble a new coordinate frame that has z-axis aligned with
        # the vertical direction and x-axis facing the z-axis of the
        # original frame
        z = rm[:3, 2]
        z[2] = 0
        axis_x = tfs.unit_vector(z)
        axis_z = tfs.unit_vector([0, 0, 1])
        axis_y = np.cross(axis_x, axis_z)
        rm = np.vstack((axis_x, axis_y, axis_z)).transpose()
        # shift the pose along the x-axis of the new frame
        p += np.dot(rm, [self.DISTANCE, 0, 0])[:3]
        userdata.goal_pose = pose
        userdata.goal_pose.pose.position.x = p[0]
        userdata.goal_pose.pose.position.y = p[1]
        userdata.goal_pose.pose.position.z = p[2]
        # face back toward the marker: new yaw rotated by pi
        yaw = tfs.euler_from_matrix(rm)[2]
        q = tfs.quaternion_from_euler(0, 0, yaw - math.pi)
        userdata.goal_pose.pose.orientation.x = q[0]
        userdata.goal_pose.pose.orientation.y = q[1]
        userdata.goal_pose.pose.orientation.z = q[2]
        userdata.goal_pose.pose.orientation.w = q[3]
        return 'succeeded'
示例4: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
class HandHoldTeleop:
    """Hand-hold teleoperation: drives the mobile base so that it follows
    the arm's gripper. The gripper displacement from a "home" position is
    mapped to forward and angular base velocity. The whole control loop
    runs inside __init__ until rospy shuts down."""

    def __init__(self):
        self.listener = TransformListener()
        self.rate = rospy.Rate(100)  # control loop at 100 Hz
        self.pub = rospy.Publisher('/cmd_vel', Twist)
        # Kinematic chain endpoints: gripper pose is expressed in the arm root.
        self.root = rospy.get_param('root_link','arm_link')
        self.tip = rospy.get_param('tip_link','gripper_link')
        self.x_home = rospy.get_param('x_home', 0.35)   # neutral forward offset (m)
        self.z_home = rospy.get_param('z_home', -0.2)   # below this height: stop
        self.x_rate = rospy.get_param('x_rate', 8.0)    # linear gain
        self.r_rate = rospy.get_param('r_rate', 5.0)    # angular gain
        rospy.loginfo("Started")
        while not rospy.is_shutdown():
            # Identity pose in the tip frame == current gripper pose.
            gripper_pose = PoseStamped()
            gripper_pose.header.frame_id = self.tip
            try:
                pose = self.listener.transformPose(self.root, gripper_pose).pose
                if pose.position.z > self.z_home:
                    # Gripper lifted: command velocity proportional to
                    # displacement from home.
                    msg = Twist()
                    msg.linear.x = (pose.position.x - self.x_home) * self.x_rate
                    msg.angular.z = pose.position.y * self.r_rate
                    print msg.linear.x, msg.angular.z
                    self.pub.publish(msg)
                else:
                    # Gripper lowered: stop the base.
                    self.pub.publish(Twist())
            except:
                # Best-effort: transient tf failures are ignored on purpose.
                pass
                #rospy.logerr("Transform failed")
            self.rate.sleep();
        # Make sure the base is stopped on shutdown.
        self.pub.publish(Twist())
示例5: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
p.point.y = sign * random.uniform(1.5, 0.5)
p.point.z = random.uniform(1.7, 0.2)
self._head_buff.put((copy.deepcopy(p), 0.1, True))
p.point.y = -1 * sign * random.uniform(1.5, 0.5)
p.point.z = random.uniform(1.7, 0.2)
self._head_buff.put((copy.deepcopy(p), 0.1, True))
rospy.loginfo("Looking around")
rospy.sleep(1)
def getPointDist(self, pt):
    """Return the Euclidean distance from the robot base to point *pt*,
    or None if the tf lookup fails."""
    assert(self.face is not None)
    # Identity pose at the robot base, stamped half a second in the past
    # so that a transform is likely to be available.
    base = PoseStamped()
    base.header.frame_id = "base_link"
    base.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
    base.pose.orientation.w = 1  # identity rotation (other fields default to 0)
    try:
        self._tf.waitForTransform(base.header.frame_id, self._robot_frame,
                                  base.header.stamp, rospy.Duration(2))
        base = self._tf.transformPose(self._robot_frame, base)
    except:
        rospy.logerr("TF error!")
        return None
    dx = base.pose.position.x - pt.point.x
    dy = base.pose.position.y - pt.point.y
    dz = base.pose.position.z - pt.point.z
    return sqrt(dx * dx + dy * dy + dz * dz)
def getPoseStamped(self, group, c):
assert(len(c) == 6)
p = PoseStamped()
p.header.frame_id = "base_link"
p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
p.pose.position.x = c[0]
p.pose.position.y = c[1]
p.pose.position.z = c[2]
quat = tf.transformations.quaternion_from_euler(c[3], c[4], c[5])
p.pose.orientation.x = quat[0]
p.pose.orientation.y = quat[1]
p.pose.orientation.z = quat[2]
p.pose.orientation.w = quat[3]
try:
示例6: MarkerProcessor
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
class MarkerProcessor(object):
def __init__(self, use_dummy_transform=False):
    """Set up the STAR ceiling-marker localization node.

    :param use_dummy_transform: publish against a dummy odom frame name,
        useful for testing without a real odometry source.
    """
    rospy.init_node('star_center_positioning_node')
    if use_dummy_transform:
        self.odom_frame_name = ROBOT_NAME+"_odom_dummy"
    else:
        self.odom_frame_name = ROBOT_NAME+"_odom"
    # Known ceiling markers: id -> MarkerLocator(position, yaw).
    # Positions are feet/inches converted to metres (n*12*2.54/100).
    self.marker_locators = {}
    self.add_marker_locator(MarkerLocator(0,(-6*12*2.54/100.0,0),0))
    self.add_marker_locator(MarkerLocator(1,(0.0,0.0),pi))
    self.add_marker_locator(MarkerLocator(2,(0.0,10*12*2.54/100.0),0))
    self.add_marker_locator(MarkerLocator(3,(-6*12*2.54/100.0,6*12*2.54/100.0),0))
    self.add_marker_locator(MarkerLocator(4,(0.0,6*12*2.54/100.0),pi))
    # Calibration parameters, also adjustable via dynamic_reconfigure.
    self.pose_correction = rospy.get_param('~pose_correction',0.0)
    self.phase_offset = rospy.get_param('~phase_offset',0.0)
    self.is_flipped = rospy.get_param('~is_flipped',False)
    srv = Server(STARPoseConfig, self.config_callback)
    self.marker_sub = rospy.Subscriber("ar_pose_marker",
                                       ARMarkers,
                                       self.process_markers)
    self.odom_sub = rospy.Subscriber(ROBOT_NAME+"/odom", Odometry, self.process_odom, queue_size=10)
    self.star_pose_pub = rospy.Publisher(ROBOT_NAME+"/STAR_pose",PoseStamped,queue_size=10)
    self.continuous_pose = rospy.Publisher(ROBOT_NAME+"/STAR_pose_continuous",PoseStamped,queue_size=10)
    self.tf_listener = TransformListener()
    self.tf_broadcaster = TransformBroadcaster()
def add_marker_locator(self, marker_locator):
    # Register the locator under its marker id for lookup in process_markers.
    self.marker_locators[marker_locator.id] = marker_locator
def config_callback(self, config, level):
    """dynamic_reconfigure callback: adopt the new calibration values and
    echo the (unchanged) config back, as the Server API requires."""
    self.phase_offset, self.pose_correction = (config.phase_offset,
                                               config.pose_correction)
    return config
def process_odom(self, msg):
    """Republish each odometry pose in the STAR (ceiling-marker) frame
    on the continuous-pose topic."""
    # Time(0): use the latest available odom->STAR transform.
    p = PoseStamped(header=Header(stamp=rospy.Time(0), frame_id=self.odom_frame_name),
                    pose=msg.pose.pose)
    try:
        STAR_pose = self.tf_listener.transformPose("STAR", p)
        # Preserve the original measurement time on the republished pose.
        STAR_pose.header.stamp = msg.header.stamp
        self.continuous_pose.publish(STAR_pose)
    except Exception as inst:
        print "error is", inst
def process_markers(self, msg):
    """Turn detected AR markers into a robot pose in the STAR frame.

    Each plausible marker detection is converted to a camera position via
    its registered locator, calibrated, published, and used to refresh the
    STAR->odom transform.
    """
    for marker in msg.markers:
        # do some filtering based on prior knowledge
        # we know the approximate z coordinate and that all angles but yaw should be close to zero
        euler_angles = euler_from_quaternion((marker.pose.pose.orientation.x,
                                              marker.pose.pose.orientation.y,
                                              marker.pose.pose.orientation.z,
                                              marker.pose.pose.orientation.w))
        # Deviation of roll from pi and pitch from 0 (marker faces down).
        angle_diffs = TransformHelpers.angle_diff(euler_angles[0],pi), TransformHelpers.angle_diff(euler_angles[1],0)
        print angle_diffs, marker.pose.pose.position.z
        # Accept only known markers at a plausible ceiling height (3.0-3.6 m)
        # with near-nominal roll/pitch.
        if (marker.id in self.marker_locators and
                3.0 <= marker.pose.pose.position.z <= 3.6 and
                fabs(angle_diffs[0]) <= .4 and
                fabs(angle_diffs[1]) <= .4):
            print "FOUND IT!"
            locator = self.marker_locators[marker.id]
            xy_yaw = list(locator.get_camera_position(marker))
            if self.is_flipped:
                print "WE ARE FLIPPED!!!"
                xy_yaw[2] += pi
            print self.pose_correction
            print self.phase_offset
            # Apply the calibrated radial correction in the yaw direction.
            xy_yaw[0] += self.pose_correction*cos(xy_yaw[2]+self.phase_offset)
            xy_yaw[1] += self.pose_correction*sin(xy_yaw[2]+self.phase_offset)
            orientation_tuple = quaternion_from_euler(0,0,xy_yaw[2])
            pose = Pose(position=Point(x=-xy_yaw[0],y=-xy_yaw[1],z=0),
                        orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1], z=orientation_tuple[2], w=orientation_tuple[3]))
            # TODO: use markers timestamp instead of now() (unfortunately, not populated currently by ar_pose)
            pose_stamped = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="STAR"),pose=pose)
            # TODO: use frame timestamp instead of now()
            self.star_pose_pub.publish(pose_stamped)
            self.fix_STAR_to_odom_transform(pose_stamped)
def fix_STAR_to_odom_transform(self, msg):
    """ Super tricky code to properly update map to odom transform... do not modify this... Difficulty level infinity. """
    # Invert the robot-in-STAR pose to get STAR-in-robot, then express it
    # in the odom frame; the inverse of that is the broadcastable transform.
    (translation, rotation) = TransformHelpers.convert_pose_inverse_transform(msg.pose)
    p = PoseStamped(pose=TransformHelpers.convert_translation_rotation_to_pose(translation,rotation),header=Header(stamp=rospy.Time(),frame_id=ROBOT_NAME+"_base_link"))
    try:
        self.tf_listener.waitForTransform(ROBOT_NAME+"_odom",ROBOT_NAME+"_base_link",rospy.Time(),rospy.Duration(1.0))
    except Exception as inst:
        # Give up quietly if odom->base_link is not available yet.
        print "whoops", inst
        return
    print "got transform"
    self.odom_to_STAR = self.tf_listener.transformPose(ROBOT_NAME+"_odom", p)
    # Cached for broadcast_last_transform to re-publish continuously.
    (self.translation, self.rotation) = TransformHelpers.convert_pose_inverse_transform(self.odom_to_STAR.pose)
def broadcast_last_transform(self):
""" Make sure that we are always broadcasting the last map to odom transformation.
This is necessary so things like move_base can work properly. """
if not(hasattr(self,'translation') and hasattr(self,'rotation')):
#.........这里部分代码省略.........
示例7: frame
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
""" Make sure the particle weights define a valid distribution (i.e. sum to 1.0) """
#Soma total das probabilidades das particulas
w_sum = sum([p.w for p in self.particle_cloud])
#Dividir cada probabilidade de uma particula pela soma total
for particle in self.particle_cloud:
particle.w /= w_sum
# TODO: implement this
def publish_particles(self, msg):
    """Publish the current particle cloud as a PoseArray for rviz."""
    print("Publicar Particulas.")
    print(len(self.particle_cloud))
    particles_conv = []
    for p in self.particle_cloud:
        particles_conv.append(p.as_pose())
    # actually send the message so that we can view it in rviz
    self.particle_pub.publish(PoseArray(header=Header(stamp=rospy.Time.now(),
                                                      frame_id=self.map_frame),
                                        poses=particles_conv))
def scan_received(self, msg):
    """ This is the default logic for what to do when processing scan data.
    Feel free to modify this, however, I hope it will provide a good
    guide. The input msg is an object of type sensor_msgs/LaserScan """
    if not(self.initialized):
        print("Not Initialized")
        # wait for initialization to complete
        return
    if not(self.tf_listener.canTransform(self.base_frame,msg.header.frame_id,rospy.Time(0))):
        print("Not 2")
        # need to know how to transform the laser to the base frame
        # this will be given by either Gazebo or neato_node
        return
    if not(self.tf_listener.canTransform(self.base_frame,self.odom_frame,rospy.Time(0))):
        print("Not 3")
        # need to know how to transform between base and odometric frames
        # this will eventually be published by either Gazebo or neato_node
        return
    # calculate pose of laser relative to the robot base
    p = PoseStamped(header=Header(stamp=rospy.Time(0),
                                  frame_id=msg.header.frame_id))
    self.laser_pose = self.tf_listener.transformPose(self.base_frame,p)
    # find out where the robot thinks it is based on its odometry
    p = PoseStamped(header=Header(stamp = rospy.Time(0),
                                  frame_id=self.base_frame),
                    pose=Pose())
    self.odom_pose = self.tf_listener.transformPose(self.odom_frame, p)
    # store the the odometry pose in a more convenient format (x,y,theta)
    new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
    print("PASSOU")
    if not(self.particle_cloud):
        # now that we have all of the necessary transforms we can update the particle cloud
        self.initialize_particle_cloud()
        # cache the last odometric pose so we can only update our particle filter if we move more than self.d_thresh or self.a_thresh
        self.current_odom_xy_theta = new_odom_xy_theta
        # update our map to odom transform now that the particles are initialized
        self.fix_map_to_odom_transform(msg)
    elif (math.fabs(new_odom_xy_theta[0] - self.current_odom_xy_theta[0]) > self.d_thresh or
          math.fabs(new_odom_xy_theta[1] - self.current_odom_xy_theta[1]) > self.d_thresh or
          math.fabs(new_odom_xy_theta[2] - self.current_odom_xy_theta[2]) > self.a_thresh):
        # we have moved far enough to do an update!
        self.update_particles_with_odom(msg)    # update based on odometry
        self.update_particles_with_laser(msg)   # update based on laser scan
        self.update_robot_pose()                # update robot's pose
        self.resample_particles()               # resample particles to focus on areas of high density
        self.fix_map_to_odom_transform(msg)     # update map to odom transform now that we have new particles
    # publish particles (so things like rviz can see them)
    self.publish_particles(msg)
    # (translated) steer particles when an obstacle is identified.
def fix_map_to_odom_transform(self, msg):
    print("Fix Map to Odom Transform")
    """ This method constantly updates the offset of the map and
    odometry coordinate systems based on the latest results from
    the localizer """
    # Invert the estimated robot pose (map->base becomes base->map), express
    # it in the odom frame, then invert again to get the map->odom offset.
    (translation, rotation) = convert_pose_inverse_transform(self.robot_pose)
    p = PoseStamped(pose=convert_translation_rotation_to_pose(translation,rotation),
                    header=Header(stamp=rospy.Time(0),frame_id=self.base_frame))
    self.odom_to_map = self.tf_listener.transformPose(self.odom_frame, p)
    # Cached for broadcast_last_transform to re-publish continuously.
    (self.translation, self.rotation) = convert_pose_inverse_transform(self.odom_to_map.pose)
def broadcast_last_transform(self):
    print("Broadcast")
    """ Make sure that we are always broadcasting the last map
    to odom transformation. This is necessary so things like
    move_base can work properly. """
    # Nothing to broadcast until fix_map_to_odom_transform has run once.
    if not(hasattr(self,'translation') and hasattr(self,'rotation')):
        return
    self.tf_broadcaster.sendTransform(self.translation,
                                      self.rotation,
                                      rospy.get_rostime(),
                                      self.odom_frame,
                                      self.map_frame)
示例8: ArmByFtWrist
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
self.frame_fixer = WrenchFixer(*args)
return config
def follow_pose_cb(self, data):
    """Store the latest pose to follow, converting it into the global
    frame first if it arrives in any other frame."""
    # Normalize away a leading slash for the frame comparison.
    if data.header.frame_id[0] == '/':
        frame_id = data.header.frame_id[1:]
    else:
        frame_id = data.header.frame_id
    if frame_id != self.global_frame_id:
        rospy.logwarn(
            "Poses to follow should be in frame " + self.global_frame_id +
            " and is in " + str(frame_id) + ", transforming into " + self.global_frame_id + "...")
        world_ps = self.transform_pose_to_world(
            data.pose, from_frame=data.header.frame_id)
        ps = PoseStamped()
        ps.header.stamp = data.header.stamp
        ps.header.frame_id = self.global_frame_id
        # BUGFIX: transform_pose_to_world returns a PoseStamped; assigning
        # it whole into ps.pose (a Pose field) produced a malformed message.
        ps.pose = world_ps.pose
        self.last_pose_to_follow = ps
    else:
        self.last_pose_to_follow = data
def transform_pose_to_world(self, pose, from_frame="arm_right_tool_link"):
    """Transform *pose* (a Pose in *from_frame*) into the global frame.

    Retries on tf extrapolation errors until it succeeds or rospy shuts
    down; returns the transformed PoseStamped (implicitly None on shutdown).
    """
    ps = PoseStamped()
    ps.header.stamp = self.tf_l.getLatestCommonTime(
        self.global_frame_id, from_frame)
    # NOTE(review): if from_frame already starts with '/', this yields a
    # double slash — confirm callers always pass unslashed frame names.
    ps.header.frame_id = "/" + from_frame
    # TODO: check pose being PoseStamped or Pose
    ps.pose = pose
    transform_ok = False
    while not transform_ok and not rospy.is_shutdown():
        try:
            world_ps = self.tf_l.transformPose(self.global_frame_id, ps)
            transform_ok = True
            return world_ps
        except ExtrapolationException as e:
            # Transform not available at that stamp yet; wait briefly and
            # retry with a fresher common time.
            rospy.logwarn(
                "Exception on transforming pose... trying again \n(" + str(e) + ")")
            rospy.sleep(0.05)
            ps.header.stamp = self.tf_l.getLatestCommonTime(
                self.global_frame_id, from_frame)
def wrench_cb(self, data):
    # Cache the most recent force/torque reading for the control loop.
    self.last_wrench = data
def get_initial_pose(self):
    """Return the current pose of the goal frame expressed in the global
    frame (identity pose in goal_frame_id, transformed to world)."""
    zero_pose = Pose()
    zero_pose.orientation.w = 1.0  # identity quaternion
    current_pose = self.transform_pose_to_world(
        zero_pose, from_frame=self.goal_frame_id)
    return current_pose
def get_abs_total_force(self, wrench_msg):
    """Sum of the absolute force components of *wrench_msg* (L1 norm)."""
    force = wrench_msg.wrench.force
    return sum(abs(component) for component in (force.x, force.y, force.z))
def get_abs_total_torque(self, wrench_msg):
    """Sum of the absolute torque components of *wrench_msg* (L1 norm)."""
    torque = wrench_msg.wrench.torque
    return sum(abs(component) for component in (torque.x, torque.y, torque.z))
def run(self):
rospy.loginfo(
"Waiting for first wrench message on: " + str(self.wrench_sub.resolved_name))
while not rospy.is_shutdown() and self.last_wrench is None:
示例9: frame
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
# TODO create particles
self.particle_cloud = self.create_initial_particle_list(xy_theta)
self.normalize_particles()
self.update_robot_pose()
def normalize_particles(self):
    """ Make sure the particle weights define a valid distribution (i.e. sum to 1.0) """
    w_sum = sum(p.w for p in self.particle_cloud)
    if w_sum == 0:
        # BUGFIX: avoid ZeroDivisionError on an empty or zero-weight
        # cloud — fall back to a uniform distribution.
        n = len(self.particle_cloud)
        for particle in self.particle_cloud:
            particle.w = 1.0 / n
        return
    for particle in self.particle_cloud:
        particle.w /= w_sum
def publish_particles(self, msg):
    """Publish the current particle cloud as a PoseArray for rviz."""
    print('publish_particles')
    particles_conv = []
    for p in self.particle_cloud:
        particles_conv.append(p.as_pose())
    # actually send the message so that we can view it in rviz
    self.particle_pub.publish(PoseArray(header=Header(stamp=rospy.Time.now(),
                                                      frame_id=self.map_frame),
                                        poses=particles_conv))
def scan_received(self, msg):
    """ This is the default logic for what to do when processing scan data.
    Feel free to modify this, however, I hope it will provide a good
    guide. The input msg is an object of type sensor_msgs/LaserScan """
    print('scan_received')
    if not(self.initialized):
        # wait for initialization to complete
        return
    if not(self.tf_listener.canTransform(self.base_frame,msg.header.frame_id,msg.header.stamp)):
        # need to know how to transform the laser to the base frame
        # this will be given by either Gazebo or neato_node
        return
    if not(self.tf_listener.canTransform(self.base_frame,self.odom_frame,msg.header.stamp)):
        # need to know how to transform between base and odometric frames
        # this will eventually be published by either Gazebo or neato_node
        return
    # calculate pose of laser relative to the robot base
    p = PoseStamped(header=Header(stamp=rospy.Time(0),
                                  frame_id=msg.header.frame_id))
    self.laser_pose = self.tf_listener.transformPose(self.base_frame,p)
    # find out where the robot thinks it is based on its odometry
    p = PoseStamped(header=Header(stamp=rospy.Time(0),
                                  frame_id=self.base_frame),
                    pose=Pose())
    self.odom_pose = self.tf_listener.transformPose(self.odom_frame, p)
    # store the the odometry pose in a more convenient format (x,y,theta)
    new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
    if not(self.particle_cloud):
        # now that we have all of the necessary transforms we can update the particle cloud
        self.initialize_particle_cloud()
        # cache the last odometric pose so we can only update our particle filter if we move more than self.d_thresh or self.a_thresh
        self.current_odom_xy_theta = new_odom_xy_theta
        # update our map to odom transform now that the particles are initialized
        self.fix_map_to_odom_transform(msg)
    elif (math.fabs(new_odom_xy_theta[0] - self.current_odom_xy_theta[0]) > self.d_thresh or
          math.fabs(new_odom_xy_theta[1] - self.current_odom_xy_theta[1]) > self.d_thresh or
          math.fabs(new_odom_xy_theta[2] - self.current_odom_xy_theta[2]) > self.a_thresh):
        # we have moved far enough to do an update!
        self.update_particles_with_odom(msg)    # update based on odometry
        self.update_particles_with_laser(msg)   # update based on laser scan
        self.update_robot_pose()                # update robot's pose
        self.resample_particles()               # resample particles to focus on areas of high density
        self.fix_map_to_odom_transform(msg)     # update map to odom transform now that we have new particles
    # publish particles (so things like rviz can see them)
    self.publish_particles(msg)
def fix_map_to_odom_transform(self, msg):
    """ This method constantly updates the offset of the map and
    odometry coordinate systems based on the latest results from
    the localizer """
    # Invert the estimated robot pose, express it in the odom frame, then
    # invert again to obtain the broadcastable map->odom offset.
    (translation, rotation) = convert_pose_inverse_transform(self.robot_pose)
    p = PoseStamped(pose=convert_translation_rotation_to_pose(translation,rotation),
                    header=Header(stamp=rospy.Time(0),frame_id=self.base_frame))
    self.odom_to_map = self.tf_listener.transformPose(self.odom_frame, p)
    # Cached for broadcast_last_transform to re-publish continuously.
    (self.translation, self.rotation) = convert_pose_inverse_transform(self.odom_to_map.pose)
def broadcast_last_transform(self):
    """ Make sure that we are always broadcasting the last map
    to odom transformation. This is necessary so things like
    move_base can work properly. """
    # Nothing to broadcast until fix_map_to_odom_transform has run once.
    if not(hasattr(self,'translation') and hasattr(self,'rotation')):
        return
    print('broadcast')
    self.tf_broadcaster.sendTransform(self.translation,
                                      self.rotation,
                                      rospy.Time.now(),
                                      self.odom_frame,
                                      self.map_frame)
示例10: ArmActions
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
# ends = [[x_plus,x_minus],[y_plus, y_minus],[z_plus,z_minus]]
# side = [[0]*7]*3
# for i, part in enumerate(parts):
# if part >=0:
# side[i] = ends[i][0]
# else:
# side[i] = ends[i][1]
#
# ang_out = curr_angs
# for i in range(3):
# ang_out -= np.average(side, 0, parts)
# except:
# print 'Near Joint Limits!'
# self.aul.send_joint_angles(ang_out)
#
# #print 'x: ', x_plus, x_minus
# maintain_rate.sleep()
# mean = self.force_goal_mean
# std = self.force_goal_std
def maintain_net_force(self, mean=0, std=8):
    """Compliance loop: while active, move the wrist away from any net
    force above the threshold so contact forces stay near *mean*.

    Runs until self.stop_maintain is set or rospy shuts down; the
    thresholds are refreshed each cycle from self.force_goal_mean/std.
    """
    self.stop_maintain = False
    maintain_rate = rospy.Rate(100)
    while not (rospy.is_shutdown() or self.stop_maintain):
        # NOTE(review): `std` is refreshed below but never used in the
        # threshold test (only mean + 3) — confirm this is intentional.
        if self.ft_mag > mean + 3:
            #print 'Moving to avoid force'
            # Step opposite the measured force, clamped to +/-10 cm;
            # the z term carries a fixed sensor offset (+0.052 m).
            goal = PoseStamped()
            goal.header.frame_id = 'l_netft_frame'
            goal.header.stamp = rospy.Time(0)
            goal.pose.position.x = -np.clip(self.ft.wrench.force.x/2500, -0.1, 0.1)
            goal.pose.position.y = -np.clip(self.ft.wrench.force.y/2500, -0.1, 0.1)
            goal.pose.position.z = -np.clip(self.ft.wrench.force.z/2500, -0.1, 0.1)+0.052
            goal = self.tfl.transformPose('/torso_lift_link', goal)
            goal.header.stamp = rospy.Time.now()
            # Keep the current orientation; only translate away from force.
            goal.pose.orientation = self.aul.curr_pose().pose.orientation
            self.test_pose.publish(goal)
            self.aul.fast_move(goal, 0.2)
            rospy.sleep(0.05)
        maintain_rate.sleep()
        mean = self.force_goal_mean
        std = self.force_goal_std
#def mannequin(self):
#mannequin_rate=rospy.Rate(10)
#while not rospy.is_shutdown():
#joints = np.add(np.array(self.aul.joint_state_act.positions),
# np.clip(np.array(self.aul.joint_state_err.positions), -0.05, 0.05))
#joints = self.aul.joint_state_act.positions
#print joints
#raw_input('Review Joint Angles')
#self.aul.send_joint_angles(joints,0.1)
#mannequin_rate.sleep()
def force_wipe_agg(self, ps):
self.aul.update_frame(ps)
rospy.sleep(0.1)
pose = self.aul.find_approach(ps, 0)
(goal_reachable, ik_goal) = self.aul.ik_check(pose)
if goal_reachable:
if not self.force_wipe_started:
appr = self.aul.find_approach(ps, 0.20)
(appr_reachable, ik_goal) = self.aul.ik_check(appr)
self.test_pose.publish(appr)
示例11: Controller
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
def _cmdVelTelopChanged(self, data):
    # Remember the latest teleop command; forward it directly to the
    # drone only while in Manual mode.
    self.cmd_vel_telop = data
    if self.state == Controller.Manual:
        self.pubNav.publish(data)
def pidReset(self):
    """Reset all four PID controllers (x, y, z, yaw)."""
    self.pidX.reset()
    # BUGFIX: was resetting pidZ twice and never pidY (copy-paste error);
    # pidY is used by run() and must be reset too.
    self.pidY.reset()
    self.pidZ.reset()
    self.pidYaw.reset()
def _joyChanged(self, data):
    """Joystick callback: act on button *edges* (newly pressed buttons)
    to switch controller state and adjust the altitude setpoint."""
    # Only compare when the layouts match (same joystick message shape).
    if len(data.buttons) == len(self.lastJoy.buttons):
        # delta[i] == 1 exactly when button i transitioned 0 -> 1.
        delta = np.array(data.buttons) - np.array(self.lastJoy.buttons)
        print ("Buton ok")
        #Button 1: switch to Automatic, seeding the z integral with the
        # current teleop thrust so the handover is bumpless.
        if delta[0] == 1 and self.state != Controller.Automatic:
            print("Automatic!")
            thrust = self.cmd_vel_telop.linear.z
            print(thrust)
            self.pidReset()
            self.pidZ.integral = thrust / self.pidZ.ki
            self.targetZ = 0.5
            self.state = Controller.Automatic
        #Button 2: back to direct teleop control.
        if delta[1] == 1 and self.state != Controller.Manual:
            print("Manual!")
            self.pubNav.publish(self.cmd_vel_telop)
            self.state = Controller.Manual
        #Button 3: begin the take-off ramp (see run()).
        if delta[2] == 1:
            self.state = Controller.TakeOff
            print("TakeOff!")
        #Button 5: raise the altitude setpoint by 10 cm.
        if delta[4] == 1:
            self.targetZ += 0.1
            print(self.targetZ)
        #Button 6: lower the altitude setpoint by 10 cm.
        if delta[5] == 1:
            self.targetZ -= 0.1
            print(self.targetZ)
    self.lastJoy = data
def run(self):
    """Main control loop: ramp thrust during TakeOff until airborne, then
    hold position above the target using per-axis PID in Automatic mode."""
    thrust = 0
    print("jello")
    while not rospy.is_shutdown():
        if self.state == Controller.TakeOff:
            t = self.listener.getLatestCommonTime("/mocap", "/Nano_Mark")
            if self.listener.canTransform("/mocap", "/Nano_Mark", t):
                position, quaternion = self.listener.lookupTransform("/mocap", "/Nano_Mark", t)
                # Airborne (10 cm up) or thrust ramp exhausted: hand over
                # to the PID loop, seeding the z integral for a bumpless switch.
                if position[2] > 0.1 or thrust > 50000:
                    self.pidReset()
                    self.pidZ.integral = thrust / self.pidZ.ki
                    self.targetZ = 0.5
                    self.state = Controller.Automatic
                    thrust = 0
                else:
                    # Keep ramping thrust until lift-off is detected.
                    thrust += 100
                    msg = self.cmd_vel_telop
                    msg.linear.z = thrust
                    self.pubNav.publish(msg)
        if self.state == Controller.Automatic:
            # transform target world coordinates into local coordinates
            t = self.listener.getLatestCommonTime("/Nano_Mark", "/mocap")
            print(t);
            if self.listener.canTransform("/Nano_Mark", "/mocap", t):
                targetWorld = PoseStamped()
                targetWorld.header.stamp = t
                targetWorld.header.frame_id = "mocap"
                targetWorld.pose.position.x = 0
                targetWorld.pose.position.y = 0
                targetWorld.pose.position.z = self.targetZ
                quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)
                targetWorld.pose.orientation.x = quaternion[0]
                targetWorld.pose.orientation.y = quaternion[1]
                targetWorld.pose.orientation.z = quaternion[2]
                targetWorld.pose.orientation.w = quaternion[3]
                # Express the world target in the drone's own frame so the
                # PID error is simply the transformed position.
                targetDrone = self.listener.transformPose("/Nano_Mark", targetWorld)
                quaternion = (
                    targetDrone.pose.orientation.x,
                    targetDrone.pose.orientation.y,
                    targetDrone.pose.orientation.z,
                    targetDrone.pose.orientation.w)
                euler = tf.transformations.euler_from_quaternion(quaternion)
                #msg = self.cmd_vel_teleop
                # NOTE(review): `msg` is only bound in the TakeOff branch
                # above — entering Automatic without a TakeOff ramp would
                # raise a NameError here; confirm the intended flow.
                msg.linear.x = self.pidX.update(0.0, targetDrone.pose.position.x)
                msg.linear.y = self.pidY.update(0.0, targetDrone.pose.position.y)
                msg.linear.z = self.pidZ.update(0.0, targetDrone.pose.position.z) #self.pidZ.update(position[2], self.targetZ)
                msg.angular.z = self.pidYaw.update(0.0, euler[2])
                #print(euler[2])
                #print(msg.angular.z)
                self.pubNav.publish(msg)
        rospy.sleep(0.01)
示例12: ArmCommander
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
input.append(eef_pose)
else:
raise TypeError("ArmCommander.get_ik() accepts only a list of Postamped or [[x, y, z], [x, y, z, w]], got {}".format(str(type(eef_pose))))
output = self._kinematics_services['ik'][self._selected_ik if source is None else source]['func'](input, seeds, params)
return output if len(eef_poses)>1 else output[0]
def get_fk(self, frame_id=None, robot_state=None):
    """
    Return The FK solution oth this robot state according to the method declared in the constructor
    robot_state = None will give the current endpoint pose in frame_id
    :param robot_state: a RobotState message (or None for the current state)
    :param frame_id: the frame you want the endpoint pose into (defaults to the world frame)
    :return: [[x, y, z], [x, y, z, w]]
    :raises TypeError: if robot_state is neither a RobotState nor None
    """
    if frame_id is None:
        frame_id = self._world
    if isinstance(robot_state, RobotState) or robot_state is None:
        # Dispatch to the FK backend selected in the constructor.
        return self._kinematics_services['fk'][self._selected_fk]['func'](frame_id, robot_state)
    else:
        raise TypeError("ArmCommander.get_fk() accepts only a RobotState, got {}".format(str(type(robot_state))))
def _get_fk_pykdl(self, frame_id, state=None):
    """FK via the PyKDL backend; *frame_id* is unused here (PyKDL answers
    in its own base frame). Returns [[x, y, z], [x, y, z, w]]."""
    if state is None:
        state = self.get_current_state()
    # PyKDL wants {joint name: position}; the solver returns a flat
    # 7-vector of position (first 3) + quaternion (last 4).
    fk = self._kinematics_pykdl.forward_position_kinematics(dict(zip(state.joint_state.name, state.joint_state.position)))
    return [fk[:3], fk[-4:]]
def _get_fk_robot(self, frame_id = None, state=None):
    # Keep this half-working FK, still used by generate_cartesian_path (trajectories.py)
    # It can only report the robot's *current* endpoint pose (no arbitrary
    # state evaluation), transformed into frame_id via tf.
    if state is not None:
        raise NotImplementedError("_get_fk_robot has no FK service provided by the robot except for its current endpoint pose")
    ps = list_to_pose(self.endpoint_pose(), self._world)
    return self._tf_listener.transformPose(frame_id, ps)
def _get_fk_ros(self, frame_id = None, state=None):
    """FK via the ROS GetPositionFK service.

    *state* may be a RobotState, a JointState, or None (current state).
    Returns the endpoint PoseStamped on success, None on service error.
    """
    rqst = GetPositionFKRequest()
    rqst.header.frame_id = self._world if frame_id is None else frame_id
    rqst.fk_link_names = [self.endpoint_name()]
    if isinstance(state, RobotState):
        rqst.robot_state = state
    elif isinstance(state, JointState):
        rqst.robot_state.joint_state = state
    elif state is None:
        rqst.robot_state = self.get_current_state()
    else:
        raise AttributeError("Provided state is an invalid type")
    fk_answer = self._kinematics_services['fk']['ros']['service'].call(rqst)
    # error_code.val == 1 is moveit_msgs SUCCESS.
    if fk_answer.error_code.val==1:
        return fk_answer.pose_stamped[0]
    else:
        return None
def _get_ik_pykdl(self, eef_poses, seeds=(), params=None):
solutions = []
for pose_num, eef_pose in enumerate(eef_poses):
if eef_pose.header.frame_id.strip('/') != self._world.strip('/'):
raise NotImplementedError("_get_ik_pykdl: Baxter PyKDL implementation does not accept frame_ids other than {}".format(self._world))
pose = pose_to_list(eef_pose)
resp = self._kinematics_pykdl.inverse_kinematics(pose[0], pose[1],
[seeds[pose_num].joint_state.position[seeds[pose_num].joint_state.name.index(joint)]
for joint in self.joint_names()] if len(seeds)>0 else None)
if resp is None:
rs = None
示例13: ArmIntermediary
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
(reachable, ik_goal) = self.pr2_arm.full_ik_check(self.wipe_ends[0])
if not reachable:
rospy.loginfo("Cannot find approach for initial wipe position,\
please try another")
self.wt_log_out.publish(data="Cannot find approach for initial\
wipe position, please try another")
return None
else:
self.wipe_started = True
rospy.loginfo("Received starting position for wiping action")
self.wt_log_out.publish(data="Received starting position for\
wiping action")
return None
else:
self.wipe_ends[1] = pose_utils.find_approach(ps, 0)
self.wipe_ends.reverse()
(reachable, ik_goal) = self.pr2_arm.full_ik_check(self.wipe_ends[1])
if not reachable:
rospy.loginfo("Cannot find approach for final wipe position,\
please try another")
self.wt_log_out.publish(data="Cannot find approach for final\
wipe position, please try another")
return None
else:
rospy.loginfo("Received End position for wiping action")
self.wt_log_out.publish(data="Received End position for wiping\
action")
self.wipe_ends[1].header.stamp = rospy.Time.now()
self.tfl.waitForTransform(self.wipe_ends[1].header.frame_id,
'rh_utility_frame',
rospy.Time.now(),
rospy.Duration(3.0))
fin_in_start = self.tfl.transformPose('rh_utility_frame',
self.wipe_ends[1])
ang = math.atan2(-fin_in_start.pose.position.z,
-fin_in_start.pose.position.y)+(math.pi/2)
q_st_rot = transformations.quaternion_about_axis(ang, (1,0,0))
q_st_new = transformations.quaternion_multiply(
[self.wipe_ends[0].pose.orientation.x,
self.wipe_ends[0].pose.orientation.y,
self.wipe_ends[0].pose.orientation.z,
self.wipe_ends[0].pose.orientation.w],
q_st_rot)
self.wipe_ends[0].pose.orientation = Quaternion(*q_st_new)
self.wipe_ends[0].header.stamp = rospy.Time.now()
self.tfl.waitForTransform(self.wipe_ends[0].header.frame_id,
'rh_utility_frame',
rospy.Time.now(),
rospy.Duration(3.0))
start_in_fin = self.tfl.transformPose('rh_utility_frame',
self.wipe_ends[0])
ang = math.atan2(start_in_fin.pose.position.z,
start_in_fin.pose.position.y)+(math.pi/2)
q_st_rot = transformations.quaternion_about_axis(ang, (1,0,0))
q_st_new = transformations.quaternion_multiply(
[self.wipe_ends[1].pose.orientation.x,
self.wipe_ends[1].pose.orientation.y,
self.wipe_ends[1].pose.orientation.z,
self.wipe_ends[1].pose.orientation.w],
q_st_rot)
self.wipe_ends[1].pose.orientation = Quaternion(*q_st_new)
appr = pose_utils.find_approach(self.wipe_appr_seed, 0.15)
示例14: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
class OccupancyGridMapper:
""" Implements simple occupancy grid mapping """
def __init__(self):
    """Set up the log-odds occupancy grid, tf listener, and every ROS
    publisher/subscriber used for mapping and colored-ball tracking."""
    cv2.namedWindow("map")
    #Establish mapping conventions
    self.origin = [-10, -10]  # world coordinates (meters) of grid cell (0, 0)
    self.seq = 0  # sequence counter for published messages
    self.resolution = .1  # meters per grid cell
    self.n = 200  # grid is n x n cells (20m x 20m at this resolution)
    self.p_occ = .5  # prior probability that a cell is occupied
    self.odds_ratio_hit = 3.0  # multiplicative odds update when a beam endpoint hits a cell
    self.odds_ratio_miss = .2  # multiplicative odds update when a beam passes through a cell
    # grid of odds ratios, initialised everywhere to the prior odds p/(1-p)
    self.odds_ratios = self.p_occ / (1 - self.p_occ) * np.ones((self.n, self.n))
    self.tf_listener = TransformListener()
    #Subscribers and Publishers
    # NOTE(review): subscribers are registered before the attributes below are
    # assigned, so a callback could fire mid-constructor — confirm the callbacks
    # only read attributes that are already set by that point.
    rospy.Subscriber("scan", LaserScan, self.process_scan, queue_size=1)
    self.pub = rospy.Publisher("map", OccupancyGrid)
    self.ycoor_pub = rospy.Publisher("/yellow_coords", Vector3)
    self.bcoor_pub = rospy.Publisher("/blue_coords", Vector3)
    self.rcoor_pub = rospy.Publisher("/red_coords", Vector3)
    self.gcoor_pub = rospy.Publisher("/green_coords", Vector3)
    self.coords_sub_red = rospy.Subscriber('ball_coords_red', Vector3, self.coordinate_to_map_red)
    self.coords_sub_green = rospy.Subscriber('ball_coords_green', Vector3, self.coordinate_to_map_green)
    self.coords_sub_blue = rospy.Subscriber('ball_coords_blue', Vector3, self.coordinate_to_map_blue)
    self.coords_sub_yellow = rospy.Subscriber('ball_coords_yellow', Vector3, self.coordinate_to_map_yellow)
    #Camera translations
    #TODO Add stuff for each color, so can map more than one at a time
    self.frame_height = 480  # camera image height in pixels
    self.frame_width = 640  # camera image width in pixels
    # apparently a linear pixel-size -> depth model (see the pixel/meter
    # data points below) — TODO confirm against the consuming callback
    self.depth_proportion = -0.013
    self.depth_intercept = 2.105
    #85pixels - 1meter
    #62pixels - 1.3meter
    # BGR drawing colors for each tracked ball
    self.red = (0, 0, 255)
    self.yellow = (0, 255, 255)
    self.blue = (255, 0, 0)
    self.green = (0, 255, 0)
    # per-color tracking state: estimated depth, x/y offsets, heading delta
    self.depth_yellow = 0
    self.y_transform_yellow = 0
    self.x_transform_yellow = 0
    self.angle_diff_yellow = 0
    self.depth_red = 0
    self.y_transform_red = 0
    self.x_transform_red = 0
    self.angle_diff_red = 0
    self.depth_green = 0
    self.y_transform_green = 0
    self.x_transform_green = 0
    self.angle_diff_green = 0
    self.depth_blue = 0
    self.y_transform_blue = 0
    self.x_transform_blue = 0
    self.angle_diff_blue = 0
    # last pixel coordinates of each ball in the camera frame (-1 = not seen yet)
    self.x_camera_red = -1
    self.y_camera_red = -1
    self.x_camera_blue = -1
    self.y_camera_blue = -1
    self.x_camera_green = -1
    self.y_camera_green = -1
    self.x_camera_yellow = -1
    self.y_camera_yellow = -1
    rospy.loginfo('Mapper running')
def is_in_map(self, x_ind, y_ind):
""" Return whether or not the given point is within the map boundaries """
return not (x_ind < self.origin[0] or
x_ind > self.origin[0] + self.n * self.resolution or
y_ind < self.origin[1] or
y_ind > self.origin[1] + self.n * self.resolution)
def process_scan(self, msg):
    """ Callback function for the laser scan messages.

    Rejects sparse scans, caches the robot's odometry and laser poses via tf,
    then projects each in-range reading into world coordinates.
    NOTE: the tail of this method was elided in the scraped source.
    """
    if len(msg.ranges) <= 330:
        # throw out scans that don't have more than 90% of the data
        return
    # get pose according to the odometry
    p = PoseStamped(header=Header(stamp=msg.header.stamp, frame_id="base_link"), pose=Pose())
    self.odom_pose = self.tf_listener.transformPose("odom", p)
    self.base_pose = self.tf_listener.transformPose("base_laser_link", p)
    # convert the odom pose to the tuple (x,y,theta)
    self.odom_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)
    #(-0.0069918, 0.000338577, 0.048387097)
    #(1.0208817, 0.04827240, 0.048387)
    self.base_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.base_pose.pose)
    for i in range(len(msg.ranges)):
        if 0.0 < msg.ranges[i] < 5.0: # for any reading within 5 meters
            # Using the pose and the measurement and the angle, find it in the world
            # (beam index i is treated as the beam angle in degrees)
            map_x = self.odom_pose[0] + msg.ranges[i] * cos(i * pi / 180.0 + self.odom_pose[2])
            # ... remainder of this method omitted in the scraped source ...
示例15: frame
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import transformPose [as 别名]
#.........这里部分代码省略.........
xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
self.particle_cloud = []
for i in range(self.n_particles):
self.particle_cloud.append(Particle(x=xy_theta[0]+gauss(0,0.25),y=xy_theta[1]+gauss(0,0.25),theta=xy_theta[2]+gauss(0,0.25)))
self.normalize_particles()
self.update_robot_pose()
def normalize_particles(self):
    """ Make sure the particle weights define a valid distribution (i.e. sum to 1.0).

    If the total weight is zero the cloud is degenerate (no particle received
    any likelihood), so fall back to a uniform distribution.
    Fixes vs. original: drops pointless deepcopy of float weights and leftover
    debug prints; uses a float literal so the uniform fallback is not Python 2
    integer division (1/len(...) == 0 there); guards the empty-cloud case.
    """
    if not self.particle_cloud:
        # nothing to normalize (also avoids ZeroDivisionError below)
        return
    total = sum(p.w for p in self.particle_cloud)
    if total > 0:
        for particle in self.particle_cloud:
            particle.w /= total
    else:
        # 1.0 (not 1) keeps this correct under Python 2 integer division
        uniform_weight = 1.0 / len(self.particle_cloud)
        for particle in self.particle_cloud:
            particle.w = uniform_weight
def publish_particles(self, msg):
    """Publish the current particle cloud as a PoseArray in the map frame
    (so tools like rviz can visualize it)."""
    cloud_header = Header(stamp=rospy.Time.now(), frame_id=self.map_frame)
    pose_list = [p.as_pose() for p in self.particle_cloud]
    # actually send the message so that we can view it in rviz
    self.particle_pub.publish(PoseArray(header=cloud_header, poses=pose_list))
def scan_received(self, msg):
    """ This is the default logic for what to do when processing scan data.
    Feel free to modify this, however, I hope it will provide a good
    guide. The input msg is an object of type sensor_msgs/LaserScan.

    Pipeline: bail out until initialization and the required tf frames are
    available; cache the laser pose (relative to base) and the odometric
    robot pose; initialize the particle cloud on first use; once the robot
    has moved past the distance/angle thresholds run the filter update
    (odometry, laser, pose, resample, map->odom); finally publish particles.
    """
    if not(self.initialized):
        # wait for initialization to complete
        return
    if not(self.tf_listener.canTransform(self.base_frame,msg.header.frame_id,msg.header.stamp)):
        # need to know how to transform the laser to the base frame
        # this will be given by either Gazebo or neato_node
        return
    if not(self.tf_listener.canTransform(self.base_frame,self.odom_frame,msg.header.stamp)):
        # need to know how to transform between base and odometric frames
        # this will eventually be published by either Gazebo or neato_node
        return
    # calculate pose of laser relative to the robot base
    p = PoseStamped(header=Header(stamp=rospy.Time(0),
                                  frame_id=msg.header.frame_id))
    self.laser_pose = self.tf_listener.transformPose(self.base_frame,p)
    # find out where the robot thinks it is based on its odometry
    p = PoseStamped(header=Header(stamp=msg.header.stamp,
                                  frame_id=self.base_frame),
                    pose=Pose())
    self.odom_pose = self.tf_listener.transformPose(self.odom_frame, p)
    # store the odometry pose in a more convenient format (x,y,theta)
    new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
    if not(self.particle_cloud):
        # now that we have all of the necessary transforms we can update the particle cloud
        self.initialize_particle_cloud()
        # cache the last odometric pose so we can only update our particle filter if we move more than self.d_thresh or self.a_thresh
        self.current_odom_xy_theta = new_odom_xy_theta
        # update our map to odom transform now that the particles are initialized
        self.fix_map_to_odom_transform(msg)
    elif (math.fabs(new_odom_xy_theta[0] - self.current_odom_xy_theta[0]) > self.d_thresh or
          math.fabs(new_odom_xy_theta[1] - self.current_odom_xy_theta[1]) > self.d_thresh or
          math.fabs(new_odom_xy_theta[2] - self.current_odom_xy_theta[2]) > self.a_thresh):
        # we have moved far enough to do an update!
        self.update_particles_with_odom(msg)  # update based on odometry
        self.update_particles_with_laser(msg)  # update based on laser scan
        self.update_robot_pose()  # update robot's pose
        self.resample_particles()  # resample particles to focus on areas of high density
        self.fix_map_to_odom_transform(msg)  # update map to odom transform now that we have new particles
    # publish particles (so things like rviz can see them)
    self.publish_particles(msg)
def fix_map_to_odom_transform(self, msg):
    """ This method constantly updates the offset of the map and
    odometry coordinate systems based on the latest results from
    the localizer """
    # Invert the localized robot pose, express it in the odom frame, then
    # invert again to obtain the map->odom offset for broadcasting.
    trans, rot = convert_pose_inverse_transform(self.robot_pose)
    inverted_pose = PoseStamped(
        pose=convert_translation_rotation_to_pose(trans, rot),
        header=Header(stamp=msg.header.stamp, frame_id=self.base_frame))
    self.odom_to_map = self.tf_listener.transformPose(self.odom_frame, inverted_pose)
    self.translation, self.rotation = convert_pose_inverse_transform(self.odom_to_map.pose)
def broadcast_last_transform(self):
    """ Make sure that we are always broadcasting the last map
    to odom transformation. This is necessary so things like
    move_base can work properly. """
    have_offset = hasattr(self, 'translation') and hasattr(self, 'rotation')
    if not have_offset:
        # nothing localized yet — nothing to broadcast
        return
    self.tf_broadcaster.sendTransform(self.translation,
                                      self.rotation,
                                      rospy.get_rostime(),
                                      self.odom_frame,
                                      self.map_frame)