本文整理汇总了Python中tf.TransformListener.waitForTransform方法的典型用法代码示例。如果您正苦于以下问题:Python TransformListener.waitForTransform方法的具体用法?Python TransformListener.waitForTransform怎么用?Python TransformListener.waitForTransform使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tf.TransformListener
的用法示例。
在下文中一共展示了TransformListener.waitForTransform方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: subscriber_callback
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
def subscriber_callback(data):
    """Occupancy-grid callback: pick a random frontier goal, ask move_base for a
    plan to it, then send the goal to the move_base action server.

    data: nav_msgs/OccupancyGrid (uses data.data, info.width/height/resolution).
    """
    # Convert the flat occupancy array into a 2-D map representation.
    occupancyMap = transformation(data.data, data.info.width, data.info.height)
    # Candidate goal cells extracted from the map.
    way_points = find_goals(occupancyMap, data.info.width, data.info.height)
    # Explore by choosing one candidate at random.
    result = random.choice(way_points)
    try:
        planMaker = rospy.ServiceProxy('move_base/make_plan', nav_srv.GetPlan)
        listener = TransformListener()
        # Block until base_link->map is available (up to 4 s).
        listener.waitForTransform("base_link", "map", rospy.Time(), rospy.Duration(4.0))
        t = listener.getLatestCommonTime("base_link", "map")
        position, quaternion = listener.lookupTransform("base_link", "map", t)
        pos_x = position[0]
        pos_y = position[1]
        pos_z = position[2]
        # NOTE(review): the 2000-cell offset looks like the grid origin in cells -- confirm.
        goal_robot = create_goal((result[1]-2000)*data.info.resolution,(result[0]-2000)*data.info.resolution)
        #Make plan with 0.5 meter flexibility, from target pose and current pose (with same header)
        start_pose = create_message(pos_x,pos_y,pos_z,quaternion)
        plan = planMaker(
            start_pose,
            goal_robot.target_pose,
            0.5)
        action_server = actionlib.SimpleActionClient('move_base', mb_msg.MoveBaseAction);
        action_server.wait_for_server()
        send_goal(goal_robot, action_server)
    except rospy.ServiceException, e:
        print "plan service call failed: %s"%e
示例2: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class FTPNode:
    """Rotates the robot base to face the first tracked human and publishes a
    direction-arrow marker toward that person."""
    def __init__(self, *args):
        print("init")
        # TF listener/transformer used to bring tracked-human points into /base_link.
        self.tf = TransformListener()
        self.tt = Transformer()
        rospy.Subscriber("/trackedHumans", TrackedHumans, self.pos_callback)
        self.publisher = rospy.Publisher("directionmarker_array", MarkerArray)
    def pos_callback(self, data):
        """Tracked-humans callback: turn in place toward the first tracked human."""
        rospy.loginfo("on updated pos, face robot towards guy...")
        print("hi")
        if (len(data.trackedHumans) > 0):
            print(data.trackedHumans[0].location.point.x)
            try:
                # Wait until the human's frame can be transformed into /base_link.
                self.tf.waitForTransform(data.trackedHumans[0].location.header.frame_id, "/base_link", rospy.Time.now(), rospy.Duration(2.0))
                pp = self.tf.transformPoint("/base_link", data.trackedHumans[0].location)
                print "Original:"
                print [data.trackedHumans[0].location.point]
                print "Transform:"
                print pp.point
                # Bearing of the person relative to the robot's forward axis.
                phi = math.atan2(pp.point.y, pp.point.x)
                # Rotate the base in place by phi (sss: simple_script_server).
                sss.move_base_rel("base", [0,0,phi])
                print phi*180/math.pi
                # Visualize the direction to the person as an arrow in /base_link.
                markerArray = MarkerArray()
                marker = Marker()
                marker.header.stamp = rospy.Time();
                marker.ns = "my_namespace";
                marker.id = 0;
                marker.header.frame_id = "/base_link"
                marker.type = marker.ARROW
                marker.action = marker.ADD
                marker.scale.x = .1
                marker.scale.y = .1
                marker.scale.z = .1
                marker.color.a = 1.0
                marker.color.r = 1.0
                marker.color.g = 1.0
                marker.color.b = 0.0
                # Arrow from the robot origin to the transformed person position.
                p1 = Point()
                p1.x = 0
                p1.y = 0
                p1.z = 0
                p2 = Point()
                p2.x = pp.point.x
                p2.y = pp.point.y
                p2.z = 0
                marker.points = [p1,p2]
                #marker.pose.position.x = 1
                #marker.pose.position.y = 0
                #marker.pose.position.z = 1
                #marker.pose.orientation.w = 1
                markerArray.markers.append(marker)
                self.publisher.publish(markerArray)
                print "try ended"
            except Exception as e:
                print e
示例3: ChallengeProblemLogger
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class ChallengeProblemLogger(object):
    """Logs turtlebot ground-truth pose (mapped into /map) and obstacle
    add/remove events derived from Gazebo model states."""
    _knownObstacles = {}      # obstacle model name -> index in data.name
    _placedObstacle = False
    _lastgzlog = 0.0          # wall time (sec) of last log, for 1 Hz throttling
    _tf_listener = None

    def __init__(self, name):
        self._name = name
        self._experiment_started = False
        self._tf_listener = TransformListener()
        # Subscribe to robot pose ground truth from gazebo
        rospy.Subscriber("/gazebo/model_states", ModelStates, callback=self.gazebo_model_monitor,
                         queue_size=1)

    # Whenever we get a report from Gazebo, map the gazebo coordinate to map
    # coordinates and log this. Only do this every second - accurate enough.
    # TODO: model is assumed to be the third in the list. Need to make this based
    # on the array to account for obstacles (maybe)
    def gazebo_model_monitor(self, data):
        """Throttled callback: log robot /map pose and obstacle changes.

        data: gazebo_msgs/ModelStates (parallel name/pose arrays).
        """
        if len(data.pose) <= 2:
            return
        data_time = rospy.get_rostime().to_sec()
        # BUG FIX: use boolean 'or' instead of bitwise '|' (which eagerly
        # evaluates both sides and is not a logical operator).
        if self._lastgzlog == 0.0 or data_time - self._lastgzlog >= 1:
            # Get the turtlebot model state information (assumed to be indexed at 2)
            tb_pose = data.pose[2]
            tb_position = tb_pose.position
            self._lastgzlog = data_time
            # Do this only if the transform exists
            if self._tf_listener.frameExists("/base_link") and self._tf_listener.frameExists("/map"):
                self._tf_listener.waitForTransform("/map", "/base_link", rospy.Time(0), rospy.Duration(1))
                (trans, rot) = self._tf_listener.lookupTransform("/map", "/base_link", rospy.Time(0))
                rospy.loginfo("BRASS | Turtlebot | {},{}".format(trans[0], trans[1]))
            # Log any obstacle information, but do it only once. This currently assumes one obstacle
            # TODO: test this
            if len(data.name) > 3:
                addedObstacles = {}
                removedObstacles = self._knownObstacles.copy()
                # NOTE(review): range stops at len(data.name)-1, so the last model
                # is never treated as an obstacle -- confirm this off-by-one is intended.
                for obs in range(3, len(data.name)-1):
                    if data.name[obs] not in self._knownObstacles:
                        addedObstacles[data.name[obs]] = obs
                    else:
                        self._knownObstacles[data.name[obs]] = obs
                        del removedObstacles[data.name[obs]]
                # BUG FIX: rospy.logInfo does not exist -- the API is rospy.loginfo.
                # Also use items() (iteritems() is Python-2-only).
                for key, value in removedObstacles.items():
                    rospy.loginfo("BRASS | Obstacle {} | removed".format(key))
                    del self._knownObstacles[key]
                for key, value in addedObstacles.items():
                    obs_pos = data.pose[value].position
                    rospy.loginfo("BRASS | Obstacle {} | {},{}".format(key, obs_pos.x, obs_pos.y))
                    self._knownObstacles[key] = value
示例4: CameraPointer
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class CameraPointer(object):
    """Points an arm-mounted camera at requested 3-D targets via a lookat IK solver."""
    def __init__(self, side, camera_ik):
        # side: arm prefix used in topic names; camera_ik: project IK object
        # providing base_frame, arm_indices, arm_joint_names and lookat_ik().
        self.side = side
        self.camera_ik = camera_ik
        self.joint_names = self.joint_angles_act = self.joint_angles_des = None
        self.tfl = TransformListener()
        self.joint_state_sub = rospy.Subscriber('/{0}_arm_controller/state'.format(self.side), JointTrajectoryControllerState, self.joint_state_cb)
        self.joint_traj_pub = rospy.Publisher('/{0}_arm_controller/command'.format(self.side), JointTrajectory)
        # Wait for joint information to become available
        while self.joint_names is None and not rospy.is_shutdown():
            rospy.sleep(0.5)
            rospy.loginfo("[{0}] Waiting for joint state from arm controller.".format(rospy.get_name()))
        #Set rate limits on a per-joint basis
        self.max_vel_rot = [np.pi]*len(self.joint_names)
        self.target_sub = rospy.Subscriber('{0}/lookat_ik/goal'.format(rospy.get_name()), PointStamped, self.goal_cb)
        rospy.loginfo("[{0}] Ready.".format(rospy.get_name()))
    def joint_state_cb(self, jtcs):
        # Cache joint names (once) plus actual/desired angles from controller state.
        if self.joint_names is None:
            self.joint_names = jtcs.joint_names
        self.joint_angles_act = jtcs.actual.positions
        self.joint_angles_des = jtcs.desired.positions
    def goal_cb(self, pt_msg):
        """Transform the goal point into the IK base frame, solve lookat IK, and
        publish a velocity-limited joint trajectory."""
        rospy.loginfo("[{0}] New LookatIK goal received.".format(rospy.get_name()))
        if (pt_msg.header.frame_id != self.camera_ik.base_frame):
            try:
                now = pt_msg.header.stamp
                self.tfl.waitForTransform(pt_msg.header.frame_id,
                                          self.camera_ik.base_frame,
                                          now, rospy.Duration(1.0))
                pt_msg = self.tfl.transformPoint(self.camera_ik.base_frame, pt_msg)
            except (LookupException, ConnectivityException, ExtrapolationException):
                rospy.logwarn("[{0}] TF Error tranforming point from {1} to {2}".format(rospy.get_name(),
                                                                                        pt_msg.header.frame_id,
                                                                                        self.camera_ik.base_frame))
        target = np.array([pt_msg.point.x, pt_msg.point.y, pt_msg.point.z])
        # Get IK Solution
        current_angles = list(copy.copy(self.joint_angles_act))
        iksol = self.camera_ik.lookat_ik(target, current_angles[:len(self.camera_ik.arm_indices)])
        # Start with current angles, then replace angles in camera IK with IK solution
        # Use desired joint angles to avoid sagging over time
        jtp = JointTrajectoryPoint()
        jtp.positions = list(copy.copy(self.joint_angles_des))
        for i, joint_name in enumerate(self.camera_ik.arm_joint_names):
            jtp.positions[self.joint_names.index(joint_name)] = iksol[i]
        # Scale the duration so the fastest-moving joint respects max_vel_rot.
        deltas = np.abs(np.subtract(jtp.positions, current_angles))
        duration = np.max(np.divide(deltas, self.max_vel_rot))
        jtp.time_from_start = rospy.Duration(duration)
        jt = JointTrajectory()
        jt.joint_names = self.joint_names
        jt.points.append(jtp)
        rospy.loginfo("[{0}] Sending Joint Angles.".format(rospy.get_name()))
        self.joint_traj_pub.publish(jt)
示例5: main
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
def main():
    """Test node: transform a pose one metre ahead of /base_link into /map and print it."""
    rospy.init_node('tf_test')
    rospy.loginfo("Node for testing actionlib server")
    #from_link = '/head_color_camera_l_link'
    #to_link = '/base_link'
    from_link = '/base_link'
    to_link = '/map'
    tfl = TransformListener()
    rospy.sleep(5)  # give the listener time to fill its transform buffer
    t = rospy.Time(0)  # Time(0): use the latest available transform
    mpose = PoseStamped()
    mpose.pose.position.x = 1
    mpose.pose.position.y = 0
    mpose.pose.position.z = 0
    mpose.pose.orientation.x = 0
    mpose.pose.orientation.y = 0
    mpose.pose.orientation.z = 0
    # BUG FIX: (0,0,0,0) is not a valid quaternion (zero norm); use identity w=1.
    mpose.pose.orientation.w = 1
    mpose.header.frame_id = from_link
    mpose.header.stamp = rospy.Time.now()
    rospy.sleep(5)
    mpose_transf = None
    rospy.loginfo('Waiting for transform for some time...')
    tfl.waitForTransform(to_link, from_link, t, rospy.Duration(5))
    if tfl.canTransform(to_link, from_link, t):
        mpose_transf = tfl.transformPose(to_link, mpose)
        print(mpose_transf)
    else:
        rospy.logerr('Transformation is not possible!')
        sys.exit(0)
示例6: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class Demo:
    """Cycles a vehicle through a list of waypoint goals published on 'goal'."""
    def __init__(self, goals):
        # goals: sequence of (x, y, z, yaw, dwell_seconds) tuples.
        rospy.init_node("demo", anonymous=True)
        self.frame = rospy.get_param("~frame")  # frame tracked against /world
        self.pubGoal = rospy.Publisher("goal", PoseStamped, queue_size=1)
        self.listener = TransformListener()
        self.goals = goals
        self.goalIndex = 0
    def run(self):
        """Continuously publish the current goal; advance once the vehicle is close."""
        self.listener.waitForTransform("/world", self.frame, rospy.Time(), rospy.Duration(5.0))
        goal = PoseStamped()
        goal.header.seq = 0
        goal.header.frame_id = "world"
        while not rospy.is_shutdown():
            goal.header.seq += 1
            goal.header.stamp = rospy.Time.now()
            goal.pose.position.x = self.goals[self.goalIndex][0]
            goal.pose.position.y = self.goals[self.goalIndex][1]
            goal.pose.position.z = self.goals[self.goalIndex][2]
            quaternion = tf.transformations.quaternion_from_euler(0, 0, self.goals[self.goalIndex][3])
            goal.pose.orientation.x = quaternion[0]
            goal.pose.orientation.y = quaternion[1]
            goal.pose.orientation.z = quaternion[2]
            goal.pose.orientation.w = quaternion[3]
            self.pubGoal.publish(goal)
            t = self.listener.getLatestCommonTime("/world", self.frame)
            if self.listener.canTransform("/world", self.frame, t):
                position, quaternion = self.listener.lookupTransform("/world", self.frame, t)
                rpy = tf.transformations.euler_from_quaternion(quaternion)
                # Goal reached: within 0.3 m on each axis and 10 deg in yaw.
                # Dwell for the goal's configured time, then move to the next goal.
                if (
                    math.fabs(position[0] - self.goals[self.goalIndex][0]) < 0.3
                    and math.fabs(position[1] - self.goals[self.goalIndex][1]) < 0.3
                    and math.fabs(position[2] - self.goals[self.goalIndex][2]) < 0.3
                    and math.fabs(rpy[2] - self.goals[self.goalIndex][3]) < math.radians(10)
                    and self.goalIndex < len(self.goals) - 1
                ):
                    rospy.sleep(self.goals[self.goalIndex][4])
                    self.goalIndex += 1
示例7: TwistToPoseConverter
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class TwistToPoseConverter(object):
    """Converts incoming twist commands into absolute pose goals in /torso_lift_link."""
    def __init__(self):
        self.ee_frame = rospy.get_param('~ee_frame', "/l_gripper_tool_frame")
        self.twist_sub = rospy.Subscriber('/twist_in', TwistStamped, self.twist_cb)
        self.pose_pub = rospy.Publisher('/pose_out', PoseStamped)
        self.tf_listener = TransformListener()
    def get_ee_pose(self, frame='/torso_lift_link', time=None):
        """Get current end effector pose from tf.

        Returns (pos, quat) tuples, or (None, None) on TF failure.
        """
        try:
            now = rospy.Time.now() if time is None else time
            self.tf_listener.waitForTransform(frame, self.ee_frame, now, rospy.Duration(4.0))
            pos, quat = self.tf_listener.lookupTransform(frame, self.ee_frame, now)
        except (LookupException, ConnectivityException, ExtrapolationException, Exception) as e:
            rospy.logwarn("[%s] TF Failure getting current end-effector pose:\r\n %s"
                          %(rospy.get_name(), e))
            return None, None
        return pos, quat
    def twist_cb(self, ts):
        """Get current end effector pose and augment with twist command."""
        cur_pos, cur_quat = self.get_ee_pose(ts.header.frame_id)
        # NOTE(review): cur_pos/cur_quat may be (None, None) if TF failed above;
        # the arithmetic below would then raise -- confirm this is acceptable.
        ps = PoseStamped()
        ps.header.frame_id = ts.header.frame_id
        ps.header.stamp = ts.header.stamp
        # Linear twist components are applied as a positional offset.
        ps.pose.position.x = cur_pos[0] + ts.twist.linear.x
        ps.pose.position.y = cur_pos[1] + ts.twist.linear.y
        ps.pose.position.z = cur_pos[2] + ts.twist.linear.z
        # Angular twist becomes a relative rotation composed onto the current orientation.
        twist_quat = trans.quaternion_from_euler(ts.twist.angular.x,
                                                 ts.twist.angular.y,
                                                 ts.twist.angular.z)
        final_quat = trans.quaternion_multiply(twist_quat, cur_quat)
        ps.pose.orientation = Quaternion(*final_quat)
        try:
            self.tf_listener.waitForTransform('/torso_lift_link', ps.header.frame_id,
                                              ps.header.stamp, rospy.Duration(3.0))
            pose_out = self.tf_listener.transformPose('/torso_lift_link', ps)
            self.pose_pub.publish(pose_out)
        except (LookupException, ConnectivityException, ExtrapolationException, Exception) as e:
            rospy.logwarn("[%s] TF Failure transforming goal pose to torso_lift_link:\r\n %s"
                          %(rospy.get_name(), e))
示例8: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
#.........这里部分代码省略.........
# Find circles given the camera image
dp = 1.1
minDist = 90
circles = cv2.HoughCircles(color_dst, cv2.cv.CV_HOUGH_GRADIENT, dp, minDist)
# If no circles are detected then exit the program
if circles == None:
print "No circles found using these parameters"
sys.exit()
circles = numpy.uint16(numpy.around(circles))
# Draw the center of the circle closest to the top
ycoord = []
for i in circles[0,:]:
ycoord.append(i[1])
# Find the top circle in the frame
top_circ_y_coor = min(ycoord)
x_coor = 0
y_coor = 0
for i in circles[0,:]:
if i[1] == top_circ_y_coor:
# draw the center of the circle
#cv2.circle(self.rgb_image,(i[0],i[1]),2,(0,0,255),3)
# draw outer circle
#cv2.circle(self.rgb_image,(i[0],i[1]),i[2],(0,255,0),2)
x_coor = i[0]
y_coor = i[1]
cv2.circle(self.rgb_image,(327,247),2,(0,0,255),3)
# Set the coordinates for the center of the circle
self.center = (x_coor, y_coor)
#self.center = (328,248)
cv2.imshow('rgb', self.rgb_image)
cv2.waitKey(1)
def on_depth(self, image):
    """Depth-image callback: read the depth under the detected circle center,
    project it along the camera ray, and publish the 3-D point in base_link."""
    # Use numpy so that cv2 can use image
    self.depth_image = numpy.fromstring(image.data, dtype='float32').reshape((480,640))
    nmask = numpy.isnan(self.depth_image)
    #Find the minimum and the maximum of the depth image
    Dmin = self.depth_image[~nmask].min()
    Dmax = self.depth_image[~nmask].max()
    # If a circle has been found find its depth and apply that to the ray
    if self.center!=None:
        ray = self.cam_model.projectPixelTo3dRay(self.center)
        # center is (x, y) pixel coords; depth image is indexed [row][col].
        depth = self.depth_image[self.center[1]][self.center[0]]
        # If the depth is not a number exit
        if math.isnan(depth):
            print "Incomplete information on depth at this point"
            return
        # Otherwise mulitply the depth by the unit vector of the ray
        else:
            print "depth", depth
            cam_coor = [depth*ray[0],depth*ray[1],depth*ray[2]]
            #print "camera frame coordinate:", cam_coor
    else:
        return
    # Rescale to [0,1]
    cv2.imshow('depth', self.depth_image / 2.0)
    cv2.waitKey(1)
    # Only use depth if the RGB image is running
    if self.rgb_image_time is None:
        rospy.loginfo('not using depth cause no RGB yet')
        return
    time_since_rgb = image.header.stamp - self.rgb_image_time
    # Only use depth if it is close in time to the RGB image
    if time_since_rgb > rospy.Duration(0.5):
        rospy.loginfo('not using depth because time since RGB is ' + str(time_since_rgb))
        #return
    # Find transform from camera frame to world frame
    self.tf_listener.waitForTransform(self.cam_model.tfFrame(), "base_link",
                                      image.header.stamp, rospy.Duration(1.0))
    # Set up the point to be published
    self.return_point.header.stamp = image.header.stamp
    self.return_point.header.frame_id = self.cam_model.tfFrame()
    self.return_point.point.x = cam_coor[0]
    self.return_point.point.y = cam_coor[1]
    self.return_point.point.z = cam_coor[2]
    # Transform point to the baselink frame
    self.return_point = self.tf_listener.transformPoint("base_link", self.return_point)
    print "world frame coordinate:", self.return_point.point.x, \
        self.return_point.point.y,self.return_point.point.z, "\n"
    self.point_pub.publish(self.return_point)
示例9: NormalApproach
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class NormalApproach():
    """Generates standoff approach poses normal to a selected 3-D point for the right arm."""
    standoff = 0.368 #0.2 + 0.168 (dist from wrist to fingertips)
    frame = 'base_footprint'
    # Last received target position/orientation (defaults: origin, identity).
    px = py = pz = 0;
    qx = qy = qz = 0;
    qw = 1;
    def __init__(self):
        rospy.init_node('normal_approach_right')
        rospy.Subscriber('norm_approach_right', PoseStamped, self.update_frame)
        rospy.Subscriber('r_hand_pose', PoseStamped, self.update_curr_pose)
        rospy.Subscriber('wt_lin_move_right',Float32, self.linear_move)
        self.goal_out = rospy.Publisher('wt_right_arm_pose_commands', PoseStamped)
        self.move_arm_out = rospy.Publisher('wt_move_right_arm_goals', PoseStamped)
        self.tf = TransformListener()
        self.tfb = TransformBroadcaster()
        self.wt_log_out = rospy.Publisher('wt_log_out', String )
    def update_curr_pose(self, msg):
        # Cache the most recent right-hand pose (used by linear_move).
        self.currpose = msg;
    def update_frame(self, pose):
        """Broadcast the received pose as 'pixel_3d_frame' and compute an approach."""
        self.standoff = 0.368
        self.frame = pose.header.frame_id
        self.px = pose.pose.position.x
        self.py = pose.pose.position.y
        self.pz = pose.pose.position.z
        self.qx = pose.pose.orientation.x
        self.qy = pose.pose.orientation.y
        self.qz = pose.pose.orientation.z
        self.qw = pose.pose.orientation.w
        self.tfb.sendTransform((self.px,self.py,self.pz),(self.qx,self.qy,self.qz,self.qw), rospy.Time.now(), "pixel_3d_frame", self.frame)
        self.find_approach(pose)
    def find_approach(self, msg):
        """Build a standoff goal along +z of pixel_3d_frame and publish it as a
        motion-planning goal in torso_lift_link."""
        self.pose_in = msg
        self.tf.waitForTransform('pixel_3d_frame','base_footprint', rospy.Time(0), rospy.Duration(3.0))
        self.tfb.sendTransform((self.pose_in.pose.position.x, self.pose_in.pose.position.y, self.pose_in.pose.position.z),(self.pose_in.pose.orientation.x, self.pose_in.pose.orientation.y, self.pose_in.pose.orientation.z, self.pose_in.pose.orientation.w), rospy.Time.now(), "pixel_3d_frame", self.pose_in.header.frame_id)
        self.tf.waitForTransform('pixel_3d_frame','r_wrist_roll_link', rospy.Time.now(), rospy.Duration(3.0))
        goal = PoseStamped()
        goal.header.frame_id = 'pixel_3d_frame'
        goal.header.stamp = rospy.Time.now()
        goal.pose.position.z = self.standoff
        # (0, sqrt(2)/2, 0, sqrt(2)/2) is a 90-degree rotation about the y axis.
        goal.pose.orientation.x = 0
        goal.pose.orientation.y = 0.5*math.sqrt(2)
        goal.pose.orientation.z = 0
        goal.pose.orientation.w = 0.5*math.sqrt(2)
        #print "Goal:\r\n %s" %goal
        self.tf.waitForTransform(goal.header.frame_id, 'torso_lift_link', rospy.Time.now(), rospy.Duration(3.0))
        appr = self.tf.transformPose('torso_lift_link', goal)
        # print "Appr: \r\n %s" %appr
        self.wt_log_out.publish(data="Normal Approach with right hand: Trying to move WITH motion planning")
        self.move_arm_out.publish(appr)
    def linear_move(self, msg):
        """Advance the gripper msg.data metres along its own x axis."""
        print "Linear Move: Right Arm: %s m Step" %msg.data
        # NOTE(review): assumes update_curr_pose has fired at least once, otherwise
        # self.currpose is unset -- confirm startup ordering.
        self.tf.waitForTransform(self.currpose.header.frame_id, 'r_wrist_roll_link', self.currpose.header.stamp, rospy.Duration(3.0))
        newpose = self.tf.transformPose('r_wrist_roll_link', self.currpose)
        newpose.pose.position.x += msg.data
        step_goal = self.tf.transformPose(self.currpose.header.frame_id, newpose)
        self.goal_out.publish(step_goal)
示例10: Shaving_manager
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
#.........这里部分代码省略.........
rospy.loginfo("Moving Across Ellipsoid")
self.wt_log_out.publish("Moving Across Ellipsoid")
def disc_change_pose(self, step):
    """Step through the discrete shave-pose sequence by step.data, clamped to [0, 7]."""
    self.new_pose = True
    self.selected_pose += step.data
    if self.selected_pose > 7:
        self.selected_pose = 7
        self.wt_log_out.publish(data="Final position in sequence, there is no next position")
        return
    elif self.selected_pose < 0:
        self.selected_pose = 0
        self.wt_log_out.publish(data="First position in sequence, there is no previous position")
        return
    rospy.loginfo("Moving to "+self.poses[self.selected_pose][0])
    self.wt_log_out.publish(data="Moving to "+self.poses[self.selected_pose][0])
    # Look up the ellipsoidal pose for the selected named position and move there.
    ellipse_pose = self.get_pose_proxy(self.poses[self.selected_pose][0], self.poses[self.selected_pose][1]).pose
    self.adjust_pose(ellipse_pose)
def set_shave_pose(self, num):
    """Jump directly to shave pose number num.data and move there."""
    self.selected_pose = num.data
    rospy.loginfo("Moving to "+self.poses[self.selected_pose][0])
    self.wt_log_out.publish(data="Moving to "+self.poses[self.selected_pose][0])
    ellipse_pose = self.get_pose_proxy(self.poses[self.selected_pose][0], self.poses[self.selected_pose][1]).pose
    self.adjust_pose(ellipse_pose)
def adjust_pose(self, ellipse_pose):
    """Move the left arm to ellipse_pose via an approach point with force-guarded contact.

    Sequence: switch to Cartesian control, cancel running force-guarded goals,
    transform the goal into torso_lift_link, apply the tool-specific offset,
    approach, re-zero the wrench, make guarded contact, then hold; escape or
    retreat depending on the hold result.
    """
    self.controller_switcher.carefree_switch('l','%s_cart')
    self.jtarms.ft_move_client.cancel_all_goals() #Stop what we're doing, as we're about to do something different, and stopping soon may be desired
    self.jtarms.ft_hold_client.cancel_all_goals()
    #self.test_out.publish(ellipse_pose)
    print "New Position Received, should stop current action"
    self.tf.waitForTransform(ellipse_pose.header.frame_id, 'torso_lift_link', ellipse_pose.header.stamp, rospy.Duration(3.0))
    torso_pose = self.tf.transformPose('torso_lift_link', ellipse_pose)
    # Tool-specific goal/approach offsets along the approach axis.
    if TOOL == 'inline':
        goal_pose = self.jtarms.pose_frame_move(torso_pose, -(self.hand_offset + self.inline_tool_offset), arm=0)
        approach_pose = self.jtarms.pose_frame_move(goal_pose, -0.15, arm=0)
    elif TOOL == '90':
        goal_pose = self.jtarms.pose_frame_move(torso_pose, 0.015)
    elif TOOL == '45':
        goal_pose = torso_pose
        approach_pose = self.jtarms.pose_frame_move(goal_pose, -0.15, arm=0)
    # NOTE(review): for TOOL == '90' no approach_pose is assigned above, so the
    # next line would raise NameError -- confirm whether that branch is ever used.
    traj_to_approach = self.jtarms.build_trajectory(approach_pose, arm=0)
    traj_appr_to_goal = self.jtarms.build_trajectory(goal_pose, approach_pose, arm=0)
    self.ft_move(traj_to_approach)
    # Zero the force/torque sensor just before making contact.
    self.rezero_wrench.publish(data=True)
    self.ft_move(traj_appr_to_goal, ignore_ft=False)
    retreat_traj = self.jtarms.build_trajectory(approach_pose, arm=0)
    escape_traj = self.jtarms.build_trajectory(approach_pose, arm=0, steps=30)
    self.jtarms.ft_hold_client.send_goal(FtHoldGoal(2.,8,10,rospy.Duration(10)))#activity_thresh, z_unsafe, mag_unsafe, timeout
    print "Waiting for hold result"
    finished = self.jtarms.ft_hold_client.wait_for_result(rospy.Duration(0))
    print "Finished before Timeout: %s" %finished
    print "Done Waiting"
    hold_result = self.jtarms.ft_hold_client.get_result()
    print "Hold Result:"
    print hold_result
    if hold_result.unsafe:
        self.ft_move(escape_traj)
    elif hold_result.timeout:
        self.ft_move(retreat_traj)
def ft_move(self, traj, ft_thresh=2, ignore_ft=True):
    # Forward the trajectory to the force/torque-guarded move action server.
    self.jtarms.ft_move_client.send_goal(FtMoveGoal(traj, ft_thresh, ignore_ft),feedback_cb=self.ft_move_feedback)
示例11: id
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
from visualization_msgs.msg import MarkerArray
import operator
from tf.transformations import euler_from_quaternion
## reality: make sure that person to align to is not robot itself
## align to person with lowest id (except robot)
# Node setup: rotate the base toward a tracked person and publish visual markers.
rospy.init_node('face_to_person')
sss=simple_script_server()
#sss.init("base")
#sss.move("arm","folded")
listener = TransformListener()
# listener1 = TransformListener()
publisher = rospy.Publisher("followedPerson", MarkerArray)
publisher1 = rospy.Publisher("diameter", MarkerArray)
# Make sure /map -> /base_link is available before callbacks start using it.
listener.waitForTransform('/map','/base_link',rospy.Time(),rospy.Duration(5))
# Mutable module-level state shared with the callbacks below.
target_angle = 0
trackedPerson = ""
personsToFollow = ['Richard'] # can be a list
def mydist(rob, hum):
    """Euclidean distance between the robot position and a tracked human.

    rob: indexable (x, y) pair; hum: message exposing hum.location.point.{x,y}.
    """
    # math.hypot is equivalent to sqrt(dx**2 + dy**2) but avoids intermediate
    # overflow/underflow for extreme coordinates.
    return math.hypot(rob[0] - hum.location.point.x,
                      rob[1] - hum.location.point.y)
def trackedHumans_callback(data):
print "calllback"
global trackedPerson
global target_angle
示例12: Person_Follow
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class Person_Follow(object):
    """Follow the nearest object ahead using /scan, steering/driving via cmd_vel."""

    def __init__(self):
        rospy.init_node('person_follow')
        # Reusable velocity command, zeroed out.
        self.twist = Twist()
        self.twist.linear.x = 0; self.twist.linear.y = 0; self.twist.linear.z = 0
        self.twist.angular.x = 0; self.twist.angular.y = 0; self.twist.angular.z = 0
        self.dist0 = 0       # most recent range straight ahead (scan index 0)
        self.target = 1      # desired following distance (m)
        # PI gains (the integral terms are not used in run() below).
        self.p_angle = .01
        self.i_angle = .0005
        self.p_d = 1
        self.i_d = .005
        self.error_angle_sum = 0
        self.error_d_sum = 0
        # Centroid of scan returns ahead of the robot, in the base_link frame.
        self.centroid = Point(x=0, y=0)
        self.header_msg = Header(stamp=rospy.Time.now(), frame_id='base_link')

    def Find_Pos(self, Data):
        """Laser callback: centroid of valid returns within +/-15 deg of dead ahead."""
        valid = {}
        for i in range(len(Data.ranges)):
            if Data.ranges[i] != 0:   # a range of 0 means "no return"
                valid[i] = Data.ranges[i]
        d_sum_x = 0
        d_sum_y = 0
        d_count = 0
        for key in valid:
            if key > 345 or key < 15:   # +/-15 degrees around the front
                d_sum_x += valid[key]*math.cos(math.radians(key))
                d_sum_y += valid[key]*math.sin(math.radians(key))
                d_count += 1
        self.dist0 = Data.ranges[0]
        # ROBUSTNESS FIX: avoid ZeroDivisionError when nothing is in the sector.
        if d_count == 0:
            return
        self.centroid.x = d_sum_x/d_count
        self.centroid.y = d_sum_y/d_count

    def run(self):
        """Main loop: steer toward the centroid and publish it in the odom frame."""
        rospy.Subscriber("/scan", LaserScan, self.Find_Pos, queue_size=10)
        self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.t = TransformListener()
        self.pub_centroid = rospy.Publisher('/centroid', PointStamped, queue_size=10)
        r = rospy.Rate(30)
        while not rospy.is_shutdown():
            theta = 0
            if self.centroid.x != 0:
                # Proportional steering toward the centroid bearing.
                theta = 3*math.atan2(self.centroid.y, self.centroid.x)
                print(theta)
            self.twist.angular.z = theta
            print(self.centroid.x)
            # Drive to keep ~0.5 m distance; push forward if nothing is ahead.
            if self.dist0 == 0:
                speed = 1
            else:
                speed = .1*(self.dist0 - .5)
            self.twist.linear.x = speed
            self.pub.publish(self.twist)
            #print self.t.allFramesAsString()
            try:
                # BUG FIX: the original line was syntactically invalid (a stray
                # waitForTransform call spliced into the PointStamped constructor);
                # build the mirrored-centroid point as intended.
                self.header_msg.stamp = rospy.Time.now()
                point_stamped_msg = PointStamped(header=self.header_msg,
                                                 point=Point(x=-self.centroid.x, y=-self.centroid.y))
                self.t.waitForTransform('odom', 'base_link', self.header_msg.stamp, rospy.Duration(0.5))
                point_stamped_msg_odom = self.t.transformPoint('/odom', point_stamped_msg)
                self.pub_centroid.publish(point_stamped_msg_odom)
            except Exception as inst:
                print(inst)
            # BUG FIX: honor the 30 Hz Rate (it was created but never slept on,
            # making this a busy loop).
            r.sleep()
示例13: MarkerProcessor
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
class MarkerProcessor(object):
def __init__(self, use_dummy_transform=False):
    """Set up STAR-marker localization: marker map, subscribers, publishers, TF."""
    rospy.init_node('star_center_positioning_node')
    if use_dummy_transform:
        self.odom_frame_name = ROBOT_NAME+"_odom_dummy"
    else:
        self.odom_frame_name = ROBOT_NAME+"_odom"
    # Known marker locations: id -> MarkerLocator((x, y) metres, yaw).
    # Distances below are feet * 12 in/ft * 2.54 cm/in / 100 = metres.
    self.marker_locators = {}
    self.add_marker_locator(MarkerLocator(0,(-6*12*2.54/100.0,0),0))
    self.add_marker_locator(MarkerLocator(1,(0.0,0.0),pi))
    self.add_marker_locator(MarkerLocator(2,(0.0,10*12*2.54/100.0),0))
    self.add_marker_locator(MarkerLocator(3,(-6*12*2.54/100.0,6*12*2.54/100.0),0))
    self.add_marker_locator(MarkerLocator(4,(0.0,6*12*2.54/100.0),pi))
    # Calibration parameters; also adjustable at runtime via the Server below.
    self.pose_correction = rospy.get_param('~pose_correction',0.0)
    self.phase_offset = rospy.get_param('~phase_offset',0.0)
    self.is_flipped = rospy.get_param('~is_flipped',False)
    srv = Server(STARPoseConfig, self.config_callback)
    self.marker_sub = rospy.Subscriber("ar_pose_marker",
                                       ARMarkers,
                                       self.process_markers)
    self.odom_sub = rospy.Subscriber(ROBOT_NAME+"/odom", Odometry, self.process_odom, queue_size=10)
    self.star_pose_pub = rospy.Publisher(ROBOT_NAME+"/STAR_pose",PoseStamped,queue_size=10)
    self.continuous_pose = rospy.Publisher(ROBOT_NAME+"/STAR_pose_continuous",PoseStamped,queue_size=10)
    self.tf_listener = TransformListener()
    self.tf_broadcaster = TransformBroadcaster()
def add_marker_locator(self, marker_locator):
    """Register *marker_locator* so it can be looked up by its marker id."""
    key = marker_locator.id
    self.marker_locators[key] = marker_locator
def config_callback(self, config, level):
    """Dynamic-reconfigure hook: cache the tunables and echo the config back."""
    self.pose_correction = config.pose_correction
    self.phase_offset = config.phase_offset
    return config
def process_odom(self, msg):
    """Republish odometry continuously in the STAR frame (best effort)."""
    # Time(0) means: use the latest available transform.
    p = PoseStamped(header=Header(stamp=rospy.Time(0), frame_id=self.odom_frame_name),
                    pose=msg.pose.pose)
    try:
        STAR_pose = self.tf_listener.transformPose("STAR", p)
        STAR_pose.header.stamp = msg.header.stamp
        self.continuous_pose.publish(STAR_pose)
    except Exception as inst:
        print "error is", inst
def process_markers(self, msg):
    """Convert detected AR markers into robot poses in the STAR frame and publish."""
    for marker in msg.markers:
        # do some filtering basd on prior knowledge
        # we know the approximate z coordinate and that all angles but yaw should be close to zero
        euler_angles = euler_from_quaternion((marker.pose.pose.orientation.x,
                                              marker.pose.pose.orientation.y,
                                              marker.pose.pose.orientation.z,
                                              marker.pose.pose.orientation.w))
        angle_diffs = TransformHelpers.angle_diff(euler_angles[0],pi), TransformHelpers.angle_diff(euler_angles[1],0)
        print angle_diffs, marker.pose.pose.position.z
        # Accept only known markers seen at plausible height with near-level roll/pitch.
        if (marker.id in self.marker_locators and
            3.0 <= marker.pose.pose.position.z <= 3.6 and
            fabs(angle_diffs[0]) <= .4 and
            fabs(angle_diffs[1]) <= .4):
            print "FOUND IT!"
            locator = self.marker_locators[marker.id]
            xy_yaw = list(locator.get_camera_position(marker))
            if self.is_flipped:
                print "WE ARE FLIPPED!!!"
                xy_yaw[2] += pi
            print self.pose_correction
            print self.phase_offset
            # Apply calibration offset along the (phase-shifted) heading.
            xy_yaw[0] += self.pose_correction*cos(xy_yaw[2]+self.phase_offset)
            xy_yaw[1] += self.pose_correction*sin(xy_yaw[2]+self.phase_offset)
            orientation_tuple = quaternion_from_euler(0,0,xy_yaw[2])
            pose = Pose(position=Point(x=-xy_yaw[0],y=-xy_yaw[1],z=0),
                        orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1], z=orientation_tuple[2], w=orientation_tuple[3]))
            # TODO: use markers timestamp instead of now() (unfortunately, not populated currently by ar_pose)
            pose_stamped = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="STAR"),pose=pose)
            # TODO: use frame timestamp instead of now()
            self.star_pose_pub.publish(pose_stamped)
            self.fix_STAR_to_odom_transform(pose_stamped)
def fix_STAR_to_odom_transform(self, msg):
    """ Super tricky code to properly update map to odom transform... do not modify this... Difficulty level infinity. """
    (translation, rotation) = TransformHelpers.convert_pose_inverse_transform(msg.pose)
    p = PoseStamped(pose=TransformHelpers.convert_translation_rotation_to_pose(translation,rotation),header=Header(stamp=rospy.Time(),frame_id=ROBOT_NAME+"_base_link"))
    try:
        self.tf_listener.waitForTransform(ROBOT_NAME+"_odom",ROBOT_NAME+"_base_link",rospy.Time(),rospy.Duration(1.0))
    except Exception as inst:
        print "whoops", inst
        return
    print "got transform"
    # Cache the odom->STAR correction for broadcast_last_transform().
    self.odom_to_STAR = self.tf_listener.transformPose(ROBOT_NAME+"_odom", p)
    (self.translation, self.rotation) = TransformHelpers.convert_pose_inverse_transform(self.odom_to_STAR.pose)
def broadcast_last_transform(self):
""" Make sure that we are always broadcasting the last map to odom transformation.
This is necessary so things like move_base can work properly. """
if not(hasattr(self,'translation') and hasattr(self,'rotation')):
#.........这里部分代码省略.........
示例14: ORKTabletop
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
#.........这里部分代码省略.........
min_y = table_corners[i].y
table_dim = Point()
# TODO: if we don't (can't!) compute the height, should we at least give it non-zero depth?
# (would also require shifting the observed centroid down by table_dim.z/2)
table_dim.z = 0.0
table_dim.x = abs(max_x - min_x)
table_dim.y = abs(max_y - min_y)
print "Dimensions: ", table_dim.x, table_dim.y
# Temporary frame used for transformations
table_link = 'table_link'
# centroid of a table in table_link frame
centroid = PoseStamped()
centroid.header.frame_id = table_link
centroid.header.stamp = table_pose.header.stamp
centroid.pose.position.x = (max_x + min_x)/2.
centroid.pose.position.y = (max_y + min_y)/2.
centroid.pose.position.z = 0.0
centroid.pose.orientation.x = 0.0
centroid.pose.orientation.y = 0.0
centroid.pose.orientation.z = 0.0
centroid.pose.orientation.w = 1.0
# generate transform from table_pose to our newly-defined table_link
tt = TransformStamped()
tt.header = table_pose.header
tt.child_frame_id = table_link
tt.transform.translation = table_pose.pose.position
tt.transform.rotation = table_pose.pose.orientation
self.tl.setTransform(tt)
self.tl.waitForTransform(table_link, table_pose.header.frame_id, table_pose.header.stamp, rospy.Duration(3.0))
if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp):
centroid_table_pose = self.tl.transformPose(table_pose.header.frame_id, centroid)
self.pose_pub.publish(centroid_table_pose)
else:
rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link)
return
# transform each object into desired frame; add to list of clusters
cluster_list = []
for i in range (data.objects.__len__()):
rospy.loginfo("Point clouds %s", data.objects[i].point_clouds.__len__())
pc = PointCloud2()
pc = data.objects[i].point_clouds[0]
arr = pointclouds.pointcloud2_to_array(pc, 1)
arr_xyz = pointclouds.get_xyz_points(arr)
arr_xyz_trans = []
for j in range (arr_xyz.__len__()):
ps = PointStamped();
ps.header.frame_id = table_link
ps.header.stamp = table_pose.header.stamp
ps.point.x = arr_xyz[j][0]
ps.point.y = arr_xyz[j][1]
ps.point.z = arr_xyz[j][2]
if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp):
ps_in_kinect_frame = self.tl.transformPoint(table_pose.header.frame_id, ps)
else:
rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link)
return
arr_xyz_trans.append([ps_in_kinect_frame.point.x, ps_in_kinect_frame.point.y, ps_in_kinect_frame.point.z])
示例15: __init__
# 需要导入模块: from tf import TransformListener [as 别名]
# 或者: from tf.TransformListener import waitForTransform [as 别名]
#.........这里部分代码省略.........
self._head_buff.put((copy.deepcopy(p), 0.1, True))
p.point.y = sign * random.uniform(1.5, 0.5)
p.point.z = random.uniform(1.7, 0.2)
self._head_buff.put((copy.deepcopy(p), 0.1, True))
p.point.y = -1 * sign * random.uniform(1.5, 0.5)
p.point.z = random.uniform(1.7, 0.2)
self._head_buff.put((copy.deepcopy(p), 0.1, True))
rospy.loginfo("Looking around")
rospy.sleep(1)
def getPointDist(self, pt):
    """Distance (m) from the robot's current pose to the stamped point *pt*.

    Returns None if TF cannot express the robot pose in self._robot_frame.
    """
    assert(self.face is not None)
    # First, get my position: an identity pose in base_link, stamped slightly
    # in the past so the transform is more likely to be available.
    p = PoseStamped()
    p.header.frame_id = "base_link"
    p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
    p.pose.position.x = 0
    p.pose.position.y = 0
    p.pose.position.z = 0
    p.pose.orientation.x = 0
    p.pose.orientation.y = 0
    p.pose.orientation.z = 0
    p.pose.orientation.w = 1
    try:
        self._tf.waitForTransform(p.header.frame_id, self._robot_frame, p.header.stamp, rospy.Duration(2))
        p = self._tf.transformPose(self._robot_frame, p)
    # FIX: a bare "except:" also swallows SystemExit/KeyboardInterrupt; catch
    # Exception so shutdown signals still propagate.
    except Exception:
        rospy.logerr("TF error!")
        return None
    return sqrt(pow(p.pose.position.x - pt.point.x, 2) + pow(p.pose.position.y - pt.point.y, 2) + pow(p.pose.position.z - pt.point.z, 2))
def getPoseStamped(self, group, c):
assert(len(c) == 6)
p = PoseStamped()
p.header.frame_id = "base_link"
p.header.stamp = rospy.Time.now() - rospy.Duration(0.5)
p.pose.position.x = c[0]
p.pose.position.y = c[1]
p.pose.position.z = c[2]
quat = tf.transformations.quaternion_from_euler(c[3], c[4], c[5])
p.pose.orientation.x = quat[0]
p.pose.orientation.y = quat[1]
p.pose.orientation.z = quat[2]
p.pose.orientation.w = quat[3]
try: