This page collects typical usage examples of the Python method tf.TransformListener.transform. If you have been wondering what TransformListener.transform does, how to call it, or what real code that uses it looks like, the hand-picked example below may help. You can also read further about the containing class, tf.TransformListener.
One code example of TransformListener.transform is shown below; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your votes help the system recommend better Python code samples.
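Before the full example, a brief caveat on the method itself: in many tf releases the classic tf.TransformListener exposes lookupTransform and transformPoint rather than a transform() method; the transform() convenience that most current ROS code calls lives on the tf2 buffer interface (tf2_ros.Buffer.transform). Below is a minimal sketch of that call, assuming current tf2 Python APIs; the node name and frame names are illustrative only, not taken from the example.

# Minimal sketch: transforming a point with the tf2 buffer's transform().
# Assumptions: tf2_ros / tf2_geometry_msgs are available; frame names are
# illustrative and not from the original example.
import rospy
import tf2_ros
import tf2_geometry_msgs  # registers geometry_msgs types with the buffer
from geometry_msgs.msg import PointStamped

rospy.init_node('transform_demo')
buffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(buffer)  # feeds /tf into the buffer

pt = PointStamped()
pt.header.frame_id = 'camera_link'  # hypothetical source frame
pt.header.stamp = rospy.Time(0)     # Time(0) = latest available transform
pt.point.x = 1.0

# transform the stamped point into the robot's base frame
pt_base = buffer.transform(pt, 'base_link', timeout=rospy.Duration(1.0))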
Example 1: sample_detection
# Required import: from tf import TransformListener [as alias]
# Or: from tf.TransformListener import transform [as alias]
# This snippet also relies on: numpy as np, cv2, rospy, cv_bridge.CvBridge,
# and stereo_msgs.msg.DisparityImage.
# ......... part of the code is omitted here .........
    # Tail of sample_detection: `d` and `detections` come from the omitted
    # code above. This block masks the disparity image to one detection's
    # convex hull and publishes it.
    mask = np.zeros_like(np.asarray(self.bridge.imgmsg_to_cv(Image, 'bgr8')))
    cv2.drawContours(mask, [detections[d]['hull']], -1, (255, 255, 255), -1)
    mask = mask[:, :, 0]
    # zero out disparity everywhere outside the hull
    masked_disp_img = cv2.multiply(mask.astype(np.float32), self.disp_img).astype(np.float32)
    disp_out = DisparityImage()
    disp_out.min_disparity = self.min_disparity
    disp_out.max_disparity = self.max_disparity
    disp_out.image = self.bridge.cv_to_imgmsg(cv2.cv.fromarray(masked_disp_img))
    disp_out.image.step = 2560  # row stride in bytes (640 px * 4-byte float32)
    disp_out.header = self.disp_header
    self.sample_disparity[d].publish(disp_out)
def handle_disp(self, DisparityImage):
    # cache the latest disparity image and its metadata
    # (the parameter name shadows the stereo_msgs DisparityImage class)
    self.disp_img = np.asarray(self.bridge.imgmsg_to_cv(DisparityImage.image))
    self.disp_header = DisparityImage.header
    self.min_disparity = DisparityImage.min_disparity
    self.max_disparity = DisparityImage.max_disparity
def find_samples(self, Image):
    self.img = np.asarray(self.bridge.imgmsg_to_cv(Image, 'bgr8'))
    self.debug_img = self.img.copy()
    # run MSER on the a and b chroma channels of the Lab image
    lab = cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB)
    a_regions = self.mser.detect(lab[:, :, 1], None)
    a_hulls = [cv2.convexHull(r.reshape(-1, 1, 2)) for r in a_regions]
    b_regions = self.mser.detect(lab[:, :, 2], None)
    b_hulls = [cv2.convexHull(r.reshape(-1, 1, 2)) for r in b_regions]
    detections = {}
    for s in self.samples:
        detections[s] = {}
        detections[s]['min_dist'] = self.samples[s]['min_dist']
        detections[s]['location'] = None
        if self.samples[s]['channel'] == 'a':
            for h in a_hulls:
                # Mahalanobis distance between the hull's mean Lab color
                # and this sample's color model
                mean = self.compute_color_mean(h, self.img, 'lab').astype(np.float32)
                cols = self.samples[s]['covariance']['cols']
                rows = self.samples[s]['covariance']['rows']
                model_inverse_covariance = np.linalg.inv(
                    np.asarray(self.samples[s]['covariance']['data'], np.float32).reshape(rows, cols))
                dist = cv2.Mahalanobis(mean, np.asarray(self.samples[s]['mean'], np.float32),
                                       model_inverse_covariance)
                if dist < detections[s]['min_dist']:
                    detections[s]['min_dist'] = dist
                    # centroid from image moments, in (x, y) order
                    moments = cv2.moments(h)
                    location = np.array([moments['m10'] / moments['m00'],
                                         moments['m01'] / moments['m00']])
                    detections[s]['location'] = location
                    detections[s]['hull'] = h
                    cv2.polylines(self.debug_img, [h], 1, (255, 0, 255), 3)
                    cv2.putText(self.debug_img, s, (int(location[0]), int(location[1])),
                                cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))
        elif self.samples[s]['channel'] == 'b':
            for h in b_hulls:
                # same test as above, against the b-channel hulls
                mean = self.compute_color_mean(h, self.img, 'lab').astype(np.float32)
                cols = self.samples[s]['covariance']['cols']
                rows = self.samples[s]['covariance']['rows']
                model_inverse_covariance = np.linalg.inv(
                    np.asarray(self.samples[s]['covariance']['data'], np.float32).reshape(rows, cols))
                dist = cv2.Mahalanobis(mean, np.asarray(self.samples[s]['mean'], np.float32),
                                       model_inverse_covariance)
                if dist < detections[s]['min_dist']:
                    detections[s]['min_dist'] = dist
                    # centroid from image moments, in (x, y) order
                    moments = cv2.moments(h)
                    location = np.array([moments['m10'] / moments['m00'],
                                         moments['m01'] / moments['m00']])
                    detections[s]['location'] = location
                    detections[s]['hull'] = h
                    cv2.polylines(self.debug_img, [h], 1, (255, 255, 0), 3)
                    cv2.putText(self.debug_img, s, (int(location[0]), int(location[1])),
                                cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))
    return detections
def handle_info(self, CameraInfo):
    # grab camera matrix and distortion model
    self.K = CameraInfo.K
    self.D = CameraInfo.D
    self.R = CameraInfo.R
    self.P = CameraInfo.P
    self.h = CameraInfo.height
    self.w = CameraInfo.width
    self.frame_id = CameraInfo.header.frame_id
def project_centroid(self, centroid):
    # project image coordinates into a ray from the camera; the intent is
    # to intersect that ray with the ground plane
    point = np.zeros((1, 1, 2))
    point[0, 0, 0] = centroid[0]
    point[0, 0, 1] = centroid[1]
    rospy.logerr(np.asarray(self.K).reshape((3, 3)))
    rect_point = cv2.undistortPoints(point, np.asarray(self.K).reshape((3, 3)), np.asarray(self.D))
    rospy.logerr(rect_point)
    # normalized image coordinates -> spherical angles of the viewing ray
    x = rect_point[0, 0, 0]
    y = rect_point[0, 0, 1]
    r = np.sqrt(x ** 2 + y ** 2)
    theta = np.arctan(r)
    phi = np.arctan2(y, x)
    # self.tf_listener.lookupTransform('/base_link', self.frame_id)
    self.tf_listener.transform('/base_link', self.frame_id)
def compute_color_mean(self, hull, img, color_space):
    # mean color inside the hull; for 'lab' or 'hsv' only the two
    # chromaticity channels are returned
    mask = np.zeros_like(img)
    cv2.drawContours(mask, [hull], -1, (255, 255, 255), -1)
    if color_space == 'rgb':
        mean = np.asarray(cv2.mean(img, mask[:, :, 0])[:3])
        return mean
    elif color_space == 'lab' or color_space == 'hsv':
        mean = np.asarray(cv2.mean(img, mask[:, :, 0])[1:3])
        return mean
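Note that project_centroid above stops after computing the ray angles theta and phi; the commented-out lookupTransform line hints at the intended ground-plane intersection that never made it into the snippet. A hedged sketch of how that step is commonly finished with the classic tf API follows; the helper name, the z = 0 ground-plane assumption, and the optical-frame convention are mine, not the original author's.

# Hypothetical continuation of project_centroid: intersect the viewing ray
# with the ground plane z = 0 expressed in base_link. Not part of the
# original example.
import numpy as np
import rospy
import tf

def intersect_ray_with_ground(tf_listener, frame_id, x, y):
    # (x, y) are normalized image coordinates from cv2.undistortPoints,
    # so in the camera's optical frame the viewing ray is (x, y, 1)
    tf_listener.waitForTransform('/base_link', frame_id, rospy.Time(0), rospy.Duration(1.0))
    trans, rot = tf_listener.lookupTransform('/base_link', frame_id, rospy.Time(0))
    R = tf.transformations.quaternion_matrix(rot)[:3, :3]
    origin = np.asarray(trans)                 # camera origin in base_link
    direction = R.dot(np.array([x, y, 1.0]))   # ray direction in base_link
    if abs(direction[2]) < 1e-6:
        return None                            # ray parallel to the ground
    t = -origin[2] / direction[2]              # solve origin.z + t * dir.z = 0
    if t < 0:
        return None                            # intersection behind the camera
    return origin + t * direction              # 3D ground-plane point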
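Separately, the gate at the heart of find_samples is a Mahalanobis distance test in Lab chroma space: a hull is accepted when its mean (a, b) color is close enough to a per-sample color model. Stripped of the ROS plumbing it reduces to the following; the mean, covariance, and threshold values are invented for illustration.

# Standalone illustration of the color gate used in find_samples; the
# numbers below are made up, not from the original sample models.
import numpy as np
import cv2

model_mean = np.array([150.0, 120.0], np.float32)            # (a, b) color model
model_cov = np.array([[25.0, 0.0], [0.0, 16.0]], np.float32)
inv_cov = np.linalg.inv(model_cov)                           # cv2.Mahalanobis takes the inverse covariance

observed = np.array([155.0, 118.0], np.float32)              # a hull's mean chroma
dist = cv2.Mahalanobis(observed, model_mean, inv_cov)
accepted = dist < 3.0                                        # min_dist-style threshold
print(dist, accepted)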