This page collects typical usage examples of the sensor_msgs.msg.CameraInfo.header field in Python. If you are wondering what CameraInfo.header is for, or how to use it, the selected examples below should help; you can also read further about the enclosing class, sensor_msgs.msg.CameraInfo.
Eleven code examples of CameraInfo.header are shown below, ordered by popularity by default; a minimal sketch of the pattern they share comes right below. You can upvote the examples you find useful, and your feedback helps the site recommend better Python examples.
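Before the individual examples, here is a minimal sketch of that shared pattern (node, topic and frame names and the resolution are placeholders, not taken from the examples): the CameraInfo reuses the header of the Image it describes, so both messages carry the same timestamp and frame_id.

import rospy
from sensor_msgs.msg import CameraInfo, Image

rospy.init_node('camera_info_header_demo')      # hypothetical node name
info_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

img = Image()
img.header.stamp = rospy.Time.now()
img.header.frame_id = 'camera'                  # assumed frame name

ci = CameraInfo()
ci.header = img.header                          # the pattern shown throughout this page
ci.width, ci.height = 640, 480                  # assumed resolution
info_pub.publish(ci)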
Example 1: __init__
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def __init__(self):
    rospy.init_node('image_publish')

    name = sys.argv[1]
    image = cv2.imread(name)
    # cv2.imshow("im", image)
    # cv2.waitKey(5)

    hz = rospy.get_param("~rate", 1)
    frame_id = rospy.get_param("~frame_id", "map")
    use_image = rospy.get_param("~use_image", True)
    rate = rospy.Rate(hz)

    self.ci_in = None
    ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                              self.camera_info_callback, queue_size=1)

    if use_image:
        pub = rospy.Publisher('image', Image, queue_size=1)
    ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

    msg = Image()
    msg.header.stamp = rospy.Time.now()
    msg.header.frame_id = frame_id
    msg.encoding = 'bgr8'
    msg.height = image.shape[0]
    msg.width = image.shape[1]
    msg.step = image.shape[1] * 3
    msg.data = image.tostring()
    if use_image:
        pub.publish(msg)

    ci = CameraInfo()
    ci.header = msg.header
    ci.height = msg.height
    ci.width = msg.width
    ci.distortion_model = "plumb_bob"
    # TODO(lucasw) need a way to set these values - have this node
    # subscribe to an input CameraInfo?
    ci.D = [0.0, 0.0, 0.0, 0, 0]
    ci.K = [500.0, 0.0, msg.width/2, 0.0, 500.0, msg.height/2, 0.0, 0.0, 1.0]
    ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    ci.P = [500.0, 0.0, msg.width/2, 0.0, 0.0, 500.0, msg.height/2, 0.0, 0.0, 0.0, 1.0, 0.0]
    # ci_pub.publish(ci)

    # TODO(lwalter) only run this if hz is positive,
    # otherwise wait for input trigger message to publish an image
    while not rospy.is_shutdown():
        if self.ci_in is not None:
            ci = self.ci_in

        msg.header.stamp = rospy.Time.now()
        ci.header = msg.header  # keep the CameraInfo header in sync with the image being published
        if use_image:
            pub.publish(msg)
        ci_pub.publish(ci)

        if hz <= 0:
            rospy.sleep()
        else:
            rate.sleep()
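Example 1 subscribes with self.camera_info_callback and later reads self.ci_in, but the callback itself is not part of the excerpt. A minimal sketch consistent with that usage (my assumption, not the original code):

    def camera_info_callback(self, msg):
        # cache the most recent CameraInfo; the publish loop above re-stamps its header
        self.ci_in = msg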
Example 2: default
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def default(self, ci="unused"):
if not self.component_instance.capturing:
return # press [Space] key to enable capturing
image_local = self.data["image"]
image = Image()
image.header = self.get_ros_header()
image.header.frame_id += "/base_image"
image.height = self.component_instance.image_height
image.width = self.component_instance.image_width
image.encoding = "rgba8"
image.step = image.width * 4
# VideoTexture.ImageRender implements the buffer interface
image.data = bytes(image_local)
# fill this 3 parameters to get correcty image with stereo camera
Tx = 0
Ty = 0
R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
intrinsic = self.data["intrinsic_matrix"]
camera_info = CameraInfo()
camera_info.header = image.header
camera_info.height = image.height
camera_info.width = image.width
camera_info.distortion_model = "plumb_bob"
camera_info.K = [
intrinsic[0][0],
intrinsic[0][1],
intrinsic[0][2],
intrinsic[1][0],
intrinsic[1][1],
intrinsic[1][2],
intrinsic[2][0],
intrinsic[2][1],
intrinsic[2][2],
]
camera_info.R = R
camera_info.P = [
intrinsic[0][0],
intrinsic[0][1],
intrinsic[0][2],
Tx,
intrinsic[1][0],
intrinsic[1][1],
intrinsic[1][2],
Ty,
intrinsic[2][0],
intrinsic[2][1],
intrinsic[2][2],
0,
]
self.publish(image)
self.topic_camera_info.publish(camera_info)
Example 3: makeROSInfo
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def makeROSInfo(image):
    ci = CameraInfo()
    head = Header()
    head.stamp = rospy.Time.now()
    ci.header = head
    h, w = image.shape[:2]  # OpenCV images are (rows, cols), i.e. (height, width)
    ci.width = w
    ci.height = h
    ci.distortion_model = 'plumb_bob'
    return ci
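A hypothetical call site for this helper (bridge and the publishers are assumptions, not part of the example): reuse the freshly stamped header on the image message so both messages line up.

    ci = makeROSInfo(frame)
    img_msg = bridge.cv2_to_imgmsg(frame, 'bgr8')
    img_msg.header = ci.header      # share the header built by makeROSInfo
    image_pub.publish(img_msg)
    info_pub.publish(ci)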
Example 4: default
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def default(self, ci='unused'):
    if not self.component_instance.capturing:
        return  # press [Space] key to enable capturing

    image_local = self.data['image']

    image = Image()
    image.header = self.get_ros_header()
    image.height = self.component_instance.image_height
    image.width = self.component_instance.image_width
    image.encoding = self.encoding
    image.step = image.width * 4

    # VideoTexture.ImageRender implements the buffer interface
    image.data = bytes(image_local)

    # fill in these 3 parameters to get a correct image with a stereo camera
    Tx = 0
    Ty = 0
    R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

    intrinsic = self.data['intrinsic_matrix']

    camera_info = CameraInfo()
    camera_info.header = image.header
    camera_info.height = image.height
    camera_info.width = image.width
    camera_info.distortion_model = 'plumb_bob'
    camera_info.D = [0]
    camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
    camera_info.R = R
    camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

    if self.pub_tf:
        self.publish_with_robot_transform(image)
    else:
        self.publish(image)
    self.topic_camera_info.publish(camera_info)
Example 5: fetch_image
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def fetch_image(self, cam):
    cam.simulate()
    if not cam.pixels:
        return None, None

    cv_img = cv.CreateImageHeader((cam.width, cam.height), cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_img, cam.pixels, cam.width * 3)
    cv.ConvertImage(cv_img, cv_img, cv.CV_CVTIMG_FLIP)
    im = self.bridge.cv_to_imgmsg(cv_img, "rgb8")

    caminfo = CameraInfo()
    caminfo.header = im.header
    caminfo.height = cam.height
    caminfo.width = cam.width
    caminfo.D = 5 * [0.]
    caminfo.K = sum([list(r) for r in cam.K], [])
    caminfo.P = sum([list(r) for r in cam.P], [])
    caminfo.R = sum([list(r) for r in cam.R], [])
    return im, caminfo
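The sum(..., []) calls above flatten a row-major matrix, given as a list of rows, into the flat list that the K, P and R fields expect. With numpy the same flattening could be written as, for instance (assuming cam.K is array-like):

    import numpy as np
    caminfo.K = list(np.asarray(cam.K).reshape(-1))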
Example 6: default
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def default(self, ci='unused'):
    """ Publish the data of the Camera as a ROS Image message. """
    if not self.component_instance.capturing:
        return  # press [Space] key to enable capturing

    image_local = self.data['image']

    image = Image()
    image.header = self.get_ros_header()
    image.header.frame_id += '/base_image'
    image.height = self.component_instance.image_height
    image.width = self.component_instance.image_width
    image.encoding = 'rgba8'
    image.step = image.width * 4

    # VideoTexture.ImageRender implements the buffer interface
    image.data = bytes(image_local)

    # sensor_msgs/CameraInfo [ http://ros.org/wiki/rviz/DisplayTypes/Camera ]
    # fill in these 3 parameters to get a correct image with a stereo camera
    Tx = 0
    Ty = 0
    R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

    intrinsic = self.data['intrinsic_matrix']

    camera_info = CameraInfo()
    camera_info.header = image.header
    camera_info.height = image.height
    camera_info.width = image.width
    camera_info.distortion_model = 'plumb_bob'
    camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
    camera_info.R = R
    camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

    self.publish(image)
    self.topic_camera_info.publish(camera_info)
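Examples 2, 4 and 6 leave Tx = Ty = 0, which is what a monocular (or the left) camera uses. For the right camera of a stereo pair, the sensor_msgs/CameraInfo field documentation defines P[3] as Tx = -fx * baseline. A hedged sketch of how those lines would change (the baseline variable is an assumption):

    Tx = -intrinsic[0][0] * baseline   # baseline in metres; right camera only
    Ty = 0
    camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]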
Example 7: run
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def run(self):
    img = Image()
    r = rospy.Rate(self.config['frame_rate'])
    while self.is_looping():
        if self.pub_img_.get_num_connections() == 0:
            if self.nameId:
                rospy.loginfo('Unsubscribing from camera as nobody listens to the topics.')
                self.camProxy.unsubscribe(self.nameId)
                self.nameId = None
            r.sleep()
            continue
        if self.nameId is None:
            self.reconfigure(self.config, 0)
            r.sleep()
            continue

        image = self.camProxy.getImageRemote(self.nameId)
        if image is None:
            continue

        # Deal with the image
        if self.config['use_ros_time']:
            img.header.stamp = rospy.Time.now()
        else:
            img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
        img.header.frame_id = self.frame_id
        img.height = image[1]
        img.width = image[0]
        nbLayers = image[2]
        if image[3] == kYUVColorSpace:
            encoding = "mono8"
        elif image[3] == kRGBColorSpace:
            encoding = "rgb8"
        elif image[3] == kBGRColorSpace:
            encoding = "bgr8"
        elif image[3] == kYUV422ColorSpace:
            encoding = "yuv422"  # this works only in ROS groovy and later
        elif image[3] == kDepthColorSpace:
            encoding = "mono16"
        else:
            rospy.logerr("Received unknown encoding: {0}".format(image[3]))
        img.encoding = encoding
        img.step = img.width * nbLayers
        img.data = image[6]

        self.pub_img_.publish(img)

        # deal with the camera info
        if self.config['source'] == kDepthCamera and image[3] == kDepthColorSpace:
            infomsg = CameraInfo()
            # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
            ratio_x = float(640)/float(img.width)
            ratio_y = float(480)/float(img.height)
            infomsg.width = img.width
            infomsg.height = img.height
            # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
            infomsg.K = [525, 0, 3.1950000000000000e+02,
                         0, 525, 2.3950000000000000e+02,
                         0, 0, 1]
            infomsg.P = [525, 0, 3.1950000000000000e+02, 0,
                         0, 525, 2.3950000000000000e+02, 0,
                         0, 0, 1, 0]
            for i in range(3):
                infomsg.K[i] = infomsg.K[i] / ratio_x
                infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
                infomsg.P[i] = infomsg.P[i] / ratio_x
                infomsg.P[4+i] = infomsg.P[4+i] / ratio_y

            infomsg.D = []
            infomsg.binning_x = 0
            infomsg.binning_y = 0
            infomsg.distortion_model = ""

            infomsg.header = img.header
            self.pub_info_.publish(infomsg)
        elif self.config['camera_info_url'] in self.camera_infos:
            infomsg = self.camera_infos[self.config['camera_info_url']]
            infomsg.header = img.header
            self.pub_info_.publish(infomsg)

        r.sleep()

    if (self.nameId):
        rospy.loginfo("unsubscribing from camera ")
        self.camProxy.unsubscribe(self.nameId)
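The depth branch in Example 7 starts from a fixed 640x480 XTion/Kinect-style calibration and rescales the first two rows of K and P to the resolution actually delivered. The same idea as a standalone sketch (the function name and packaging are mine, not part of the original driver):

    def scaled_depth_camera_info(width, height):
        # scale a nominal 640x480 calibration (fx = fy = 525, cx = 319.5, cy = 239.5)
        # to the resolution actually delivered by the camera
        rx = float(width) / 640.0
        ry = float(height) / 480.0
        info = CameraInfo()
        info.width = width
        info.height = height
        info.K = [525.0 * rx, 0.0, 319.5 * rx,
                  0.0, 525.0 * ry, 239.5 * ry,
                  0.0, 0.0, 1.0]
        info.P = [525.0 * rx, 0.0, 319.5 * rx, 0.0,
                  0.0, 525.0 * ry, 239.5 * ry, 0.0,
                  0.0, 0.0, 1.0, 0.0]
        return info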
Example 8: main_loop
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def main_loop(self):
    img = Image()
    cimg = Image()
    r = rospy.Rate(15)
    while not rospy.is_shutdown():
        if self.pub_img_.get_num_connections() == 0:
            r.sleep()
            continue

        image = self.camProxy.getImageRemote(self.nameId)
        if image is None:
            continue

        # Deal with the image
        '''
        #Images received from NAO have
        if self.config['use_ros_time']:
            img.header.stamp = rospy.Time.now()
        else:
            img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
        '''
        img.header.stamp = rospy.Time.now()
        img.header.frame_id = self.frame_id
        img.height = image[1]
        img.width = image[0]
        nbLayers = image[2]
        if image[3] == kDepthColorSpace:
            encoding = "mono16"
        else:
            rospy.logerr("Received unknown encoding: {0}".format(image[3]))
        img.encoding = encoding
        img.step = img.width * nbLayers
        img.data = image[6]

        self.pub_img_.publish(img)

        # deal with the camera info
        infomsg = CameraInfo()
        infomsg.header = img.header
        # yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
        ratio_x = float(640)/float(img.width)
        ratio_y = float(480)/float(img.height)
        infomsg.width = img.width
        infomsg.height = img.height
        # [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
        infomsg.K = [525, 0, 3.1950000000000000e+02,
                     0, 525, 2.3950000000000000e+02,
                     0, 0, 1]
        infomsg.P = [525, 0, 3.1950000000000000e+02, 0,
                     0, 525, 2.3950000000000000e+02, 0,
                     0, 0, 1, 0]
        for i in range(3):
            infomsg.K[i] = infomsg.K[i] / ratio_x
            infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
            infomsg.P[i] = infomsg.P[i] / ratio_x
            infomsg.P[4+i] = infomsg.P[4+i] / ratio_y

        infomsg.D = []
        infomsg.binning_x = 0
        infomsg.binning_y = 0
        infomsg.distortion_model = ""

        self.pub_info_.publish(infomsg)

        # Currently we only get a depth image from the 3D camera of NAO, so we make up a fake
        # color image (a black image) and publish it under the image_color topic.
        # This should be updated when the color image from the 3D camera becomes available.
        colorimg = np.zeros((img.height, img.width, 3), np.uint8)
        try:
            cimg = self.bridge.cv2_to_imgmsg(colorimg, "bgr8")
            cimg.header.stamp = img.header.stamp
            cimg.header.frame_id = img.header.frame_id
            self.pub_cimg_.publish(cimg)
        except CvBridgeError, e:
            print e

        r.sleep()
Example 9: main
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
def main():
    parser = argparse.ArgumentParser(
        description="Convert an LCM log to a ROS bag (mono/stereo images only).")
    parser.add_argument('lcm_file', help='Input LCM log.', action='store')
    parser.add_argument('left_img_channel', help='LCM channel for left image.')
    parser.add_argument('left_camera_yml',
                        help='Image calibration YAML file from ROS calibrator')
    parser.add_argument('--right_img_channel', help='LCM channel for right image.',
                        action='append', dest='lcm_channels')
    parser.add_argument('--right_camera_yml',
                        help='Image calibration YAML file from ROS calibrator',
                        action='append', dest='yml_files')
    roi_parser = parser.add_argument_group("Format7/ROI", "Format7/ROI options needed when dealing with non-standard video modes.")
    roi_parser.add_argument('--binning_x', default=1, type=int, dest='binning_x', help='Image binning factor.')
    roi_parser.add_argument('--binning_y', default=1, type=int, dest='binning_y', help='Image binning factor.')
    roi_parser.add_argument('--x_offset', default=0, type=int, dest='x_offset', help="ROI x offset (in UNBINNED pixels)")
    roi_parser.add_argument('--y_offset', default=0, type=int, dest='y_offset', help="ROI y offset (in UNBINNED pixels)")
    roi_parser.add_argument('--width', default=640, type=int, dest='width', help="ROI width (in UNBINNED pixels)")
    roi_parser.add_argument('--height', default=480, type=int, dest='height', help="ROI height (in UNBINNED pixels)")
    roi_parser.add_argument('--do_rectify', default=False, type=bool, dest='do_rectify', help="Do rectification when querying ROI.")
    args = parser.parse_args()

    if args.lcm_channels is None:
        args.lcm_channels = []
    if args.yml_files is None:
        args.yml_files = []
    args.lcm_channels.append(args.left_img_channel)
    args.yml_files.append(args.left_camera_yml)

    if len(args.lcm_channels) != len(args.yml_files):
        print "LCM channel-YAML file mismatch!"

    print "Converting images in %s to ROS bag file..." % (args.lcm_file)

    log = lcm.EventLog(args.lcm_file, 'r')
    bag = rosbag.Bag(args.lcm_file + '.images.bag', 'w')

    # Read in YAML files.
    yml = []
    for y in args.yml_files:
        yml.append(yaml.load(file(y)))

    try:
        count = 0
        for event in log:
            for ii in range(len(args.lcm_channels)):
                l = args.lcm_channels[ii]
                y = yml[ii]
                if event.channel == l:
                    lcm_msg = image_t.decode(event.data)

                    # Fill in image.
                    if lcm_msg.pixelformat != image_t.PIXEL_FORMAT_MJPEG:
                        print "Encountered non-MJPEG compressed image. Skipping..."
                        continue

                    ros_msg = CompressedImage()
                    ros_msg.header.seq = event.eventnum

                    secs_float = float(lcm_msg.utime)/1e6
                    nsecs_float = (secs_float - np.floor(secs_float)) * 1e9
                    ros_msg.header.stamp.secs = np.uint32(np.floor(secs_float))
                    ros_msg.header.stamp.nsecs = np.uint32(np.floor(nsecs_float))
                    ros_msg.header.frame_id = "camera"

                    ros_msg.format = 'jpeg'
                    ros_msg.data = lcm_msg.data

                    # Fill in camera info
                    camera_info = CameraInfo()
                    camera_info.header = ros_msg.header
                    camera_info.height = y['image_height']
                    camera_info.width = y['image_width']

                    if y["distortion_model"] != "plumb_bob":
                        print "Encountered non-supported distortion model %s. Skipping..." % y["distortion_model"]
                        continue
                    camera_info.distortion_model = y["distortion_model"]
                    camera_info.D = y["distortion_coefficients"]['data']
                    camera_info.K = y["camera_matrix"]['data']
                    camera_info.R = y["rectification_matrix"]['data']
                    camera_info.P = y["projection_matrix"]['data']

                    camera_info.binning_x = args.binning_x
                    camera_info.binning_y = args.binning_y
                    camera_info.roi.x_offset = args.x_offset
                    camera_info.roi.y_offset = args.y_offset
                    camera_info.roi.height = args.height
                    camera_info.roi.width = args.width
                    camera_info.roi.do_rectify = args.do_rectify

                    bag.write("/camera/" + l + "/image_raw/compressed", ros_msg, ros_msg.header.stamp)
                    bag.write("/camera/" + l + "/camera_info", camera_info, camera_info.header.stamp)

                    count += 1
#......... the rest of this example is omitted .........
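Example 9 indexes the loaded calibration with keys such as y['camera_matrix']['data']. For reference, a YAML file written by the ROS camera calibrator typically loads into a dict shaped like the sketch below (the numbers are placeholders, not real calibration values):

    calib = {
        'image_width': 640,
        'image_height': 480,
        'distortion_model': 'plumb_bob',
        'distortion_coefficients': {'rows': 1, 'cols': 5, 'data': [0.0, 0.0, 0.0, 0.0, 0.0]},
        'camera_matrix': {'rows': 3, 'cols': 3,
                          'data': [500.0, 0.0, 320.0, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0]},
        'rectification_matrix': {'rows': 3, 'cols': 3,
                                 'data': [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]},
        'projection_matrix': {'rows': 3, 'cols': 4,
                              'data': [500.0, 0.0, 320.0, 0.0, 0.0, 500.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0]},
    }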
Example 10: while
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
caminfo_msg = None
with rosbag.Bag(bag_filename, 'w') as bag:
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            msg = bridge.cv2_to_imgmsg(gray, "mono8")
            msg.header.seq = seq
            msg.header.stamp = rospy.Time.from_sec(initial_time + 1./fps * seq)
            bag.write('/camera/image_raw', msg, msg.header.stamp)

            if not caminfo_msg:
                caminfo_msg = CameraInfo()
                caminfo_msg.height, caminfo_msg.width, _ = np.shape(frame)
                caminfo_msg.distortion_model = "plumb_bob"
                caminfo_msg.D = calib_data['distortion_coefficients'].flatten()
                caminfo_msg.K = calib_data['camera_matrix'].flatten()

            caminfo_msg.header = msg.header
            bag.write('/camera/camera_info', caminfo_msg, caminfo_msg.header.stamp)

            seq += 1
        else:
            break

cap.release()
Example 11: len
# Required import: from sensor_msgs.msg import CameraInfo [as alias]
# Or: from sensor_msgs.msg.CameraInfo import header [as alias]
image.encoding = "mono8"
image.header.frame_id = 'image' if sys.argv[1] == 'mono' \
else 'image_%i'%int(sys.argv[1])
image.header.seq = 0 if len(sys.argv) < 3 else int(sys.argv[2])
with open('camera_info.yml') as f:
data = yaml.load(f.read())
del data['roi'], data['header']
camera_info = CameraInfo(**data)
with rosbag.Bag('%s.bag'%image.header.frame_id, 'w') as bag:
while 1:
try:
img, time = image_time(image.header.frame_id + \
'_%07i'%image.header.seq)
except IOError as error:
print("stop at %i: %s"%(image.header.seq, str(error)))
break
image.data = img.tostring()
image.height, image.width = img.shape
image.step = image.width # grayscale image
image.header.stamp = rospy.Time.from_sec(time)
camera_info.header = image.header
camera_info.height = image.height
camera_info.width = image.width
bag.write('/%s/image'%image.header.frame_id, image, image.header.stamp)
bag.write('/%s/camera_info'%image.header.frame_id, camera_info, \
image.header.stamp)
image.header.seq += 1