本文整理汇总了Python中sensor_msgs.msg.CameraInfo类的典型用法代码示例。如果您正苦于以下问题:Python CameraInfo类的具体用法?Python CameraInfo怎么用?Python CameraInfo使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了CameraInfo类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self):
    """Publish a static image (argv[1]) and a synthesized CameraInfo at ~rate.

    A subscriber on ``camera_info_in`` lets an external node override the
    placeholder intrinsics; once a message arrives, it is re-stamped and
    republished in place of the synthetic one.
    """
    rospy.init_node('image_publish')
    name = sys.argv[1]
    image = cv2.imread(name)
    hz = rospy.get_param("~rate", 1)
    frame_id = rospy.get_param("~frame_id", "map")
    use_image = rospy.get_param("~use_image", True)
    rate = rospy.Rate(hz)

    self.ci_in = None
    ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                              self.camera_info_callback, queue_size=1)

    if use_image:
        pub = rospy.Publisher('image', Image, queue_size=1)
    ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

    msg = Image()
    msg.header.stamp = rospy.Time.now()
    msg.header.frame_id = frame_id
    msg.encoding = 'bgr8'
    msg.height = image.shape[0]
    msg.width = image.shape[1]
    msg.step = image.shape[1] * 3
    # tobytes() replaces the deprecated numpy ndarray.tostring().
    msg.data = image.tobytes()
    if use_image:
        pub.publish(msg)

    ci = CameraInfo()
    ci.header = msg.header
    ci.height = msg.height
    ci.width = msg.width
    ci.distortion_model = "plumb_bob"
    # Placeholder intrinsics (f=500, principal point at the image center);
    # superseded by anything received on camera_info_in.
    ci.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    ci.K = [500.0, 0.0, msg.width / 2, 0.0, 500.0, msg.height / 2, 0.0, 0.0, 1.0]
    ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    ci.P = [500.0, 0.0, msg.width / 2, 0.0, 0.0, 500.0, msg.height / 2, 0.0, 0.0, 0.0, 1.0, 0.0]

    # TODO(lwalter) only publish while hz is positive, otherwise wait for an
    # input trigger message to publish an image.
    while not rospy.is_shutdown():
        if self.ci_in is not None:
            ci = self.ci_in
        msg.header.stamp = rospy.Time.now()
        ci.header = msg.header
        if use_image:
            pub.publish(msg)
        ci_pub.publish(ci)
        if hz <= 0:
            # BUG FIX: rospy.sleep() requires a duration argument; the
            # bare call raised TypeError whenever ~rate was <= 0.
            rospy.sleep(1.0)
        else:
            rate.sleep()
示例2: sim
def sim():
    # Simulation node: publishes camera intrinsics until a subscriber shows
    # up, starts timer callbacks for velocity/image/camera-info publishing,
    # then Euler-integrates the camera/target pose dynamics while
    # broadcasting the resulting tf transforms.
    #
    # NOTE(review): the timer callbacks (velPubCB, imagePubCB, camInfoPubCB,
    # switchCB) and the constants (distCoeffs, camMat, velRate, frameRate,
    # intRate) are defined elsewhere in the module — the globals below must
    # be assigned before the timers start, so statement order matters.
    global t, pose, camInfoMsg
    global centerPub, targetVelPub, camVelPub, camInfoPub, br, tfl
    global switchTimer, imageTimer, estimatorOn
    rospy.init_node("sim")
    estimatorOn = True
    centerPub = rospy.Publisher("markerCenters",Center,queue_size=10)
    targetVelPub = rospy.Publisher("ugv0/body_vel",TwistStamped,queue_size=10)
    camVelPub = rospy.Publisher("image/body_vel",TwistStamped,queue_size=10)
    cameraName = rospy.get_param(rospy.get_name()+"/camera","camera")
    camInfoPub = rospy.Publisher(cameraName+"/camera_info",CameraInfo,queue_size=1)
    br = tf.TransformBroadcaster()
    tfl = tf.TransformListener()
    # Camera parameters
    camInfoMsg = CameraInfo()
    camInfoMsg.D = distCoeffs.tolist()
    camInfoMsg.K = camMat.reshape((-1)).tolist()
    # Wait for node to get cam info
    while (camVelPub.get_num_connections() == 0) and (not rospy.is_shutdown()):
        # publish camera parameters
        camInfoPub.publish(camInfoMsg)
        rospy.sleep(0.5)
    # Publishers (timer-driven callbacks)
    rospy.Timer(rospy.Duration(1.0/velRate),velPubCB)
    imageTimer = rospy.Timer(rospy.Duration(1.0/frameRate),imagePubCB)
    rospy.Timer(rospy.Duration(0.5),camInfoPubCB)
    # One-shot timer flips the estimator state after 11 s.
    switchTimer = rospy.Timer(rospy.Duration(11.0),switchCB,oneshot=True)
    # Initial conditions: camera 1 m behind / 1.5 m above, pitched down 90 deg
    # (quaternion [-sqrt(2)/2, 0, 0, sqrt(2)/2]); target 1 m ahead on the ground.
    startTime = rospy.get_time()
    camPos = np.array([0,-1,1.5])
    camOrient = np.array([-1*np.sqrt(2)/2,0,0,np.sqrt(2)/2])
    targetPos = np.array([0,1,0])
    targetOrient = np.array([0,0,0,1])
    # State vector: [camPos(3), camOrient(4), targetPos(3), targetOrient(4)]
    pose = np.concatenate((camPos,camOrient,targetPos,targetOrient))
    r = rospy.Rate(intRate)
    h = 1.0/intRate  # explicit-Euler integration step
    while not rospy.is_shutdown():
        t = np.array(rospy.get_time() - startTime)
        poseDot = poseDyn(t,pose)
        pose = pose + poseDot*h
        # Send Transform
        camPos = pose[0:3]
        camOrient = pose[3:7]
        targetPos = pose[7:10]
        targetOrient = pose[10:]
        br.sendTransform(targetPos,targetOrient,rospy.Time.now(),"ugv0","world")
        br.sendTransform(camPos,camOrient,rospy.Time.now(),"image","world")
        r.sleep()
示例3: talker
def talker():
    """Publish a hard-coded camera intrinsic matrix on 'camera_info' at 10 Hz."""
    pub = rospy.Publisher('camera_info', CameraInfo, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        data = CameraInfo()
        # Calibration result (row-major 3x3):
        # [[ 820.29938083    0.          408.73210359]
        #  [   0.          827.01247867  237.04151422]
        #  [   0.            0.            1.        ]]
        # BUG FIX: K[5] (cy) was a copy-paste of fy (827.01247867) instead of
        # 237.04151422, and K[0] (fx) was truncated to 820.
        data.K = (820.29938083, 0., 408.73210359,
                  0., 827.01247867, 237.04151422,
                  0., 0., 1.)
        pub.publish(data)
        rate.sleep()
示例4: send_test_messages
def send_test_messages(self, filename):
    # Publish a fixed CameraInfo plus the test image at ``filename``, then
    # block until the node under test replies (self.msg_received is set by a
    # subscriber callback elsewhere) or the timeout expires.
    self.msg_received = False
    # Publish the camera info TODO make this a field in the annotations file to dictate the source calibration file
    msg_info = CameraInfo()
    msg_info.height = 480
    msg_info.width = 640
    msg_info.distortion_model = "plumb_bob"
    msg_info.D = [-0.28048157543793056, 0.05674481026365553, -0.000988764087143394, -0.00026869128565781613, 0.0]
    msg_info.K = [315.128501, 0.0, 323.069638, 0.0, 320.096636, 218.012581, 0.0, 0.0, 1.0]
    msg_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    msg_info.P = [217.48876953125, 0.0, 321.3154072384932, 0.0, 0.0, 250.71084594726562, 202.30416165274983, 0.0,
                  0.0, 0.0, 1.0, 0.0]
    msg_info.roi.do_rectify = False
    # Publish the test image
    img = cv2.imread(filename)
    cvb = CvBridge()
    msg_raw = cvb.cv2_to_imgmsg(img, encoding="bgr8")
    self.pub_info.publish(msg_info)
    self.pub_raw.publish(msg_raw)
    # Wait for the message to be received
    timeout = rospy.Time.now() + rospy.Duration(10)  # Wait at most 10 seconds for the node to reply
    while not self.msg_received and not rospy.is_shutdown() and rospy.Time.now() < timeout:
        rospy.sleep(0.1)
    self.assertLess(rospy.Time.now(), timeout, "Waiting for apriltag detection timed out.")
示例5: publish
def publish(self):
    """Fetch one frame from the video device proxy and publish it as a ROS
    Image, together with a hard-coded CameraInfo for this camera."""
    # Frame layout from getImageRemote:
    # [0]=width, [1]=height, [2]=bytes per pixel, [5]=timestamp secs, [6]=raw data.
    frame = self.__videoDeviceProxy.getImageRemote(self.__videoDeviceProxyName)

    img_msg = Image()
    img_msg.header.stamp.secs = frame[5]
    img_msg.width = frame[0]
    img_msg.height = frame[1]
    img_msg.step = frame[2] * frame[0]
    img_msg.is_bigendian = False
    img_msg.encoding = 'bgr8'
    img_msg.data = frame[6]
    self.__imagePublisher.publish(img_msg)

    # Calibration data from the calibration phase is hard coded for now.
    # ROI fields are intentionally left at their defaults.
    info_msg = CameraInfo()
    info_msg.header.stamp.secs = frame[5]
    info_msg.width = frame[0]
    info_msg.height = frame[1]
    info_msg.D = [-0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0]
    info_msg.K = [581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0]
    info_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    info_msg.P = [580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0]
    info_msg.distortion_model = 'plumb_bob'
    self.__cameraInfoPublisher.publish(info_msg)
示例6: yaml_to_CameraInfo
def yaml_to_CameraInfo(calib_yaml):
    """
    Parse a yaml string containing camera calibration data (as produced by
    rosrun camera_calibration cameracalibrator.py) into a
    sensor_msgs/CameraInfo msg.
    Parameters
    ----------
    calib_yaml : str
        YAML document containing camera calibration data
    Returns
    -------
    camera_info_msg : sensor_msgs.msg.CameraInfo
        A sensor_msgs.msg.CameraInfo message containing the camera calibration
        data
    """
    # Load data from the string.
    # BUG FIX: yaml.load without an explicit Loader is deprecated and can
    # instantiate arbitrary Python objects; calibration files only need
    # plain YAML, so safe_load is the correct call.
    calib_data = yaml.safe_load(calib_yaml)
    # Parse
    camera_info_msg = CameraInfo()
    camera_info_msg.width = calib_data["image_width"]
    camera_info_msg.height = calib_data["image_height"]
    camera_info_msg.K = calib_data["camera_matrix"]["data"]
    camera_info_msg.D = calib_data["distortion_coefficients"]["data"]
    camera_info_msg.R = calib_data["rectification_matrix"]["data"]
    camera_info_msg.P = calib_data["projection_matrix"]["data"]
    camera_info_msg.distortion_model = calib_data["distortion_model"]
    return camera_info_msg
示例7: __init__
def __init__(self):
    """Stream a video file as mono8 Image messages plus a static CameraInfo
    loaded from the robots_config calibration yaml."""
    rospy.init_node('image_converter', anonymous=True)
    self.filtered_face_locations = rospy.get_param('camera_topic')
    # queue_size added: rospy warns (and behaves asynchronously differently)
    # when it is omitted.
    self.image_pub = rospy.Publisher(self.filtered_face_locations, Image, queue_size=10)
    self.image_info = rospy.Publisher('camera_info', CameraInfo, queue_size=10)
    self.bridge = CvBridge()
    cap = cv2.VideoCapture('/home/icog-labs/Videos/video.mp4')
    print(cap.get(5))  # property index 5 is CAP_PROP_FPS
    path = rospkg.RosPack().get_path("robots_config")
    path = path + "/robot/camera.yaml"
    # open() replaces the python2-only file() builtin and the context
    # manager closes the handle; safe_load avoids executing arbitrary
    # YAML tags from the config file.
    with open(path, 'r') as stream:
        calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    while (not rospy.is_shutdown()) and cap.isOpened():
        # integer division: cv2.waitKey requires an int delay (1000/30 is a
        # float under python3).
        self.keystroke = cv2.waitKey(1000 // 30)
        ret, frame = cap.read()
        # BUG FIX: at end of stream cap.read() returns (False, None) and
        # cvtColor raises on None — stop cleanly instead.
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(gray, "mono8"))
        self.image_info.publish(cam_info)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
示例8: makeROSInfo
def makeROSInfo(image):
    """Build a minimal stamped CameraInfo whose dimensions match ``image``.

    Only header, width, height, and distortion_model are filled; the
    calibration arrays are left at their defaults.
    """
    ci = CameraInfo()
    head = Header()
    head.stamp = rospy.Time.now()
    ci.header = head
    # BUG FIX: a numpy image's shape is (rows, cols) = (height, width); the
    # original unpacked shape[:2] as (w, h), publishing swapped dimensions
    # for any non-square image.
    h, w = image.shape[:2]
    ci.width = w
    ci.height = h
    ci.distortion_model = 'plumb_bob'
    return ci
示例9: default
def default(self, ci="unused"):
if not self.component_instance.capturing:
return # press [Space] key to enable capturing
image_local = self.data["image"]
image = Image()
image.header = self.get_ros_header()
image.header.frame_id += "/base_image"
image.height = self.component_instance.image_height
image.width = self.component_instance.image_width
image.encoding = "rgba8"
image.step = image.width * 4
# VideoTexture.ImageRender implements the buffer interface
image.data = bytes(image_local)
# fill this 3 parameters to get correcty image with stereo camera
Tx = 0
Ty = 0
R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
intrinsic = self.data["intrinsic_matrix"]
camera_info = CameraInfo()
camera_info.header = image.header
camera_info.height = image.height
camera_info.width = image.width
camera_info.distortion_model = "plumb_bob"
camera_info.K = [
intrinsic[0][0],
intrinsic[0][1],
intrinsic[0][2],
intrinsic[1][0],
intrinsic[1][1],
intrinsic[1][2],
intrinsic[2][0],
intrinsic[2][1],
intrinsic[2][2],
]
camera_info.R = R
camera_info.P = [
intrinsic[0][0],
intrinsic[0][1],
intrinsic[0][2],
Tx,
intrinsic[1][0],
intrinsic[1][1],
intrinsic[1][2],
Ty,
intrinsic[2][0],
intrinsic[2][1],
intrinsic[2][2],
0,
]
self.publish(image)
self.topic_camera_info.publish(camera_info)
示例10: publish
def publish(self, event):
    """Timer callback: re-stamp and publish the cached image message, plus a
    bare (size-only) CameraInfo when info publishing is enabled."""
    if self.imgmsg is None:
        return
    stamp = rospy.Time.now()
    # Hold the lock while touching the shared image message.
    with self.lock:
        self.imgmsg.header.stamp = stamp
        self.imgmsg.header.frame_id = self.frame_id
        self.pub.publish(self.imgmsg)
        if not self.publish_info:
            return
        info = CameraInfo()
        info.header.stamp = stamp
        info.header.frame_id = self.frame_id
        info.width = self.imgmsg.width
        info.height = self.imgmsg.height
        self.pub_info.publish(info)
示例11: GetCameraInfo
def GetCameraInfo(width, height):
    """Return a CameraInfo of the given size, filled from the module-level
    calibration globals (model, D, K, R, P)."""
    info = CameraInfo()
    info.width = width
    info.height = height
    info.distortion_model = model
    info.D = D
    info.K = K
    info.R = R
    info.P = P
    # 0 and 1 both mean "no binning" per the CameraInfo message spec.
    info.binning_x = 0
    info.binning_y = 0
    return info
示例12: publish
def publish(self):
    """Read the image at self.file_name, convert it to the encoding requested
    by ~encoding, and publish it; optionally publish a matching
    (intrinsics-free) CameraInfo as well."""
    now = rospy.Time.now()
    bridge = cv_bridge.CvBridge()
    img_bgr = cv2.imread(self.file_name)
    if img_bgr is None:
        jsk_logwarn('cannot read the image at {}'
                    .format(self.file_name))
        return
    # resolve encoding
    encoding = rospy.get_param('~encoding', 'bgr8')
    if getCvType(encoding) == 0:
        # mono8
        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    elif getCvType(encoding) == 2:
        # 16UC1
        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        img = img.astype(np.float32)
        # BUG FIX: scale to the uint16 maximum (2**16 - 1). Multiplying by
        # 2**16 mapped full-white pixels (255) to 65536, which wraps to 0
        # on the uint16 cast below.
        img = img / 255 * (2 ** 16 - 1)
        img = img.astype(np.uint16)
    elif getCvType(encoding) == 5:
        # 32FC1: grayscale normalized to [0, 1]
        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
        img = img.astype(np.float32)
        img /= 255
    elif getCvType(encoding) == 16:
        # 8UC3
        if encoding in ('rgb8', 'rgb16'):
            # RGB
            img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
        else:
            img = img_bgr
    else:
        jsk_logerr('unsupported encoding: {0}'.format(encoding))
        return
    # setup ros message and publish
    imgmsg = bridge.cv2_to_imgmsg(img, encoding=encoding)
    imgmsg.header.stamp = now
    imgmsg.header.frame_id = self.frame_id
    self.pub.publish(imgmsg)
    if self.publish_info:
        info = CameraInfo()
        info.header.stamp = now
        info.header.frame_id = self.frame_id
        info.width = imgmsg.width
        info.height = imgmsg.height
        self.pub_info.publish(info)
示例13: sim
def sim():
    # Simulation node (single-velocity variant of the earlier sim): publishes
    # camera intrinsics until a subscriber connects, starts the timer-driven
    # publishers, then Euler-integrates the camera/target pose dynamics.
    #
    # NOTE(review): velPubCB, imagePubCB, camInfoPubCB and the constants
    # distCoeffs, camMat, velRate, frameRate, intRate come from elsewhere in
    # the module — the globals below must be bound before the timers fire.
    global t, pose, camInfoMsg
    global centerPub, velPub, camInfoPub, br, tfl
    rospy.init_node("sim")
    centerPub = rospy.Publisher("markerCenters",Center,queue_size=10)
    velPub = rospy.Publisher("image/local_vel",TwistStamped,queue_size=10)
    cameraName = rospy.get_param(rospy.get_name()+"/camera","camera")
    camInfoPub = rospy.Publisher(cameraName+"/camera_info",CameraInfo,queue_size=1)
    br = tf.TransformBroadcaster()
    tfl = tf.TransformListener()
    # Camera parameters
    camInfoMsg = CameraInfo()
    camInfoMsg.D = distCoeffs.tolist()
    camInfoMsg.K = camMat.reshape((-1)).tolist()
    # Wait for node to get cam info
    while (velPub.get_num_connections() == 0) and (not rospy.is_shutdown()):
        # publish camera parameters
        camInfoPub.publish(camInfoMsg)
        rospy.sleep(0.5)
    # Publishers (timer-driven callbacks)
    rospy.Timer(rospy.Duration(1.0/velRate),velPubCB)
    rospy.Timer(rospy.Duration(1.0/frameRate),imagePubCB)
    rospy.Timer(rospy.Duration(0.5),camInfoPubCB)
    # Initial conditions: camera above/behind the origin pitched down 90 deg;
    # target on the ground plane.
    startTime = rospy.get_time()
    camPos = np.array([0,-1,1.5])
    camOrient = np.array([-1*np.sqrt(2)/2,0,0,np.sqrt(2)/2])
    targetPos = np.array([-1.5,1.5,0])
    targetOrient = np.array([0,0,0,1])
    # State vector: [camPos(3), camOrient(4), targetPos(3), targetOrient(4)]
    pose = np.concatenate((camPos,camOrient,targetPos,targetOrient))
    r = rospy.Rate(intRate)
    h = 1.0/intRate  # explicit-Euler integration step
    while not rospy.is_shutdown():
        t = np.array(rospy.get_time() - startTime)
        poseDot = poseDyn(t,pose)
        pose = pose + poseDot*h
        r.sleep()
示例14: parse_yaml
def parse_yaml(self, filename):
    """Load a camera calibration yaml file into a CameraInfo message.

    Parameters
    ----------
    filename : str
        Path to the calibration yaml file.

    Returns
    -------
    sensor_msgs.msg.CameraInfo
    """
    # BUG FIX: open() replaces the python2-only file() builtin, the context
    # manager closes the handle (the original leaked it), and safe_load
    # avoids instantiating arbitrary Python objects from the yaml file.
    with open(filename, "r") as stream:
        calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data["image_width"]
    cam_info.height = calib_data["image_height"]
    cam_info.K = calib_data["camera_matrix"]["data"]
    cam_info.D = calib_data["distortion_coefficients"]["data"]
    cam_info.R = calib_data["rectification_matrix"]["data"]
    cam_info.P = calib_data["projection_matrix"]["data"]
    cam_info.distortion_model = calib_data["distortion_model"]
    return cam_info
示例15: parse_yaml
def parse_yaml(filename):
    """Load a camera calibration yaml file into a CameraInfo message.

    Parameters
    ----------
    filename : str
        Path to the calibration yaml file.

    Returns
    -------
    sensor_msgs.msg.CameraInfo
    """
    # BUG FIX: open() replaces the python2-only file() builtin, the context
    # manager closes the handle (the original leaked it), and safe_load
    # avoids instantiating arbitrary Python objects from the yaml file.
    with open(filename, 'r') as stream:
        calib_data = yaml.safe_load(stream)
    cam_info = CameraInfo()
    cam_info.width = calib_data['image_width']
    cam_info.height = calib_data['image_height']
    cam_info.K = calib_data['camera_matrix']['data']
    cam_info.D = calib_data['distortion_coefficients']['data']
    cam_info.R = calib_data['rectification_matrix']['data']
    cam_info.P = calib_data['projection_matrix']['data']
    cam_info.distortion_model = calib_data['distortion_model']
    return cam_info