This article collects typical usage examples of the Python method sensor_msgs.msg.Image.step. If you are wondering what Image.step does, how to use it, or want to see it in real code, the curated examples below should help. You can also read more about its containing class, sensor_msgs.msg.Image.
Below are 15 code examples of Image.step, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
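Before the examples, a quick note on what step means: it is the length of one image row in bytes, i.e. width multiplied by the number of bytes per pixel implied by the encoding (1 for mono8, 3 for rgb8/bgr8, 4 for rgba8, and so on), and len(data) should equal step * height. The short sketch below is only an illustration of that relationship; the helper name fill_image_msg and the bgr8 default are assumptions made here, not part of any example that follows.

import numpy as np
from sensor_msgs.msg import Image

def fill_image_msg(arr, encoding='bgr8', bytes_per_pixel=3):
    # Illustrative helper (not from the examples below): fill an Image
    # message from an HxWxC uint8 array; step is the row stride in bytes.
    msg = Image()
    msg.height = arr.shape[0]
    msg.width = arr.shape[1]
    msg.encoding = encoding
    msg.is_bigendian = 0
    msg.step = msg.width * bytes_per_pixel
    msg.data = arr.tostring()  # tobytes() in newer NumPy; len(data) == step * height
    return msg

# e.g. a 480x640 BGR frame: fill_image_msg(np.zeros((480, 640, 3), np.uint8)).step == 1920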
Example 1: convert
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def convert(data):
    width = data.info.width
    height = data.info.height
    pixelList = [0] * width * height

    imageMsg = Image()
    imageMsg.header.stamp = rospy.Time.now()
    imageMsg.header.frame_id = '1'
    imageMsg.height = height
    imageMsg.width = width
    imageMsg.encoding = 'mono8'
    imageMsg.is_bigendian = 0
    imageMsg.step = width

    for h in range(height):
        for w in range(width):
            if data.data[h*width + w] == -1:
                pixelList[h*width + w] = 150
            elif data.data[h*width + w] == 0:
                pixelList[h*width + w] = 0
            elif data.data[h*width + w] == 100:
                pixelList[h*width + w] = 255
            else:
                pixelList[h*width + w] = data.data[h*width + w]
                print 'ERROR'

    imageMsg.data = pixelList
    imagePub.publish(imageMsg)
Example 2: post_image
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def post_image(self, component_instance):
    """ Publish the data of the camera as a ROS Image message. """
    image_local = component_instance.local_data['image']
    if not image_local or image_local == '' or not image_local.image or not component_instance.capturing:
        return  # press [Space] key to enable capturing

    parent_name = component_instance.robot_parent.blender_obj.name

    image = Image()
    image.header.stamp = rospy.Time.now()
    image.header.seq = self._seq
    # http://www.ros.org/wiki/geometry/CoordinateFrameConventions#Multi_Robot_Support
    image.header.frame_id = ('/' + parent_name + '/base_image')
    image.height = component_instance.image_height
    image.width = component_instance.image_width
    image.encoding = 'rgba8'
    image.step = image.width * 4
    # NOTE: Blender returns the image as a binary string encoded as RGBA
    # sensor_msgs.msg.Image.data needs to be len() friendly
    # TODO patch ros-py3/common_msgs/sensor_msgs/src/sensor_msgs/msg/_Image.py
    # to be C-PyBuffer "aware"? http://docs.python.org/c-api/buffer.html
    image.data = bytes(image_local.image)
    # RGBA8 -> RGB8? (dropping the alpha channel would save h*w bytes, at some CPU cost)
    # http://wiki.blender.org/index.php/Dev:Source/GameEngine/2.49/VideoTexture
    # http://www.blender.org/documentation/blender_python_api_2_57_release/bge.types.html#bge.types.KX_Camera.useViewport
    for topic in self._topics:
        # publish the message on the correct topic
        if str(topic.name) == str("/" + parent_name + "/" + component_instance.blender_obj.name):
            topic.publish(image)
    self._seq = self._seq + 1
Example 3: numpy_to_imgmsg
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def numpy_to_imgmsg(image, stamp=None):
    from sensor_msgs.msg import Image
    rosimage = Image()
    rosimage.height = image.shape[0]
    rosimage.width = image.shape[1]
    if image.dtype == np.uint8:
        rosimage.encoding = '8UC%d' % image.shape[2]
        rosimage.step = image.shape[2] * rosimage.width
        rosimage.data = image.ravel().tolist()
    else:
        rosimage.encoding = '32FC%d' % image.shape[2]
        rosimage.step = image.shape[2] * rosimage.width * 4
        rosimage.data = np.array(image.flat, dtype=np.float32).tostring()
    if stamp is not None:
        rosimage.header.stamp = stamp
    return rosimage
Example 4: process_frame
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def process_frame(self, cam_id, buf, buf_offset, timestamp, framenumber):
    if have_ROS:
        msg = Image()
        msg.header.seq = framenumber
        msg.header.stamp = rospy.Time.from_sec(timestamp)
        msg.header.frame_id = "0"

        npbuf = np.array(buf)
        (height, width) = npbuf.shape

        msg.height = height
        msg.width = width
        msg.encoding = self.encoding
        msg.step = width
        msg.data = npbuf.tostring()  # let numpy convert to string

        with self.publisher_lock:
            cam_info = self.camera_info
            cam_info.header.stamp = msg.header.stamp
            cam_info.header.seq = msg.header.seq
            cam_info.header.frame_id = msg.header.frame_id
            cam_info.width = width
            cam_info.height = height

            self.publisher.publish(msg)
            self.publisher_cam_info.publish(cam_info)
    return [], []
Example 5: main_loop
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def main_loop(self):
    img = Image()
    r = rospy.Rate(self.fps)
    while not rospy.is_shutdown():
        image = self.camProxy.getImageRemote(self.nameId)
        stampAL = image[4] + image[5]*1e-6
        #print image[5], stampAL, "%lf"%(stampAL)
        img.header.stamp = rospy.Time(stampAL)
        img.header.frame_id = self.frame_id
        img.height = image[1]
        img.width = image[0]
        nbLayers = image[2]
        if image[3] == kYUVColorSpace:
            encoding = "mono8"
        elif image[3] == kRGBColorSpace:
            encoding = "rgb8"
        elif image[3] == kBGRColorSpace:
            encoding = "bgr8"
        elif image[3] == kYUV422ColorSpace:
            encoding = "yuv422"  # this works only in ROS Groovy and later
        else:
            rospy.logerr("Received unknown encoding: {0}".format(image[3]))
        img.encoding = encoding
        img.step = img.width * nbLayers
        img.data = image[6]

        infomsg = self.cim.getCameraInfo()
        infomsg.header = img.header
        self.pub_info_.publish(infomsg)
        self.pub_img_.publish(img)
        r.sleep()
    self.camProxy.unsubscribe(self.nameId)
Example 6: __init__
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def __init__(self):
    rospy.init_node('image_publish')
    name = sys.argv[1]
    image = cv2.imread(name)
    #cv2.imshow("im", image)
    #cv2.waitKey(5)

    hz = rospy.get_param("~rate", 1)
    frame_id = rospy.get_param("~frame_id", "map")
    use_image = rospy.get_param("~use_image", True)
    rate = rospy.Rate(hz)

    self.ci_in = None
    ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                              self.camera_info_callback, queue_size=1)

    if use_image:
        pub = rospy.Publisher('image', Image, queue_size=1)
    ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

    msg = Image()
    msg.header.stamp = rospy.Time.now()
    msg.header.frame_id = frame_id
    msg.encoding = 'bgr8'
    msg.height = image.shape[0]
    msg.width = image.shape[1]
    msg.step = image.shape[1] * 3
    msg.data = image.tostring()
    if use_image:
        pub.publish(msg)

    ci = CameraInfo()
    ci.header = msg.header
    ci.height = msg.height
    ci.width = msg.width
    ci.distortion_model = "plumb_bob"
    # TODO(lucasw) need a way to set these values - have this node
    # subscribe to an input CameraInfo?
    ci.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    ci.K = [500.0, 0.0, msg.width/2, 0.0, 500.0, msg.height/2, 0.0, 0.0, 1.0]
    ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    ci.P = [500.0, 0.0, msg.width/2, 0.0, 0.0, 500.0, msg.height/2, 0.0, 0.0, 0.0, 1.0, 0.0]
    # ci_pub.publish(ci)

    # TODO(lwalter) only run this if hz is positive,
    # otherwise wait for an input trigger message to publish an image
    while not rospy.is_shutdown():
        if self.ci_in is not None:
            ci = self.ci_in

        msg.header.stamp = rospy.Time.now()
        ci.header = msg.header
        if use_image:
            pub.publish(msg)
        ci_pub.publish(ci)

        if hz <= 0:
            rospy.sleep(1.0)  # rospy.sleep() requires a duration; poll once per second while waiting
        else:
            rate.sleep()
Example 7: publish
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def publish(self):
    # Get the image.
    image = self.__videoDeviceProxy.getImageRemote(self.__videoDeviceProxyName)

    # Create the Image message.
    ImageMessage = Image()
    ImageMessage.header.stamp.secs = image[5]
    ImageMessage.width = image[0]
    ImageMessage.height = image[1]
    ImageMessage.step = image[2] * image[0]
    ImageMessage.is_bigendian = False
    ImageMessage.encoding = 'bgr8'
    ImageMessage.data = image[6]
    self.__imagePublisher.publish(ImageMessage)

    # Create the CameraInfo message.
    # Data from the calibration phase is hard coded for now.
    CameraInfoMessage = CameraInfo()
    CameraInfoMessage.header.stamp.secs = image[5]
    CameraInfoMessage.width = image[0]
    CameraInfoMessage.height = image[1]
    CameraInfoMessage.D = [-0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0]
    CameraInfoMessage.K = [581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0]
    CameraInfoMessage.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    CameraInfoMessage.P = [580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0]
    CameraInfoMessage.distortion_model = 'plumb_bob'
    #CameraInfoMessage.roi.x_offset = self.__ROIXOffset
    #CameraInfoMessage.roi.y_offset = self.__ROIYOffset
    #CameraInfoMessage.roi.width = self.__ROIWidth
    #CameraInfoMessage.roi.height = self.__ROIHeight
    #CameraInfoMessage.roi.do_rectify = self.__ROIDoRectify
    self.__cameraInfoPublisher.publish(CameraInfoMessage)
Example 8: airpub
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def airpub():
    pub = rospy.Publisher("airsim/image_raw", Image, queue_size=1)
    rospy.init_node('image_raw', anonymous=True)
    rate = rospy.Rate(10)  # 10 Hz

    # connect to the AirSim simulator
    client = airsim.MultirotorClient()
    client.confirmConnection()

    while not rospy.is_shutdown():
        # get camera images from the car
        responses = client.simGetImages([
            airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)])  # scene vision image as an uncompressed RGBA array
        for response in responses:
            img_rgba_string = response.image_data_uint8
            # Populate the image message
            msg = Image()
            msg.header.stamp = rospy.Time.now()
            msg.header.frame_id = "frameId"
            msg.encoding = "rgba8"
            msg.height = 360  # resolution should match the values in settings.json
            msg.width = 640
            msg.data = img_rgba_string
            msg.is_bigendian = 0
            msg.step = msg.width * 4
            # log time and size of the published image
            rospy.loginfo(len(response.image_data_uint8))
            # publish the image message
            pub.publish(msg)
            # sleep until the next cycle
            rate.sleep()
Example 9: appendMessages
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def appendMessages(self, stamp, messages):
    if self._image is not None:
        # Build the image message and push it onto the message list
        msg = Image()
        msg.header.stamp = stamp
        msg.header.frame_id = self._frameId
        msg.width = self._image.shape[0]
        msg.height = self._image.shape[1]
        if (len(self._image.shape) == 2) or (len(self._image.shape) == 3 and self._image.shape[2] == 1):
            # A gray image
            msg.encoding = '8UC1'
            stepMult = 1
        elif len(self._image.shape) == 3 and self._image.shape[2] == 3:
            # A color image
            msg.encoding = 'rgb8'
            stepMult = 3
        elif len(self._image.shape) == 3 and self._image.shape[2] == 4:
            # A color image with an alpha channel
            msg.encoding = 'rgba8'
            stepMult = 4  # rgba8 has four bytes per pixel
        else:
            raise RuntimeError("The parsing of images is very simple. " +
                               "Only 3-channel rgb (rgb8), 4 channel rgba " +
                               "(rgba8) and 1 channel mono (mono8) are " +
                               "supported. Got an image with shape " +
                               "{0}".format(self._image.shape))
        msg.is_bigendian = False
        msg.step = stepMult * msg.width
        msg.data = self._image.flatten().tolist()
        messages.append((self._topic, msg))
Example 10: main_loop
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def main_loop(self):
    img = Image()
    while not rospy.is_shutdown():
        #print "getting image..",
        image = self.camProxy.getImageRemote(self.nameId)
        #print "ok"
        # TODO: better time
        img.header.stamp = rospy.Time.now()
        img.header.frame_id = self.frame_id
        img.height = image[1]
        img.width = image[0]
        nbLayers = image[2]
        #colorspace = image[3]
        if image[3] == kYUVColorSpace:
            encoding = "mono8"
        elif image[3] == kRGBColorSpace:
            encoding = "rgb8"
        elif image[3] == kBGRColorSpace:
            encoding = "bgr8"
        else:
            rospy.logerr("Received unknown encoding: {0}".format(image[3]))
        img.encoding = encoding
        img.step = img.width * nbLayers
        img.data = image[6]

        self.info_.width = img.width
        self.info_.height = img.height
        self.info_.header = img.header
        self.pub_img_.publish(img)
        self.pub_info_.publish(self.info_)
        rospy.sleep(0.0001)  # TODO: is this necessary?
    self.camProxy.unsubscribe(self.nameId)
Example 11: default
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def default(self, ci="unused"):
    if not self.component_instance.capturing:
        return  # press [Space] key to enable capturing

    image_local = self.data["image"]

    image = Image()
    image.header = self.get_ros_header()
    image.header.frame_id += "/base_image"
    image.height = self.component_instance.image_height
    image.width = self.component_instance.image_width
    image.encoding = "rgba8"
    image.step = image.width * 4
    # VideoTexture.ImageRender implements the buffer interface
    image.data = bytes(image_local)

    # fill in these three parameters to get a correct image with a stereo camera
    Tx = 0
    Ty = 0
    R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

    intrinsic = self.data["intrinsic_matrix"]

    camera_info = CameraInfo()
    camera_info.header = image.header
    camera_info.height = image.height
    camera_info.width = image.width
    camera_info.distortion_model = "plumb_bob"
    camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
    camera_info.R = R
    camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                     intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                     intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

    self.publish(image)
    self.topic_camera_info.publish(camera_info)
Example 12: copy_Image_empty_data
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def copy_Image_empty_data(image_old):
    image_new = Image()
    image_new.header = copy(image_old.header)
    image_new.height = copy(image_old.height)
    image_new.width = copy(image_old.width)
    image_new.encoding = copy(image_old.encoding)
    image_new.is_bigendian = copy(image_old.is_bigendian)
    image_new.step = copy(image_old.step)
    return image_new
Example 13: publishCombined
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def publishCombined(self):
    # Enter the main loop
    while not rospy.is_shutdown():
        # Convert to numpy arrays
        map = []
        for i in range(0, self.numRobots):
            map.append(numpy.array(self.searchedData[i].data))

        combined2 = map[0]
        if self.numRobots > 1:
            # Find the element-wise minimum of all maps
            for i in range(1, self.numRobots):
                combined2 = numpy.minimum(combined2, map[i])

        # Pack the OccupancyGrid message
        mapMsg = OccupancyGrid()
        mapMsg.header.stamp = rospy.Time.now()
        mapMsg.header.frame_id = self.mapData.header.frame_id
        mapMsg.info.resolution = self.mapData.info.resolution
        mapMsg.info.width = self.mapData.info.width
        mapMsg.info.height = self.mapData.info.height
        mapMsg.info.origin = self.mapData.info.origin
        mapMsg.data = combined2.tolist()

        # Convert the combined occupancy grid values to grayscale image values
        combined2[combined2 == -1] = 150   # Unknown: -1 -> 150 (gray)
        combined2[combined2 == 100] = 255  # Not searched: 100 -> 255 (white)
        # Searched = 0 (black)

        # Calculate the percentage of open area searched
        numNotSearched = combined2[combined2 == 255].size
        numSearched = combined2[combined2 == 0].size
        percentSearched = 100 * float(numSearched) / (numNotSearched + numSearched)
        percentSearchedMsg = Float32()
        percentSearchedMsg.data = percentSearched
        self.percentPub.publish(percentSearchedMsg)

        # Pack the Image message
        imageMsg = Image()
        imageMsg.header.stamp = rospy.Time.now()
        imageMsg.header.frame_id = self.mapData.header.frame_id
        imageMsg.height = self.mapData.info.height
        imageMsg.width = self.mapData.info.width
        imageMsg.encoding = 'mono8'
        imageMsg.is_bigendian = 0
        imageMsg.step = self.mapData.info.width
        imageMsg.data = combined2.tolist()

        # Publish the combined occupancy grid and image
        self.searchedCombinePub.publish(mapMsg)
        self.imagePub.publish(imageMsg)

        # Update once per second
        rospy.sleep(1.0)
Example 14: callback
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def callback(self, data):
    #print "got some shit coming in"
    #if data is not None:
        #plane = data.pose.position
        #nrm = norm([plane.x, plane.y, plane.z])
        #normal = np.array([plane.x, plane.y, plane.z])/nrm
        #print "got here"
        ##replace with numpy array
        #plane = np.array([plane.x, plane.y, plane.z])
        ##get the rotation matrix
        #rmatrix = rotationMatrix(normal)
        ##print rmatrix
        ##for point in data.points:
            ##print point
            ##p = np.array([point.x, point.y, point.z])
            ##flattened_point = project(p, plane, normal)
            ##print flattened_point
            ##print np.dot(rmatrix, flattened_point)
    try:
        resp = self.seperation()
        print resp.result
        #self.pub.publish(resp.clusters[0])
        im = Image()
        im.header.seq = 72
        im.header.stamp.secs = 1365037570
        im.header.stamp.nsecs = 34077284
        im.header.frame_id = '/camera_rgb_optical_frame'
        im.height = 480
        im.width = 640
        im.encoding = '16UC1'
        im.is_bigendian = 0
        im.step = 1280
        im.data = [100 for n in xrange(1280*480)]
        for point in resp.clusters[0].points:
            # list indices must be integers
            x = int(point.x * 640)
            y = int(point.y * 480)
            im.data[y*1280 + x] = 10
        pub_image.publish(im)
    except Exception, e:
        print "service call failed"
        print e
Example 15: publish_image
# Required import: from sensor_msgs.msg import Image [as alias]
# Alternatively: from sensor_msgs.msg.Image import step [as alias]
def publish_image(self):
    # get the image from the Nao
    img = self.nao_cam.getImageRemote(self.proxy_name)
    # copy the data into the ROS Image message
    ros_img = Image()
    ros_img.width = img[0]
    ros_img.height = img[1]
    ros_img.step = img[2] * img[0]
    ros_img.header.stamp.secs = img[5]
    ros_img.is_bigendian = False
    ros_img.encoding = "rgb8"
    ros_img.data = img[6]
    # publish the image
    self.nao_cam_pub.publish(ros_img)