This page collects typical usage examples of the Python method sensor_msgs.msg.Image.is_bigendian. If you are wondering what Image.is_bigendian does, how to call it, or want to see it used in real code, the curated examples below may help. You can also read more about the class the method belongs to, sensor_msgs.msg.Image.
Fifteen code examples of Image.is_bigendian are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
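Before the collected examples, here is a minimal orientation sketch showing what the is_bigendian field expresses when an Image message is filled by hand. It is not taken from any of the repositories below; the helper name and the rgb8 assumption are purely illustrative.

import sys
import numpy as np
from sensor_msgs.msg import Image

def rgb_array_to_image_msg(array, stamp, frame_id='camera'):
    # Illustrative helper: pack an HxWx3 uint8 RGB numpy array into a sensor_msgs/Image.
    msg = Image()
    msg.header.stamp = stamp
    msg.header.frame_id = frame_id
    msg.height, msg.width = array.shape[:2]
    msg.encoding = 'rgb8'
    # is_bigendian describes the byte order of multi-byte pixel values in msg.data.
    # For single-byte channels it makes no difference, and publishers on
    # little-endian hosts (x86, most ARM) conventionally set it to 0 / False.
    msg.is_bigendian = 1 if sys.byteorder == 'big' else 0
    msg.step = msg.width * 3  # bytes per row: width * channels * bytes per channel
    msg.data = np.ascontiguousarray(array).tobytes()
    return msg

The examples that follow show the same field being set in publisher nodes, in OccupancyGrid-to-image converters, and in numpy-to-Image helpers.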
Example 1: appendMessages
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def appendMessages(self, stamp, messages):
    if self._image is not None:
        # Build the image message and push it onto the message list
        msg = Image()
        msg.header.stamp = stamp
        msg.header.frame_id = self._frameId
        # numpy image arrays are indexed (rows, cols), i.e. (height, width)
        msg.height = self._image.shape[0]
        msg.width = self._image.shape[1]
        if (len(self._image.shape) == 2) or (len(self._image.shape) == 3 and self._image.shape[2] == 1):
            # A grayscale image
            msg.encoding = '8UC1'
            stepMult = 1
        elif len(self._image.shape) == 3 and self._image.shape[2] == 3:
            # A color image
            msg.encoding = 'rgb8'
            stepMult = 3
        elif len(self._image.shape) == 3 and self._image.shape[2] == 4:
            # A color image with an alpha channel: four bytes per pixel
            msg.encoding = 'rgba8'
            stepMult = 4
        else:
            raise RuntimeError("The parsing of images is very simple. "
                               "Only 3-channel rgb (rgb8), 4-channel rgba "
                               "(rgba8) and 1-channel mono (mono8) are "
                               "supported. Got an image with shape "
                               "{0}".format(self._image.shape))
        msg.is_bigendian = False
        msg.step = stepMult * msg.width
        msg.data = self._image.flatten().tolist()
        messages.append((self._topic, msg))
Example 2: airpub
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def airpub():
    pub = rospy.Publisher("airsim/image_raw", Image, queue_size=1)
    rospy.init_node('image_raw', anonymous=True)
    rate = rospy.Rate(10)  # 10 Hz

    # connect to the AirSim simulator
    client = airsim.MultirotorClient()
    client.confirmConnection()

    while not rospy.is_shutdown():
        # get camera images from the car
        responses = client.simGetImages([
            airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)])  # scene vision image in uncompressed RGBA array

        for response in responses:
            img_rgba_string = response.image_data_uint8

            # Populate image message
            msg = Image()
            msg.header.stamp = rospy.Time.now()
            msg.header.frame_id = "frameId"
            msg.encoding = "rgba8"
            msg.height = 360  # resolution should match values in settings.json
            msg.width = 640
            msg.data = img_rgba_string
            msg.is_bigendian = 0
            msg.step = msg.width * 4

            # log time and size of published image
            rospy.loginfo(len(response.image_data_uint8))
            # publish image message
            pub.publish(msg)

        # sleep until next cycle
        rate.sleep()
Example 3: convert
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def convert(data):
    width = data.info.width
    height = data.info.height
    pixelList = [0] * width * height

    imageMsg = Image()
    imageMsg.header.stamp = rospy.Time.now()
    imageMsg.header.frame_id = '1'
    imageMsg.height = height
    imageMsg.width = width
    imageMsg.encoding = 'mono8'
    imageMsg.is_bigendian = 0
    imageMsg.step = width

    for h in range(height):
        for w in range(width):
            if data.data[h*width+w] == -1:
                pixelList[h*width+w] = 150
            elif data.data[h*width+w] == 0:
                pixelList[h*width+w] = 0
            elif data.data[h*width+w] == 100:
                pixelList[h*width+w] = 255
            else:
                pixelList[h*width+w] = data.data[h*width+w]
                print('ERROR')

    imageMsg.data = pixelList
    imagePub.publish(imageMsg)
Example 4: publish
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def publish(self):
    # Get the image.
    image = self.__videoDeviceProxy.getImageRemote(self.__videoDeviceProxyName)

    # Create Image message.
    ImageMessage = Image()
    ImageMessage.header.stamp.secs = image[5]
    ImageMessage.width = image[0]
    ImageMessage.height = image[1]
    ImageMessage.step = image[2] * image[0]
    ImageMessage.is_bigendian = False
    ImageMessage.encoding = 'bgr8'
    ImageMessage.data = image[6]
    self.__imagePublisher.publish(ImageMessage)

    # Create CameraInfo message.
    # Data from the calibration phase is hard coded for now.
    CameraInfoMessage = CameraInfo()
    CameraInfoMessage.header.stamp.secs = image[5]
    CameraInfoMessage.width = image[0]
    CameraInfoMessage.height = image[1]
    CameraInfoMessage.D = [-0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0]
    CameraInfoMessage.K = [581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0]
    CameraInfoMessage.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    CameraInfoMessage.P = [580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0]
    CameraInfoMessage.distortion_model = 'plumb_bob'
    #CameraInfoMessage.roi.x_offset = self.__ROIXOffset
    #CameraInfoMessage.roi.y_offset = self.__ROIYOffset
    #CameraInfoMessage.roi.width = self.__ROIWidth
    #CameraInfoMessage.roi.height = self.__ROIHeight
    #CameraInfoMessage.roi.do_rectify = self.__ROIDoRectify
    self.__cameraInfoPublisher.publish(CameraInfoMessage)
Example 5: copy_Image_empty_data
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def copy_Image_empty_data(image_old):
    image_new = Image()
    image_new.header = copy(image_old.header)
    image_new.height = copy(image_old.height)
    image_new.width = copy(image_old.width)
    image_new.encoding = copy(image_old.encoding)
    image_new.is_bigendian = copy(image_old.is_bigendian)
    image_new.step = copy(image_old.step)
    return image_new
Example 6: publishCombined
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def publishCombined(self):
    # Enter main loop
    while not rospy.is_shutdown():
        # Convert to numpy arrays
        map = []
        for i in range(0, self.numRobots):
            map.append(numpy.array(self.searchedData[i].data))
        combined2 = map[0]
        if self.numRobots > 1:
            # Find minimum of all maps
            for i in range(1, self.numRobots):
                combined2 = numpy.minimum(combined2, map[i])

        # Pack occupancy grid message
        mapMsg = OccupancyGrid()
        mapMsg.header.stamp = rospy.Time.now()
        mapMsg.header.frame_id = self.mapData.header.frame_id
        mapMsg.info.resolution = self.mapData.info.resolution
        mapMsg.info.width = self.mapData.info.width
        mapMsg.info.height = self.mapData.info.height
        mapMsg.info.origin = self.mapData.info.origin
        mapMsg.data = combined2.tolist()

        # Convert combined occupancy grid values to grayscale image values
        combined2[combined2 == -1] = 150    # Unknown: -1 -> 150 (gray)
        combined2[combined2 == 100] = 255   # Not searched: 100 -> 255 (white)
        # Searched = 0 (black)

        # Calculate percentage of open area searched
        numNotSearched = combined2[combined2 == 255].size
        numSearched = combined2[combined2 == 0].size
        percentSearched = 100*float(numSearched)/(numNotSearched+numSearched)
        percentSearchedMsg = Float32()
        percentSearchedMsg.data = percentSearched
        self.percentPub.publish(percentSearchedMsg)

        # Pack image message
        imageMsg = Image()
        imageMsg.header.stamp = rospy.Time.now()
        imageMsg.header.frame_id = self.mapData.header.frame_id
        imageMsg.height = self.mapData.info.height
        imageMsg.width = self.mapData.info.width
        imageMsg.encoding = 'mono8'
        imageMsg.is_bigendian = 0
        imageMsg.step = self.mapData.info.width
        imageMsg.data = combined2.tolist()

        # Publish combined occupancy grid and image
        self.searchedCombinePub.publish(mapMsg)
        self.imagePub.publish(imageMsg)

        # Update every 1.0 second
        rospy.sleep(1.0)
Example 7: callback
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def callback(self, data):
    #print "got some shit coming in"
    #if data is not None:
    #    plane = data.pose.position
    #    nrm = norm([plane.x, plane.y, plane.z])
    #    normal = np.array([plane.x, plane.y, plane.z])/nrm
    #    print "got here"
    #    # replace with numpy array
    #    plane = np.array([plane.x, plane.y, plane.z])
    #    # get the rotation matrix
    #    rmatrix = rotationMatrix(normal)
    #    # print rmatrix
    #    # for point in data.points:
    #    #     print point
    #    #     p = np.array([point.x, point.y, point.z])
    #    #     flattened_point = project(p, plane, normal)
    #    #     print flattened_point
    #    #     print np.dot(rmatrix, flattened_point)
    try:
        resp = self.seperation()
        print(resp.result)
        #self.pub.publish(resp.clusters[0])
        im = Image()
        im.header.seq = 72
        im.header.stamp.secs = 1365037570
        im.header.stamp.nsecs = 34077284
        im.header.frame_id = '/camera_rgb_optical_frame'
        im.height = 480
        im.width = 640
        im.encoding = '16UC1'
        im.is_bigendian = 0
        im.step = 1280
        im.data = [100 for n in xrange(1280*480)]
        for point in resp.clusters[0].points:
            # pixel coordinates must be integers before indexing into the data list
            x = int(point.x * 640)
            y = int(point.y * 480)
            im.data[y*1280 + x] = 10
        pub_image.publish(im)
    except Exception as e:
        print("service call failed")
        print(e)
Example 8: publish_image
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def publish_image(self):
    # get the image from the Nao
    img = self.nao_cam.getImageRemote(self.proxy_name)

    # copy the data into the ROS Image
    ros_img = Image()
    ros_img.width = img[0]
    ros_img.height = img[1]
    ros_img.step = img[2] * img[0]
    ros_img.header.stamp.secs = img[5]
    ros_img.is_bigendian = False
    ros_img.encoding = "rgb8"
    ros_img.data = img[6]

    # publish the image
    self.nao_cam_pub.publish(ros_img)
Example 9: OccupancyGridToNavImage
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def OccupancyGridToNavImage(self, grid_map):
    img = Image()
    img.header = grid_map.header
    img.height = grid_map.info.height
    img.width = grid_map.info.width
    img.is_bigendian = 1
    img.step = grid_map.info.width
    img.encoding = "mono8"
    maxindex = img.height * img.width
    # Threshold the occupancy values into a binary mono8 buffer:
    # cells with occupancy below 20 become black (0), everything else white (255).
    data = []
    for i in range(0, maxindex, 1):
        if int(grid_map.data[i]) < 20:
            data.append(0)
        else:
            data.append(255)
    img.data = data
    return img
Example 10: array_to_image
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def array_to_image(array):
    """Takes an NxMx3 array and converts it into a ROS image message."""
    # Sanity check the input array shape
    if len(array.shape) != 3 or array.shape[2] != 3:
        raise ValueError('Array must have shape NxMx3')

    # Ravel the array into a single buffer
    image_data = (array.astype(np.uint8)).tostring(order='C')

    # Create the image message
    image_msg = Image()
    image_msg.height = array.shape[0]
    image_msg.width = array.shape[1]
    image_msg.encoding = 'rgb8'
    image_msg.is_bigendian = 0
    image_msg.step = array.shape[1] * 3
    image_msg.data = image_data
    return image_msg
Example 11: main
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def main():
    pub = rospy.Publisher('image_maker', Image)
    rospy.init_node('image_maker')
    #rospy.wait_for_service('tabletop_segmentation')

    im = Image()
    im.header.seq = 72
    im.header.stamp.secs = 1365037570
    im.header.stamp.nsecs = 34077284
    im.header.frame_id = '/camera_rgb_optical_frame'
    im.height = 480
    im.width = 640
    im.encoding = '16UC1'
    im.is_bigendian = 0
    im.step = 1280
    im.data = [100 for n in xrange(1280*480)]

    while not rospy.is_shutdown():
        try:
            pub.publish(im)
            sleep(.5)
        except Exception as e:
            print(e)
Example 12: numpy_to_image
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def numpy_to_image(arr, encoding):
    if encoding not in name_to_dtypes:
        raise TypeError('Unrecognized encoding {}'.format(encoding))

    im = Image(encoding=encoding)

    # extract width, height, and channels
    dtype_class, exp_channels = name_to_dtypes[encoding]
    dtype = np.dtype(dtype_class)
    if len(arr.shape) == 2:
        im.height, im.width, channels = arr.shape + (1,)
    elif len(arr.shape) == 3:
        im.height, im.width, channels = arr.shape
    else:
        raise TypeError("Array must be two or three dimensional")

    # check type and channels
    if exp_channels != channels:
        raise TypeError("Array has {} channels, {} requires {}".format(
            channels, encoding, exp_channels
        ))
    if dtype_class != arr.dtype.type:
        raise TypeError("Array is {}, {} requires {}".format(
            arr.dtype.type, encoding, dtype_class
        ))

    # make the array contiguous in memory, as mostly required by the format
    contig = np.ascontiguousarray(arr)
    im.data = contig.tostring()
    im.step = contig.strides[0]
    im.is_bigendian = (
        arr.dtype.byteorder == '>' or
        arr.dtype.byteorder == '=' and sys.byteorder == 'big'
    )
    return im
Example 13:
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
UDK_camera = (240, 320)
# box(left, upper, right, lower)
box_A = (0, 0, UDK_camera[1], UDK_camera[0])
box_B = (UDK_camera[1], 0, UDK_camera[1]*2, UDK_camera[0])
box_C = (UDK_camera[1]*2, 0, UDK_camera[1]*3, UDK_camera[0])
box_D = (UDK_camera[1]*3, 0, UDK_camera[1]*4, UDK_camera[0])

clientsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsock.connect((host, port))
c_msg = 'OK\r\n'
clientsock.sendall(c_msg)

# set image info
img_A = Image()
img_A.encoding = 'rgb8'
img_A.is_bigendian = 0
img_A.step = 1
img_A.height = UDK_camera[0]
img_A.width = UDK_camera[1]

img_B = Image()
img_B.encoding = 'rgb8'
img_B.is_bigendian = 0
img_B.step = 1
img_B.height = UDK_camera[0]
img_B.width = UDK_camera[1]

img_C = Image()
img_C.encoding = 'rgb8'
img_C.is_bigendian = 0
img_C.step = 1
Example 14: spin
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def spin(self):
    while not rospy.is_shutdown():
        m = MarkerArray()

        stamp, f1_tact, f2_tact, f3_tact, palm_tact = self.bh.getTactileData()

        finger_skin_data = []
        finger_skin_data.append(f1_tact)
        finger_skin_data.append(f2_tact)
        finger_skin_data.append(f3_tact)
        finger_skin_data.append(palm_tact)

        frame_id = []
        frame_id.append(self.prefix+"_HandFingerOneKnuckleThreeLink")
        frame_id.append(self.prefix+"_HandFingerTwoKnuckleThreeLink")
        frame_id.append(self.prefix+"_HandFingerThreeKnuckleThreeLink")
        frame_id.append(self.prefix+"_HandPalmLink")

        arrows = False
        if arrows:
            for sens in range(0, 4):
                for i in range(0, 24):
                    halfside1 = PyKDL.Vector(pressure_info.sensor[sens].halfside1[i].x, pressure_info.sensor[sens].halfside1[i].y, pressure_info.sensor[sens].halfside1[i].z)
                    halfside2 = PyKDL.Vector(pressure_info.sensor[sens].halfside2[i].x, pressure_info.sensor[sens].halfside2[i].y, pressure_info.sensor[sens].halfside2[i].z)
                    # calculate cross product: halfside1 x halfside2
                    norm = halfside1*halfside2
                    norm.Normalize()
                    scale = 0
                    if sens == 0:
                        scale = data.finger1_tip[i]/256.0
                    elif sens == 1:
                        scale = data.finger2_tip[i]/256.0
                    elif sens == 2:
                        scale = data.finger3_tip[i]/256.0
                    else:
                        scale = data.palm_tip[i]/256.0
                    norm = norm*scale*0.01
                    marker = Marker()
                    marker.header.frame_id = frame_id[sens]
                    marker.header.stamp = rospy.Time.now()
                    marker.ns = frame_id[sens]
                    marker.id = i
                    marker.type = 0
                    marker.action = 0
                    marker.points.append(Point(pressure_info.sensor[sens].center[i].x, pressure_info.sensor[sens].center[i].y, pressure_info.sensor[sens].center[i].z))
                    marker.points.append(Point(pressure_info.sensor[sens].center[i].x+norm.x(), pressure_info.sensor[sens].center[i].y+norm.y(), pressure_info.sensor[sens].center[i].z+norm.z()))
                    marker.pose = Pose(Point(0,0,0), Quaternion(0,0,0,1))
                    marker.scale = Vector3(0.001, 0.002, 0)
                    marker.color = ColorRGBA(1,0,0,1)
                    m.markers.append(marker)
            self.pub.publish(m)
        else:
            for sens in range(0, 4):
                for i in range(0, 24):
                    value = self.convertToRGB(int(finger_skin_data[sens][i]/2))
                    marker = Marker()
                    marker.header.frame_id = frame_id[sens]
                    marker.header.stamp = rospy.Time.now()
                    marker.ns = frame_id[sens]
                    marker.id = i
                    marker.type = 1
                    marker.action = 0
                    if sens < 3:
                        marker.pose = pm.toMsg(self.bh.pressure_frames[i])
                        marker.scale = Vector3(self.bh.pressure_cells_size[i][0], self.bh.pressure_cells_size[i][1], 0.004)
                    else:
                        marker.pose = pm.toMsg(self.bh.palm_pressure_frames[i])
                        marker.scale = Vector3(self.bh.palm_pressure_cells_size[i][0], self.bh.palm_pressure_cells_size[i][1], 0.004)
                    marker.color = ColorRGBA(1.0/255.0*value[0], 1.0/255.0*value[1], 1.0/255.0*value[2], 1)
                    m.markers.append(marker)
            self.pub.publish(m)

        # Layout of the tactile cells in the output image:
        # palm f1 f2 f3
        # xxx xxx xxx
        # xxx xxx xxx
        # xxxxx xxx xxx xxx
        # xxxxxxx xxx xxx xxx
        # 56789xx 9xx 9xx 9xx
        # 01234 678 678 678
        # 345 345 345
        # 012 012 012
        im = Image()
        im.height = 8
        im.width = 19
        im.encoding = "rgb8"
        im.is_bigendian = 0
        im.step = im.width*3
        im.data = [0]*(im.step*im.height)
        for finger in range(0, 3):
            for y in range(0, 8):
                for x in range(0, 3):
                    xim = 8 + x + finger * 4
                    yim = im.height-1-y
                    value = self.convertToRGB(int(finger_skin_data[finger][y*3+x]/2))
                    im.data[(yim*im.width + xim)*3+0] = value[0]
#......... remainder of the code omitted .........
Example 15: find_object
# Required imports: from sensor_msgs.msg import Image [as alias]
# Or: from sensor_msgs.msg.Image import is_bigendian [as alias]
def find_object(self):
    with self.lock_depth:
        image_data = self.image_data
        depth_data = self.depth_data

    pub_data = rospy.Publisher('/racecar/image/data', Float64MultiArray, queue_size=1)
    arrayData = Float64MultiArray()
    #print "received image"
    if image_data:
        newImage = Image()
        newImage.encoding = image_data.encoding
        newImage.header = image_data.header
        newImage.is_bigendian = image_data.is_bigendian
        newData = bytearray()
        image = image_data.data
        clusters = []
        white = chr(255) * 3
        black = chr(0) * 3
        red = chr(0) + chr(0) + chr(255)
        blue = chr(255) + chr(0) * 2

        # defining color search thresholds
        redThreshold = 230
        blueThreshold = 20
        greenLowerThreshold = 50
        greenUpperThreshold = 200
        height = 0
        starttime = time.time()
        for h in xrange(0, image_data.height, self.resolution):
            height += 1
            for w in xrange(0, image_data.width, self.resolution):
                index = w * 3 + image_data.width * 3 * h
                b = ord(image[index])
                g = ord(image[index + 1])
                r = ord(image[index + 2])
                # filtering for the color
                if r > redThreshold and b < blueThreshold and g <= greenUpperThreshold and g >= greenLowerThreshold:
                    for char in white:
                        newData.append(char)
                    inCluster = False
                    for cluster in clusters:
                        if self.checkAdjacency((w, h), cluster):
                            cluster.add((w, h))
                            inCluster = True
                    if not inCluster:
                        newCluster = set()
                        newCluster.add((w, h))
                        clusters.append(newCluster)
                else:
                    for char in black:
                        newData.append(char)

        newImage.height = height
        newImage.width = len(newData)/height/3
        newImage.step = len(newData)/height
        newImage.data = str(newData)
        #print 'time taken: ', time.time() - starttime
        clusters = self.reduceClusters(clusters)
        #print len(clusters), float(len(candidates_x))/len(newData)
        #print [len(cluster) for cluster in clusters]
        if len(clusters) > 1:
            print("multiple objects found")
            print([len(cluster) for cluster in clusters])
        if len(clusters):
            publishableClusters = [max(clusters, key=lambda x: len(x))]
            data_to_publish = []
            for cluster in publishableClusters:
                centerX = sum(x for (x, y) in cluster)/len(cluster)
                centerY = sum(y for (x, y) in cluster)/len(cluster)
                width = max(cluster, key=lambda (x, y): x)[0] - min(cluster, key=lambda (x, y): x)[0]
                height = max(cluster, key=lambda (x, y): y)[1] - min(cluster, key=lambda (x, y): y)[1]
                relativeLocation = float(centerX)/image_data.width - 0.5
                relativeAngle = relativeLocation * 100 * math.pi / 180
                data_to_publish += [centerX, centerY, width, height, relativeAngle]
                #print 'center ', centerX, centerY, len(candidates_x)
                #print 'distance ', depth1, depth2
                #print 'dim ', height, width
                '''depth_index = centerX * 2 + image_data.width * 2 * centerY
                depth1 = ord(depth_data.data[depth_index])
                depth2 = ord(depth_data.data[depth_index + 1])'''
            arrayData.data = data_to_publish
        else:
            arrayData.data = [-1, -1, -1, -1, -1]
            print('no cone found')
        pub_data.publish(arrayData)
        #print "publishing"
        pub_img = rospy.Publisher('/racecar/image/newImage', Image, queue_size=1)
        pub_img.publish(newImage)
    else:
        arrayData.data = [-1, -1, -1, -1]
#......... remainder of the code omitted .........