This article collects typical usage examples of the OccupancyGrid.data[idx] pattern from nav_msgs.msg in Python. If you have been wondering exactly what OccupancyGrid.data[idx] does, how to use it, or want to see it in real code, the curated examples below may help. You can also read further about the containing class, nav_msgs.msg.OccupancyGrid.
Five code examples of OccupancyGrid.data[idx] are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
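All five examples share one core pattern: allocate map.data as a flat list of width × height cells, compute a row-major index, and write each cell through map.data[idx]. Here is a minimal, self-contained sketch of just that pattern; the grid size, resolution, and marked cell are illustrative values, not taken from the examples:

from nav_msgs.msg import OccupancyGrid

def make_grid(n=10, resolution=0.05):
    """ Build an n-by-n OccupancyGrid and mark one cell via map.data[idx] """
    grid = OccupancyGrid()
    grid.info.width = n
    grid.info.height = n
    grid.info.resolution = resolution  # meters per cell
    grid.data = [-1] * n ** 2          # flat row-major list; -1 means unknown
    i, j = 3, 4                        # (x, y) cell to mark
    idx = i + n * j                    # row-major index, as in the examples below
    grid.data[idx] = 100               # 100 = occupied, 0 = free, -1 = unknown
    return grid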
Example 1: process_scan
# Required import: from nav_msgs.msg import OccupancyGrid [as alias]
# Or: from nav_msgs.msg.OccupancyGrid import data[idx] [as alias]
def process_scan(self, msg):
""" Callback function for the laser scan messages """
if len(msg.ranges) <= 330:
# throw out scans that don't have more than 90% of the data
return
# get pose according to the odometry
p = PoseStamped(header=Header(stamp=msg.header.stamp, frame_id="base_link"), pose=Pose())
self.odom_pose = self.tf_listener.transformPose("odom", p)
self.base_pose = self.tf_listener.transformPose("base_laser_link", p)
# convert the odom pose to the tuple (x,y,theta)
self.odom_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)
self.base_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.base_pose.pose)
for i in range(len(msg.ranges)):
        if 0.0 < msg.ranges[i] < 5.0:  # keep any reading within 5 meters
            # using the pose, the range measurement, and the beam angle, find the hit point in the world frame
map_x = self.odom_pose[0] + msg.ranges[i] * cos(i * pi / 180.0 + self.odom_pose[2])
map_y = self.odom_pose[1] + msg.ranges[i] * sin(i * pi / 180.0 + self.odom_pose[2])
#Relate that map measure with a place in the picture
x_detect = int((map_x - self.origin[0]) / self.resolution)
y_detect = int((map_y - self.origin[1]) / self.resolution)
#Determine how to mark the location in the map, along with the stuff inbetween
u = (map_x - self.odom_pose[0], map_y - self.odom_pose[1])
magnitude = sqrt(u[0] ** 2 + u[1] ** 2)
n_steps = max([1, int(ceil(magnitude / self.resolution))])
                u_step = (u[0] / max(n_steps - 1, 1), u[1] / max(n_steps - 1, 1))  # avoid division by zero when n_steps == 1
marked = set()
                for step in range(n_steps):  # use a distinct name so the outer loop variable i is not shadowed
                    curr_x = self.odom_pose[0] + step * u_step[0]
                    curr_y = self.odom_pose[1] + step * u_step[1]
if not (self.is_in_map(curr_x, curr_y)):
break
x_ind = int((curr_x - self.origin[0]) / self.resolution)
y_ind = int((curr_y - self.origin[1]) / self.resolution)
if x_ind == x_detect and y_ind == y_detect:
break
if not ((x_ind, y_ind) in marked):
# odds ratio is updated according to the inverse sensor model
self.odds_ratios[x_ind, y_ind] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_miss
marked.add((x_ind, y_ind))
if self.is_in_map(map_x, map_y):
# odds ratio is updated according to the inverse sensor model
self.odds_ratios[x_detect, y_detect] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_hit
self.seq += 1
# to save time, only publish the map every 10 scans that we process
if self.seq % 10 == 0:
# make occupancy grid
map = OccupancyGrid()
map.header.seq = self.seq
self.seq += 1
map.header.stamp = msg.header.stamp
map.header.frame_id = "map" # the name of the coordinate frame of the map
map.info.origin.position.x = self.origin[0]
map.info.origin.position.y = self.origin[1]
map.info.width = self.n
map.info.height = self.n
map.info.resolution = self.resolution
map.data = [0] * self.n ** 2 # map.data stores the n by n grid in row-major order
for i in range(self.n):
for j in range(self.n):
idx = i + self.n * j # this implements row major order
if self.odds_ratios[i, j] < 1 / 5.0: # consider a cell free if odds ratio is low enough
map.data[idx] = 0
elif self.odds_ratios[i, j] > 5.0: # consider a cell occupied if odds ratio is high enough
map.data[idx] = 100
else: # otherwise cell is unknown
map.data[idx] = -1
self.pub.publish(map)
# create the image from the probabilities so we can visualize using opencv
im = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))
for i in range(im.shape[0]):
for j in range(im.shape[1]):
if self.odds_ratios[i, j] < 1 / 5.0:
im[i, j, :] = 1.0
elif self.odds_ratios[i, j] > 5.0:
im[i, j, :] = 0.0
else:
im[i, j, :] = 0.5
# compute the index of the odometry pose so we can mark it with a circle
x_odom_index = int((self.odom_pose[0] - self.origin[0]) / self.resolution)
y_odom_index = int((self.odom_pose[1] - self.origin[1]) / self.resolution)
x_base_index = int((self.base_pose[0] - self.origin[0] - 1) / self.resolution)
y_base_index = int((self.base_pose[1] - self.origin[1]) / self.resolution)
        # compute the ball locations so we can mark them with colored circles
#TODO Track and relate the robot's angle pose for accuracy
if self.depth_red > 0:
self.y_camera_red = int(x_odom_index - self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)
self.x_camera_red = int(y_odom_index - self.depth_red * -sin(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)
#......... rest of this code omitted .........
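The self.odds_ratios update in this example is a Bayes update in odds form: each cell a beam passes through has its odds multiplied by a miss ratio, and the cell the beam terminates in is multiplied by a hit ratio; the 1/5 and 5 cutoffs then map odds back to cell values. A minimal sketch of both pieces, with illustrative parameter values rather than the example's actual tuning:

def update_cell_odds(odds, hit, p_occ=0.5, odds_ratio_hit=3.0, odds_ratio_miss=0.2):
    """ One inverse-sensor-model update in odds form, mirroring the example above """
    return odds * (p_occ / (1 - p_occ)) * (odds_ratio_hit if hit else odds_ratio_miss)

def odds_to_cell_value(odds):
    """ Convert an odds ratio to an OccupancyGrid cell value with the example's 1/5 and 5 cutoffs """
    if odds < 1 / 5.0:
        return 0    # free
    elif odds > 5.0:
        return 100  # occupied
    return -1       # unknown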
Example 2: scan_received
# Required import: from nav_msgs.msg import OccupancyGrid [as alias]
# Or: from nav_msgs.msg.OccupancyGrid import data[idx] [as alias]
def scan_received(self, msg):
""" Returns an occupancy grid to publish data to map"""
if len(msg.ranges) != 360:
return
#make a pose stamp that relates to the odom of the robot
p = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="base_link"), pose=Pose())
self.odom_pose = self.tf_listener.transformPose("odom", p)
# convert the odom pose to the tuple (x,y,theta)
self.odom_pose = TransformHelpers.convert_pose_to_xy_and_theta(self.odom_pose.pose)
#local variable used to take odds samples for future comparisons
time_past = 0
for degree in range(360):
if msg.ranges[degree] > 0.0 and msg.ranges[degree] < 5.0:
#gets the position of the laser data points
data_x = self.odom_pose[0] + msg.ranges[degree]*math.cos(degree*math.pi/180.0 + self.odom_pose[2])
data_y = self.odom_pose[1] + msg.ranges[degree]*math.sin(degree*math.pi/180+self.odom_pose[2])
#maps laser data to a pixel in the map
datax_pixel = int((data_x - self.origin[0])/self.resolution)
datay_pixel = int((data_y - self.origin[1])/self.resolution)
#maps the robot to a position
robot = (data_x - self.odom_pose[0], data_y - self.odom_pose[1])
#finds how far away the point is from the robot
magnitude = math.sqrt(robot[0]**2 + robot[1]**2)
#converts magnitude and robot position to pixels in the map
n_steps = max([1, int(math.ceil(magnitude/self.resolution))])
                robot_step = (robot[0]/max(n_steps - 1, 1), robot[1]/max(n_steps - 1, 1))  # avoid division by zero when n_steps == 1
marked = set()
for pixel in range(n_steps):
curr_x = self.odom_pose[0] + pixel*robot_step[0]
curr_y = self.odom_pose[1] + pixel*robot_step[1]
                    if not self.is_in_map(curr_x, curr_y):
                        # stop tracing once the ray leaves the map
                        break
x_ind = int((curr_x - self.origin[0])/self.resolution)
y_ind = int((curr_y - self.origin[1])/self.resolution)
if x_ind == datax_pixel and y_ind==datay_pixel and self.odds_ratios[datax_pixel, datay_pixel] >= 1/60.0:
#set odds ratio equal to past odds ratio
if time_past % 5 == 0:
self.past_odds_ratios[datax_pixel, datay_pixel]=self.odds_ratios[datax_pixel, datay_pixel]
time_past += 1
self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_hit
if not((x_ind, y_ind) in marked) and self.odds_ratios[x_ind, y_ind] >= 1/60.0:
#If point isn't marked, update the odds of missing and add to the map
if time_past % 5 == 0:
self.past_odds_ratios[x_ind, y_ind]=self.odds_ratios[x_ind, y_ind]
time_past += 1
self.odds_ratios[x_ind, y_ind] *= self.p_occ[x_ind, y_ind] / (1-self.p_occ[x_ind, y_ind]) * self.odds_ratio_miss
marked.add((x_ind, y_ind))
            if self.is_in_map(data_x, data_y) and self.odds_ratios[datax_pixel, datay_pixel] >= 1/60.0:
                # if the hit cell is inside the map, update its odds of being occupied
if time_past % 5 == 0:
self.past_odds_ratios[datax_pixel, datay_pixel]=self.odds_ratios[datax_pixel, datay_pixel]
time_past += 1
self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_hit
self.seq += 1
if self.seq % 10 == 0:
map = OccupancyGrid() #this is a nav msg class
map.header.seq = self.seq
map.header.stamp = msg.header.stamp
map.header.frame_id = "map"
map.header.frame_id = "past_map"
map.info.origin.position.x = self.origin[0]
map.info.origin.position.y = self.origin[1]
map.info.width = self.n
map.info.height = self.n
map.info.resolution = self.resolution
        map.data = [0]*self.n**2  # flat list of n*n cells in row-major order
        for i in range(self.n):
            # fill in the occupancy value for each (i, j) grid cell
            for j in range(self.n):
                idx = i + self.n*j  # row-major order (i is x, j is y)
                if self.odds_ratios[i,j] < 1/50.0:
                    map.data[idx] = 0  # free (rendered gray)
                elif 1/50.0 <= self.odds_ratios[i,j] < 4.0/5.0:
                    map.data[idx] = 25
                elif self.odds_ratios[i,j] > 50:
                    map.data[idx] = 100  # occupied (rendered as black walls)
                else:
                    map.data[idx] = -1  # unknown
self.pub.publish(map)
image = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1],3))
image2 = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1],3))
self.counter+=1
x_odom_index = int((self.odom_pose[0] - self.origin[0])/self.resolution)
y_odom_index = int((self.odom_pose[1] - self.origin[1])/self.resolution)
#......... rest of this code omitted .........
Example 3: scan_received
# Required import: from nav_msgs.msg import OccupancyGrid [as alias]
# Or: from nav_msgs.msg.OccupancyGrid import data[idx] [as alias]
def scan_received(self, msg):
""" Returns an occupancy grid to publish data to map"""
if len(msg.ranges) != 360:
return
#make a pose stamp that relates to the odom of the robot
p = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="base_link"), pose=Pose())
self.odom_pose = self.tf_listener.transformPose("odom", p)
# convert the odom pose to the tuple (x,y,theta)
self.odom_pose = TransformHelpers.convert_pose_to_xy_and_theta(self.odom_pose.pose)
for degree in range(360):
if msg.ranges[degree] > 0.0 and msg.ranges[degree] < 5.0:
#gets the position of the laser data points
data_x = self.odom_pose[0] + msg.ranges[degree]*math.cos(degree*math.pi/180.0 + self.odom_pose[2])
data_y = self.odom_pose[1] + msg.ranges[degree]*math.sin(degree*math.pi/180+self.odom_pose[2])
#maps laser data to a pixel in the map
datax_pixel = int((data_x - self.origin[0])/self.resolution)
datay_pixel = int((data_y - self.origin[1])/self.resolution)
#maps the robot to a position
robot = (data_x - self.odom_pose[0], data_y - self.odom_pose[1])
#finds how far away the point is from the robot
magnitude = math.sqrt(robot[0]**2 + robot[1]**2)
#converts magnitude and robot position to pixels in the map
n_steps = max([1, int(math.ceil(magnitude/self.resolution))])
                robot_step = (robot[0]/max(n_steps - 1, 1), robot[1]/max(n_steps - 1, 1))  # avoid division by zero when n_steps == 1
marked = set()
for pixel in range(n_steps):
curr_x = self.odom_pose[0] + pixel*robot_step[0]
curr_y = self.odom_pose[1] + pixel*robot_step[1]
                    if not self.is_in_map(curr_x, curr_y):
                        # stop tracing once the ray leaves the map
                        break
x_ind = int((curr_x - self.origin[0])/self.resolution)
y_ind = int((curr_y - self.origin[1])/self.resolution)
if x_ind == datax_pixel and y_ind==datay_pixel:
#set odds ratio equal to past odds ratio
self.past_odds_ratios[datax_pixel, datay_pixel]=self.odds_ratios[datax_pixel, datay_pixel]
self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_hit
if not((x_ind, y_ind) in marked) and self.odds_ratios[x_ind, y_ind] >= 1/60.0:
                    # if the point isn't marked, update its odds of being missed and add it to the map
                    self.past_odds_ratios[x_ind, y_ind] = self.odds_ratios[x_ind, y_ind]
self.odds_ratios[x_ind, y_ind] *= self.p_occ[x_ind, y_ind] / (1-self.p_occ[x_ind, y_ind]) * self.odds_ratio_miss
#self.p_occ[x_ind, y_ind] *= self.p_occ[x_ind, y_ind] * self.odds_ratio_miss/self.odds_ratio_hit
marked.add((x_ind, y_ind))
#print 'New Point'
            if self.is_in_map(data_x, data_y):
                # if the hit cell is inside the map, update its odds of being occupied
self.past_odds_ratios[datax_pixel, datay_pixel]=self.odds_ratios[datax_pixel, datay_pixel]
self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_hit
#print 'Here'
#self.p_occ[datax_pixel, datay_pixel] *= self.odds_ratios[datax_pixel, datay_pixel]*self.odds_ratio_hit
#else:
# self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_miss
# self.p_occ[datax_pixel, datay_pixel] *= self.odds_ratio_hit
self.seq += 1
if self.seq % 10 == 0:
map = OccupancyGrid() #this is a nav msg class
map.header.seq = self.seq
map.header.stamp = msg.header.stamp
map.header.frame_id = "map"
map.header.frame_id = "past_map"
map.info.origin.position.x = self.origin[0]
map.info.origin.position.y = self.origin[1]
map.info.width = self.n
map.info.height = self.n
map.info.resolution = self.resolution
        map.data = [0]*self.n**2  # flat list of n*n cells in row-major order
        for i in range(self.n):
            # fill in the occupancy value for each (i, j) grid cell
            for j in range(self.n):
                idx = i + self.n*j  # row-major order (i is x, j is y)
                if self.odds_ratios[i,j] < 1/5.0:
                    map.data[idx] = 0  # free (rendered gray)
                elif 1/5.0 <= self.odds_ratios[i,j] < 0.5:
                    map.data[idx] = 25
                elif self.odds_ratios[i,j] > 0.5:
                    map.data[idx] = 100  # occupied (rendered as black walls)
                else:
                    map.data[idx] = -1  # unknown
self.pub.publish(map)
#TODO: Change display such that we're not just looking at the ratio, but mapping the dynamic archive and current readings
image = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1],3))
image2 = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1],3))
self.counter+=1
        # .shape is an attribute of the numpy arrays
for i in range(image.shape[0]):
for j in range(image.shape[1]):
#print self.past_odds_ratios[i,j]
#print self.odds_ratios[i,j]
delta = self.odds_ratios[i,j]-self.past_odds_ratios[i,j]
#......... rest of this code omitted .........
Example 4: scan_received
# Required import: from nav_msgs.msg import OccupancyGrid [as alias]
# Or: from nav_msgs.msg.OccupancyGrid import data[idx] [as alias]
def scan_received(self, msg):
""" Returns an occupancy grid to publish data to map"""
if len(msg.ranges) != 360:
return
#make a pose stamp that relates to the odom of the robot
p = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="base_link"), pose=Pose())
self.odom_pose = self.tf_listener.transformPose("odom", p)
# convert the odom pose to the tuple (x,y,theta)
self.odom_pose = TransformHelpers.convert_pose_to_xy_and_theta(self.odom_pose.pose)
time_past = 0
for degree in range(360):
if msg.ranges[degree] > 0.0 and msg.ranges[degree] < 5.0:
#gets the position of the laser data points
data_x = self.odom_pose[0] + msg.ranges[degree]*math.cos(degree*math.pi/180.0 + self.odom_pose[2])
data_y = self.odom_pose[1] + msg.ranges[degree]*math.sin(degree*math.pi/180+self.odom_pose[2])
#maps laser data to a pixel in the map
datax_pixel = int((data_x - self.origin[0])/self.resolution)
datay_pixel = int((data_y - self.origin[1])/self.resolution)
#maps the robot to a position
robot = (data_x - self.odom_pose[0], data_y - self.odom_pose[1])
#finds how far away the point is from the robot
magnitude = math.sqrt(robot[0]**2 + robot[1]**2)
#converts magnitude and robot position to pixels in the map
n_steps = max([1, int(math.ceil(magnitude/self.resolution))])
                robot_step = (robot[0]/max(n_steps - 1, 1), robot[1]/max(n_steps - 1, 1))  # avoid division by zero when n_steps == 1
marked = set()
for pixel in range(n_steps):
curr_x = self.odom_pose[0] + pixel*robot_step[0]
curr_y = self.odom_pose[1] + pixel*robot_step[1]
                    if not self.is_in_map(curr_x, curr_y):
                        # stop tracing once the ray leaves the map
                        break
x_ind = int((curr_x - self.origin[0])/self.resolution)
y_ind = int((curr_y - self.origin[1])/self.resolution)
if x_ind == datax_pixel and y_ind==datay_pixel and self.odds_ratios[datax_pixel, datay_pixel] >= 1/60.0:
#set odds ratio equal to past odds ratio
if time_past % 5 == 0:
self.past_odds_ratios[datax_pixel, datay_pixel]=self.odds_ratios[datax_pixel, datay_pixel]
time_past += 1
self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_hit
if not((x_ind, y_ind) in marked) and self.odds_ratios[x_ind, y_ind] >= 1/60.0:
#If point isn't marked, update the odds of missing and add to the map
if time_past % 5 == 0:
self.past_odds_ratios[x_ind, y_ind]=self.odds_ratios[x_ind, y_ind]
time_past += 1
self.odds_ratios[x_ind, y_ind] *= self.p_occ[x_ind, y_ind] / (1-self.p_occ[x_ind, y_ind]) * self.odds_ratio_miss
#self.p_occ[x_ind, y_ind] *= self.p_occ[x_ind, y_ind] * self.odds_ratio_miss/self.odds_ratio_hit
marked.add((x_ind, y_ind))
#print 'New Point'
            if self.is_in_map(data_x, data_y) and self.odds_ratios[datax_pixel, datay_pixel] >= 1/60.0:
                # if the hit cell is inside the map, update its odds of being occupied
if time_past % 5 == 0:
self.past_odds_ratios[datax_pixel, datay_pixel]=self.odds_ratios[datax_pixel, datay_pixel]
time_past += 1
self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[datax_pixel, datay_pixel]/(1-self.p_occ[datax_pixel, datay_pixel]) * self.odds_ratio_hit
self.seq += 1
if self.seq % 10 == 0:
map = OccupancyGrid() #this is a nav msg class
map.header.seq = self.seq
map.header.stamp = msg.header.stamp
map.header.frame_id = "map"
map.header.frame_id = "past_map"
map.info.origin.position.x = self.origin[0]
map.info.origin.position.y = self.origin[1]
map.info.width = self.n
map.info.height = self.n
map.info.resolution = self.resolution
        map.data = [0]*self.n**2  # flat list of n*n cells in row-major order
        for i in range(self.n):
            # fill in the occupancy value for each (i, j) grid cell
            for j in range(self.n):
                idx = i + self.n*j  # row-major order (i is x, j is y)
                if self.odds_ratios[i,j] < 1/5.0:
                    map.data[idx] = 0  # free (rendered gray)
                elif 1/5.0 <= self.odds_ratios[i,j] < 0.5:
                    map.data[idx] = 25
                elif self.odds_ratios[i,j] > 0.5:
                    map.data[idx] = 100  # occupied (rendered as black walls)
                else:
                    map.data[idx] = -1  # unknown
self.pub.publish(map)
#TODO: Change display such that we're not just looking at the ratio, but mapping the dynamic archive and current readings
image1 = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1],3))
image2 = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1],3))
self.counter+=1
x_odom_index = int((self.odom_pose[0] - self.origin[0])/self.resolution)
#......... rest of this code omitted .........
Example 5: process_scan
# Required import: from nav_msgs.msg import OccupancyGrid [as alias]
# Or: from nav_msgs.msg.OccupancyGrid import data[idx] [as alias]
def process_scan(self, msg):
""" Callback function for the laser scan messages """
if len(msg.ranges) != 360:
# throw out scans that don't have all the laser data
return
# get pose according to the odometry
p = PoseStamped(header=Header(stamp=msg.header.stamp,frame_id="base_link"), pose=Pose())
self.odom_pose = self.tf_listener.transformPose("odom", p)
# convert the odom pose to the tuple (x,y,theta)
self.odom_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)
for i in range(360):
if msg.ranges[i] > 0.0 and msg.ranges[i] < 5.0:
# TODO: draw a picture for this to understand it better
map_x = self.odom_pose[0] + msg.ranges[i]*cos(i*pi/180.0+self.odom_pose[2])
map_y = self.odom_pose[1] + msg.ranges[i]*sin(i*pi/180.0+self.odom_pose[2])
x_detect = int((map_x - self.origin[0])/self.resolution)
y_detect = int((map_y - self.origin[1])/self.resolution)
u = (map_x - self.odom_pose[0], map_y - self.odom_pose[1])
magnitude = sqrt(u[0]**2 + u[1]**2)
n_steps = max([1,int(ceil(magnitude/self.resolution))])
                u_step = (u[0]/max(n_steps - 1, 1), u[1]/max(n_steps - 1, 1))  # avoid division by zero when n_steps == 1
marked = set()
for i in range(n_steps):
curr_x = self.odom_pose[0] + i*u_step[0]
curr_y = self.odom_pose[1] + i*u_step[1]
if not(self.is_in_map(curr_x,curr_y)):
break
x_ind = int((curr_x - self.origin[0])/self.resolution)
y_ind = int((curr_y - self.origin[1])/self.resolution)
if x_ind == x_detect and y_ind == y_detect:
break
if not((x_ind,y_ind) in marked):
# odds ratio is updated according to the inverse sensor model
self.odds_ratios[x_ind,y_ind] *= self.p_occ / (1-self.p_occ) * self.odds_ratio_miss
marked.add((x_ind,y_ind))
if self.is_in_map(map_x, map_y):
# odds ratio is updated according to the inverse sensor model
self.odds_ratios[x_detect,y_detect] *= self.p_occ / (1-self.p_occ) * self.odds_ratio_hit
self.seq += 1
# to save time, only publish the map every 10 scans that we process
if self.seq % 10 == 0:
# make occupancy grid
map = OccupancyGrid()
map.header.seq = self.seq
self.seq += 1
map.header.stamp = msg.header.stamp
map.header.frame_id = "map" # the name of the coordinate frame of the map
map.info.origin.position.x = self.origin[0]
map.info.origin.position.y = self.origin[1]
map.info.width = self.n
map.info.height = self.n
map.info.resolution = self.resolution
map.data = [0]*self.n**2 # map.data stores the n by n grid in row-major order
for i in range(self.n):
for j in range(self.n):
idx = i+self.n*j # this implements row major order
if self.odds_ratios[i,j] < 1/5.0: # consider a cell free if odds ratio is low enough
map.data[idx] = 0
elif self.odds_ratios[i,j] > 5.0: # consider a cell occupied if odds ratio is high enough
map.data[idx] = 100
else: # otherwise cell is unknown
map.data[idx] = -1
self.pub.publish(map)
# create the image from the probabilities so we can visualize using opencv
im = np.zeros((self.odds_ratios.shape[0],self.odds_ratios.shape[1],3))
for i in range(im.shape[0]):
for j in range(im.shape[1]):
if self.odds_ratios[i,j] < 1/5.0:
im[i,j,:] = 1.0
elif self.odds_ratios[i,j] > 5.0:
im[i,j,:] = 0.0
else:
im[i,j,:] = 0.5
# compute the index of the odometry pose so we can mark it with a circle
x_odom_index = int((self.odom_pose[0] - self.origin[0])/self.resolution)
y_odom_index = int((self.odom_pose[1] - self.origin[1])/self.resolution)
# draw the circle
cv2.circle(im,(y_odom_index, x_odom_index),2,(255,0,0))
# display the image resized
cv2.imshow("map",cv2.resize(im,(500,500)))
cv2.waitKey(20)
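All the examples write map.data[idx] on the publishing side; the same row-major indexing applies when reading a published grid back out of a subscriber. A minimal subscriber sketch, where the /map topic name and the inspected cell are assumptions for illustration:

import rospy
from nav_msgs.msg import OccupancyGrid

def map_callback(grid):
    """ Read one cell out of a received map using the same row-major indexing """
    i, j = 3, 4                    # (x, y) cell to inspect, chosen arbitrarily
    idx = i + grid.info.width * j  # row-major index, matching the examples above
    rospy.loginfo("cell (%d, %d) = %d", i, j, grid.data[idx])

if __name__ == '__main__':
    rospy.init_node('map_reader')
    rospy.Subscriber('/map', OccupancyGrid, map_callback)  # assumed topic name
    rospy.spin()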