This article collects typical usage examples of the Python method matplotlib.cm.jet_r. If you are wondering what cm.jet_r does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, matplotlib.cm.
Seven code examples of cm.jet_r are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
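Before the examples, one fact they all rely on: cm.jet_r is the reversed "jet" colormap object, and like every matplotlib Colormap it is callable. Feed it values in [0, 1] and it returns RGBA colors; pass bytes=True to get uint8 output. A minimal sketch:

import numpy as np
from matplotlib import cm

values = np.linspace(0.0, 1.0, 5)
rgba = cm.jet_r(values)                  # float RGBA in [0, 1], shape (5, 4)
rgba_u8 = cm.jet_r(values, bytes=True)   # uint8 RGBA in [0, 255]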
Example 1: plot_pc_old
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def plot_pc_old(pc_np, z_cutoff=70, birds_view=False, color='height', size=0.3, ax=None):
    # remove points beyond the z cutoff
    valid_index = pc_np[:, 2] < z_cutoff
    pc_np = pc_np[valid_index, :]
    if ax is None:
        fig = plt.figure(figsize=(9, 9))
        ax = fig.add_subplot(projection='3d')
    if color == 'height':
        c = pc_np[:, 1]
        ax.scatter(pc_np[:, 0].tolist(), pc_np[:, 1].tolist(), pc_np[:, 2].tolist(),
                   s=size, c=c, cmap=cm.jet_r)
    elif color == 'reflectance':
        assert False  # reflectance coloring is not implemented
    else:
        ax.scatter(pc_np[:, 0].tolist(), pc_np[:, 1].tolist(), pc_np[:, 2].tolist(),
                   s=size, c=color)
    axisEqual3D(ax)
    if birds_view:
        ax.view_init(elev=0, azim=-90)
    else:
        ax.view_init(elev=-45, azim=-90)
    # ax.invert_yaxis()
    return ax
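A hypothetical call of the function above with a synthetic point cloud; note that plot_pc_old also needs matplotlib.pyplot as plt and the project-local axisEqual3D helper in scope:

import numpy as np
import matplotlib.pyplot as plt

pc = np.random.uniform(-20, 20, size=(1000, 3))  # synthetic (N, 3) point cloud
ax = plot_pc_old(pc, z_cutoff=15, birds_view=True)
plt.show()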
Example 2: colorize_depth
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def colorize_depth(depth_map):
    # normalize depth values to [0, 1]
    min_depth = depth_map.min()
    max_depth = depth_map.max()
    depth_map = (depth_map - min_depth) / (max_depth - min_depth)
    # apply the reversed jet colormap, scale to [0, 255], and drop the alpha channel
    depth_map = np.uint8(cm.jet_r(depth_map) * 255)
    return depth_map[:, :, 0:3]
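A quick, hypothetical check of colorize_depth with a synthetic depth map (shape and value range are arbitrary):

import numpy as np

depth = np.random.uniform(0.5, 80.0, size=(240, 320))  # fake depth map
rgb = colorize_depth(depth)  # uint8 RGB image of shape (240, 320, 3)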
Example 3: save_gradcam
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        # alpha-blend the heatmap over the raw image, weighted by activation
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # simple 50/50 average of heatmap and raw image
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
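A hypothetical invocation with synthetic inputs, assuming a heatmap already scaled to [0, 1] and a BGR frame as OpenCV expects:

import numpy as np
import torch

gcam = torch.rand(224, 224)  # fake Grad-CAM heatmap in [0, 1]
raw = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)  # fake BGR image
save_gradcam("gradcam.png", gcam, raw, paper_cmap=True)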
Example 4: draw_node
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def draw_node(cls, node, surface, origin, size, config):
    import pygame
    cmap = cm.jet_r
    # 1 / (1 - gamma) bounds the discounted return when rewards lie in [0, 1],
    # so node values span the full colormap range
    norm = mpl.colors.Normalize(vmin=0, vmax=1 / (1 - config["gamma"]))
    color = cmap(norm(node.get_value()), bytes=True)
    pygame.draw.rect(surface, color, (origin[0], origin[1], size[0], size[1]), 0)
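The Normalize-then-colormap pattern works standalone; here is a sketch with an assumed gamma of 0.95, showing that the result is an RGBA uint8 tuple pygame accepts directly:

import matplotlib as mpl
from matplotlib import cm

gamma = 0.95  # assumed discount factor
norm = mpl.colors.Normalize(vmin=0, vmax=1 / (1 - gamma))
color = cm.jet_r(norm(12.0), bytes=True)  # (r, g, b, a) uint8 tuple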
Example 5: display_highway
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def display_highway(cls, agent, surface):
    """
    Particular visualization of the state space that is used for highway_env environments only.

    :param agent: the agent to be displayed
    :param surface: the surface on which the agent is displayed
    """
    import pygame
    norm = mpl.colors.Normalize(vmin=-2, vmax=2)
    cmap = cm.jet_r
    try:
        grid_shape = agent.mdp.original_shape
    except AttributeError:
        grid_shape = cls.highway_module.finite_mdp.compute_ttc_grid(
            agent.env, time_quantization=1., horizon=10.).shape
    cell_size = (surface.get_width() // grid_shape[2],
                 surface.get_height() // (grid_shape[0] * grid_shape[1]))
    speed_size = surface.get_height() // grid_shape[0]
    value = agent.get_state_value().reshape(grid_shape)
    for h in range(grid_shape[0]):
        for i in range(grid_shape[1]):
            for j in range(grid_shape[2]):
                color = cmap(norm(value[h, i, j]), bytes=True)
                pygame.draw.rect(surface, color, (
                    j * cell_size[0], i * cell_size[1] + h * speed_size, cell_size[0], cell_size[1]), 0)
        pygame.draw.line(surface, cls.BLACK,
                         (0, h * speed_size), (grid_shape[2] * cell_size[0], h * speed_size), 1)
    states, actions = agent.plan_trajectory(agent.mdp.state)
    for state in states:
        (h, i, j) = np.unravel_index(state, grid_shape)
        pygame.draw.rect(surface, cls.RED,
                         (j * cell_size[0], i * cell_size[1] + h * speed_size, cell_size[0], cell_size[1]), 1)
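The rendering core above does not actually need highway_env; a self-contained sketch with a synthetic (speed, lane, time) value grid and made-up surface dimensions:

import numpy as np
import pygame
import matplotlib as mpl
from matplotlib import cm

pygame.init()
surface = pygame.Surface((300, 200))
norm = mpl.colors.Normalize(vmin=-2, vmax=2)
value = np.random.uniform(-2, 2, size=(2, 4, 10))  # synthetic value grid
cell_size = (surface.get_width() // value.shape[2],
             surface.get_height() // (value.shape[0] * value.shape[1]))
speed_size = surface.get_height() // value.shape[0]
for h in range(value.shape[0]):
    for i in range(value.shape[1]):
        for j in range(value.shape[2]):
            color = cm.jet_r(norm(value[h, i, j]), bytes=True)
            pygame.draw.rect(surface, color,
                             (j * cell_size[0], i * cell_size[1] + h * speed_size,
                              cell_size[0], cell_size[1]), 0)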
Example 6: display
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def display(cls, agent, surface, sim_surface=None, display_text=True):
    """
    Display the action-values for the current state

    :param agent: the DQNAgent to be displayed
    :param surface: the pygame surface on which the agent is displayed
    :param sim_surface: the pygame surface on which the env is rendered
    :param display_text: whether to display the action values as text
    """
    import pygame
    action_values = agent.get_state_action_values(agent.previous_state)
    action_distribution = agent.action_distribution(agent.previous_state)
    cell_size = (surface.get_width() // len(action_values), surface.get_height())
    pygame.draw.rect(surface, cls.BLACK, (0, 0, surface.get_width(), surface.get_height()), 0)
    # Display one colored cell per action value
    for action, value in enumerate(action_values):
        cmap = cm.jet_r
        norm = mpl.colors.Normalize(vmin=0, vmax=1 / (1 - agent.config["gamma"]))
        color = cmap(norm(value), bytes=True)
        pygame.draw.rect(surface, color, (cell_size[0] * action, 0, cell_size[0], cell_size[1]), 0)
        if display_text:
            font = pygame.font.Font(None, 15)
            text = "v={:.2f} / p={:.2f}".format(value, action_distribution[action])
            text = font.render(text, 1, (10, 10, 10), (255, 255, 255))
            surface.blit(text, (cell_size[0] * action, 0))
    if sim_surface and hasattr(agent.value_net, "get_attention_matrix"):
        cls.display_vehicles_attention(agent, sim_surface)
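One pitfall when reusing this method outside a full application: pygame.font.Font fails with "font not initialized" unless the font module was set up first, so something like the following must run beforehand:

import pygame
pygame.init()  # initializes pygame.font, required before pygame.font.Font(None, 15)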
Example 7: live
# Required import: from matplotlib import cm [as alias]
# Or: from matplotlib.cm import jet_r [as alias]
def live(config_path, model_path, cuda, crf, camera_id):
    """
    Inference from camera stream
    """
    # Setup
    CONFIG = OmegaConf.load(config_path)
    device = get_device(cuda)
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True

    classes = get_classtable(CONFIG)
    postprocessor = setup_postprocessor(CONFIG) if crf else None

    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.eval()
    model.to(device)
    print("Model:", CONFIG.MODEL.NAME)

    # UVC camera stream
    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))

    def colorize(labelmap):
        # Assign a unique color to each label
        labelmap = labelmap.astype(np.float32) / CONFIG.DATASET.N_CLASSES
        colormap = cm.jet_r(labelmap)[..., :-1] * 255.0
        return np.uint8(colormap)

    def mouse_event(event, x, y, flags, labelmap):
        # Show the class name of the pixel under the mouse
        label = labelmap[y, x]
        name = classes[label]
        print(name)

    window_name = "{} + {}".format(CONFIG.MODEL.NAME, CONFIG.DATASET.NAME)
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        _, frame = cap.read()
        image, raw_image = preprocessing(frame, device, CONFIG)
        labelmap = inference(model, image, raw_image, postprocessor)
        colormap = colorize(labelmap)

        # Register mouse callback function
        cv2.setMouseCallback(window_name, mouse_event, labelmap)

        # Overlay prediction
        cv2.addWeighted(colormap, 0.5, raw_image, 0.5, 0.0, raw_image)

        # Quit by pressing "q" key
        cv2.imshow(window_name, raw_image)
        if cv2.waitKey(10) == ord("q"):
            break
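Assuming the project's helpers (get_device, get_classtable, setup_postprocessor, preprocessing, inference) are in scope and a webcam is attached, a direct call might look like this; the config and checkpoint paths are hypothetical:

live(
    config_path="configs/cocostuff164k.yaml",  # hypothetical config path
    model_path="deeplabv2_final.pth",          # hypothetical checkpoint path
    cuda=True,
    crf=False,
    camera_id=0,
)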