This article collects typical usage examples of the cv2.COLORMAP_BONE attribute in Python. If you are wondering what cv2.COLORMAP_BONE does or how to use it in practice, the curated code examples below may help. You can also explore further usage examples of the cv2 module, to which this attribute belongs.
Four code examples of the cv2.COLORMAP_BONE attribute are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: heatmap
# Required module: import cv2 [as alias]
# Or: from cv2 import COLORMAP_BONE [as alias]
def heatmap(map):
    # numpy is assumed to be imported as np in the source file
    # Scale a float map in [0, 1] to uint8, then apply the BONE colormap
    map = (map * 255).astype(np.uint8)
    return cv2.applyColorMap(map, cv2.COLORMAP_BONE)
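As a quick sanity check (not part of the original example), the helper above can be exercised on a synthetic map; the array shape and output file name below are illustrative assumptions:
import numpy as np
import cv2

# Hypothetical input: a single-channel map with values in [0, 1]
conf_map = np.random.rand(240, 320).astype(np.float32)
colored = heatmap(conf_map)                # uint8 BGR image of shape (240, 320, 3)
cv2.imwrite('heatmap_bone.png', colored)   # illustrative file name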
Example 2: tensor2array
# Required module: import cv2 [as alias]
# Or: from cv2 import COLORMAP_BONE [as alias]
def tensor2array(tensor, max_value=None, colormap='rainbow'):
    # numpy is assumed to be imported as np in the source file
    if max_value is None:
        # Normalize the tensor to [0, 1] before colorizing
        tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min() + 1e-6)
        max_value = tensor.max().item()
    if tensor.ndimension() == 2 or tensor.size(0) == 1:
        try:
            import cv2
            if cv2.__version__.startswith('3'):
                color_cvt = cv2.COLOR_BGR2RGB
            else:  # OpenCV 2.4
                color_cvt = cv2.cv.CV_BGR2RGB
            if colormap == 'rainbow':
                colormap = cv2.COLORMAP_RAINBOW
            elif colormap == 'bone':
                colormap = cv2.COLORMAP_BONE
            array = (tensor.squeeze().numpy() * 255. / max_value).clip(0, 255).astype(np.uint8)
            colored_array = cv2.applyColorMap(array, colormap)
            array = cv2.cvtColor(colored_array, color_cvt).astype(np.float32) / 255
        except ImportError:
            # Fallback when OpenCV is unavailable: replicate the single channel
            if tensor.ndimension() == 2:
                tensor.unsqueeze_(2)
            array = (tensor.expand(tensor.size(0), tensor.size(1), 3).numpy() / max_value).clip(0, 1)
    elif tensor.ndimension() == 3:
        assert(tensor.size(0) == 3)
        array = 0.5 + tensor.numpy().transpose(1, 2, 0) * 0.5
    # for tensorboardX 1.4:
    # array = array.transpose(2, 0, 1)
    return array
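A minimal, assumed usage of this variant (not from the original source) would pass a single-channel tensor, e.g. a depth or disparity map, and get back an H x W x 3 float array in [0, 1] suitable for logging:
import torch

depth = torch.rand(1, 128, 416)                    # hypothetical 1 x H x W depth map
vis = tensor2array(depth, max_value=None, colormap='bone')
print(vis.shape, vis.dtype)                        # (128, 416, 3) float32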
Example 3: tensor2array
# Required module: import cv2 [as alias]
# Or: from cv2 import COLORMAP_BONE [as alias]
def tensor2array(tensor, max_value=255, colormap='rainbow'):
    # numpy is assumed to be imported as np in the source file
    if max_value is None:
        max_value = tensor.max()
    if tensor.ndimension() == 2 or tensor.size(0) == 1:
        try:
            import cv2
            if cv2.__version__.startswith('2'):  # OpenCV 2.4
                color_cvt = cv2.cv.CV_BGR2RGB
            else:
                color_cvt = cv2.COLOR_BGR2RGB
            if colormap == 'rainbow':
                colormap = cv2.COLORMAP_RAINBOW
            elif colormap == 'bone':
                colormap = cv2.COLORMAP_BONE
            array = (255 * tensor.squeeze().numpy() / max_value).clip(0, 255).astype(np.uint8)
            colored_array = cv2.applyColorMap(array, colormap)
            array = cv2.cvtColor(colored_array, color_cvt).astype(np.float32) / 255
            # array = array.transpose(2, 0, 1)
        except ImportError:
            # Fallback when OpenCV is unavailable: replicate the single channel
            if tensor.ndimension() == 2:
                tensor.unsqueeze_(2)
            array = (tensor.expand(tensor.size(0), tensor.size(1), 3).numpy() / max_value).clip(0, 1)
    elif tensor.ndimension() == 3:
        # assert(tensor.size(0) == 3)
        # array = 0.5 + tensor.numpy() * 0.5
        array = 0.5 + tensor.numpy().transpose(1, 2, 0) * 0.5
    return array
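Compared with Example 2, this variant defaults to max_value=255 and does not normalize the input itself, so the caller is expected to provide values in a known range. A hedged usage sketch, with an assumed error map in [0, 255]:
import torch

err = torch.rand(96, 320) * 255                          # hypothetical error map in [0, 255]
vis = tensor2array(err, max_value=255, colormap='bone')   # H x W x 3 float array in [0, 1]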
Example 4: get_visualization
# Required module: import cv2 [as alias]
# Or: from cv2 import COLORMAP_BONE [as alias]
def get_visualization(img_list, label_list, ms_vect, ms_prob, ds=6, idx=0):
    # torch and torch.nn.functional (as F) are assumed to be imported in the
    # source file, along with the module-level helpers used below
    # (_recover_img, downsample_flow, disp2flow, _flow_to_img, _visualize_heat,
    # prob_gather)
    dim = ms_vect[0].size(1)
    H, W = img_list[0].size()[2:]
    with torch.no_grad():
        raw_img0 = _recover_img(img_list[0][idx].data)
        raw_img1 = _recover_img(img_list[1][idx].data)
        for l in range(len(ms_vect)):
            # input images
            vis_list = [raw_img0, raw_img1]
            # ground-truth flow
            gt_flo, valid_mask = downsample_flow(label_list[0],
                                                 1 / 2**(ds - l))
            gt_flo = F.interpolate(gt_flo, (H, W), mode='nearest')[idx]
            valid_mask = F.interpolate(valid_mask, (H, W), mode='nearest')[idx]
            max_mag1 = torch.max(torch.norm(gt_flo, 2, 0))
            # predicted flow
            pred_flo = ms_vect[l]
            if dim == 1:
                pred_flo = disp2flow(pred_flo)
            pred_flo = F.interpolate(pred_flo, (H, W), mode='nearest')[idx]
            max_mag2 = torch.max(torch.norm(pred_flo, 2, 0))
            max_mag = max(float(max_mag1), float(max_mag2))
            vis_list.append(_flow_to_img(gt_flo, max_mag))
            vis_list.append(_flow_to_img(pred_flo, max_mag))
            # EPE error visualization
            epe_error = torch.norm(
                pred_flo - gt_flo, 2, 0, keepdim=False) * valid_mask[0, :, :]
            normalizer = max(torch.max(epe_error), 1)
            epe_error = 1 - epe_error / normalizer
            vis_list.append(_visualize_heat(epe_error))
            # confidence map visualization, rendered with the BONE colormap
            prob = ms_prob[l].data
            prob = prob_gather(prob, normalize=True, dim=dim)
            if prob.size(2) != H or prob.size(3) != W:
                prob = F.interpolate(prob, (H, W), mode='nearest')
            vis_list.append(
                _visualize_heat(prob[idx].squeeze(), cv2.COLORMAP_BONE))
            # concatenate this scale horizontally, then stack scales vertically
            vis = torch.cat(vis_list, dim=2)
            if l == 0:
                ms_vis = vis
            else:
                ms_vis = torch.cat([ms_vis, vis], dim=1)
    return ms_vis.unsqueeze(0)