This article collects typical usage examples of the Python method utils.debugger.Debugger. If you are wondering what debugger.Debugger does, how to call it, or simply want to see it used in practice, the hand-picked code examples below may help. You can also explore the module utils.debugger further for related usage.
The following shows 11 code examples of debugger.Debugger, sorted by popularity by default.
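All of the examples follow the same pattern: construct a Debugger, register images and overlays with add_img / add_blend_img (plus helpers such as add_coco_bbox, add_point_2d, and add_point_3d), then call show_all_imgs or save_all_imgs. The sketch below condenses that pattern into a hypothetical helper, visualize_heatmap; it assumes a CenterNet-style utils/debugger.py is importable, and the exact constructor arguments and method names vary between the repositories the examples come from.

# Minimal sketch of the common Debugger workflow seen in the examples below.
# Assumption: a CenterNet-style utils/debugger.py is on the import path;
# visualize_heatmap is an illustrative helper, not part of any of the repos.
from utils.debugger import Debugger  # repo-specific module

def visualize_heatmap(img, heatmap, save_dir=None):
  # img: HxWx3 uint8 image; heatmap: CxH'xW' numpy array (network output)
  debugger = Debugger()                      # some forks take dataset=..., theme=...
  colormap = debugger.gen_colormap(heatmap)  # render the heatmap as a color image
  debugger.add_img(img, img_id='input')
  debugger.add_blend_img(img, colormap, 'heatmap')
  if save_dir is not None:
    debugger.save_all_imgs(save_dir, prefix='demo')
  else:
    debugger.show_all_imgs(pause=True)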
Example 1: demo_image
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def demo_image(image, model, opt):
  s = max(image.shape[0], image.shape[1]) * 1.0
  c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
  trans_input = get_affine_transform(
    c, s, 0, [opt.input_w, opt.input_h])
  inp = cv2.warpAffine(image, trans_input, (opt.input_w, opt.input_h),
                       flags=cv2.INTER_LINEAR)
  inp = (inp / 255. - mean) / std
  inp = inp.transpose(2, 0, 1)[np.newaxis, ...].astype(np.float32)
  inp = torch.from_numpy(inp).to(opt.device)
  out = model(inp)[-1]
  pred = get_preds(out['hm'].detach().cpu().numpy())[0]
  pred = transform_preds(pred, c, s, (opt.output_w, opt.output_h))
  pred_3d = get_preds_3d(out['hm'].detach().cpu().numpy(),
                         out['depth'].detach().cpu().numpy())[0]
  debugger = Debugger()
  debugger.add_img(image)
  debugger.add_point_2d(pred, (255, 0, 0))
  debugger.add_point_3d(pred_3d, 'b')
  debugger.show_all_imgs(pause=False)
  debugger.show_3d()
Example 2: debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def debug(self, batch, output, iter_id):
  opt = self.opt
  reg = output['reg'] if opt.reg_offset else None
  dets = ctdet_decode(
    output['hm'], output['wh'], reg=reg,
    cat_spec_wh=opt.cat_spec_wh, K=opt.K)
  dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
  dets[:, :, :4] *= opt.down_ratio
  dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
  dets_gt[:, :, :4] *= opt.down_ratio
  for i in range(1):
    debugger = Debugger(
      dataset=opt.dataset, ipynb=(opt.debug == 3), theme=opt.debugger_theme)
    img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
    img = np.clip(((
      img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
    pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
    gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    debugger.add_blend_img(img, gt, 'gt_hm')
    debugger.add_img(img, img_id='out_pred')
    for k in range(len(dets[i])):
      if dets[i, k, 4] > opt.center_thresh:
        debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                               dets[i, k, 4], img_id='out_pred')
    debugger.add_img(img, img_id='out_gt')
    for k in range(len(dets_gt[i])):
      if dets_gt[i, k, 4] > opt.center_thresh:
        debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                               dets_gt[i, k, 4], img_id='out_gt')
    if opt.debug == 4:
      debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
    else:
      debugger.show_all_imgs(pause=True)
Example 3: debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def debug(self, batch, output, iter_id):
  opt = self.opt
  detections = self.decode(output['hm_t'], output['hm_l'],
                           output['hm_b'], output['hm_r'],
                           output['hm_c']).detach().cpu().numpy()
  detections[:, :, :4] *= opt.input_res / opt.output_res
  for i in range(1):
    debugger = Debugger(
      dataset=opt.dataset, ipynb=(opt.debug == 3), theme=opt.debugger_theme)
    pred_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8)
    gt_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8)
    img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
    img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)
    for p in self.parts:
      tag = 'hm_{}'.format(p)
      pred = debugger.gen_colormap(output[tag][i].detach().cpu().numpy())
      gt = debugger.gen_colormap(batch[tag][i].detach().cpu().numpy())
      if p != 'c':
        pred_hm = np.maximum(pred_hm, pred)
        gt_hm = np.maximum(gt_hm, gt)
      if p == 'c' or opt.debug > 2:
        debugger.add_blend_img(img, pred, 'pred_{}'.format(p))
        debugger.add_blend_img(img, gt, 'gt_{}'.format(p))
    debugger.add_blend_img(img, pred_hm, 'pred')
    debugger.add_blend_img(img, gt_hm, 'gt')
    debugger.add_img(img, img_id='out')
    for k in range(len(detections[i])):
      if detections[i, k, 4] > 0.1:
        debugger.add_coco_bbox(detections[i, k, :4], detections[i, k, -1],
                               detections[i, k, 4], img_id='out')
    if opt.debug == 4:
      debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
    else:
      debugger.show_all_imgs(pause=True)
Example 4: debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def debug(self, batch, output, iter_id):
  opt = self.opt
  reg = output['reg'] if opt.reg_offset else None
  dets = ctdet_decode(
    output['hm'], output['wh'], reg=reg,
    cat_spec_wh=opt.cat_spec_wh, K=opt.K)
  dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
  dets[:, :, :4] *= opt.down_ratio
  dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
  dets_gt[:, :, :4] *= opt.down_ratio
  for i in range(1):
    debugger = Debugger(
      dataset=opt.dataset, ipynb=(opt.debug == 3), theme=opt.debugger_theme)
    img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
    img = np.clip(((
      img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
    pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
    gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    debugger.add_blend_img(img, gt, 'gt_hm')
    debugger.add_img(img, img_id='out_pred')
    for k in range(len(dets[i])):
      if dets[i, k, 4] > opt.center_thresh:
        debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                               dets[i, k, 4], img_id='out_pred')
    debugger.add_img(img, img_id='out_gt')
    for k in range(len(dets_gt[i])):
      if dets_gt[i, k, 4] > opt.center_thresh:
        debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                               dets_gt[i, k, 4], img_id='out_gt')
    if opt.debug == 4:
      debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
    else:
      debugger.show_all_imgs(pause=True)
Example 5: main
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def main():
  opt = opts().parse()
  if opt.loadModel == '':
    opt.loadModel = '../models/Pascal3D-cpu.pth'
  model = torch.load(opt.loadModel)
  img = cv2.imread(opt.demo)
  s = max(img.shape[0], img.shape[1]) * 1.0
  c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
  img = Crop(img, c, s, 0, ref.inputRes).astype(np.float32).transpose(2, 0, 1) / 256.
  input = torch.from_numpy(img.copy()).float()
  input = input.view(1, input.size(0), input.size(1), input.size(2))
  input_var = torch.autograd.Variable(input).float()
  if opt.GPU > -1:
    model = model.cuda(opt.GPU)
    input_var = input_var.cuda(opt.GPU)
  output = model(input_var)
  hm = output[-1].data.cpu().numpy()
  debugger = Debugger()
  img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(np.uint8).copy()
  inp = img.copy()
  star = (cv2.resize(hm[0, 0], (ref.inputRes, ref.inputRes)) * 255)
  star[star > 255] = 255
  star[star < 0] = 0
  star = np.tile(star, (3, 1, 1)).transpose(1, 2, 0)
  trans = 0.8
  star = (trans * star + (1. - trans) * img).astype(np.uint8)
  ps = parseHeatmap(hm[0], thresh=0.1)
  canonical, pred, color, score = [], [], [], []
  for k in range(len(ps[0])):
    x, y, z = ((hm[0, 1:4, ps[0][k], ps[1][k]] + 0.5) * ref.outputRes).astype(np.int32)
    dep = ((hm[0, 4, ps[0][k], ps[1][k]] + 0.5) * ref.outputRes).astype(np.int32)
    canonical.append([x, y, z])
    pred.append([ps[1][k], ref.outputRes - dep, ref.outputRes - ps[0][k]])
    score.append(hm[0, 0, ps[0][k], ps[1][k]])
    color.append((1.0 * x / ref.outputRes, 1.0 * y / ref.outputRes, 1.0 * z / ref.outputRes))
    cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 4, (255, 255, 255), -1)
    cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 2, (int(z * 4), int(y * 4), int(x * 4)), -1)
  pred = np.array(pred).astype(np.float32)
  canonical = np.array(canonical).astype(np.float32)
  pointS = canonical * 1.0 / ref.outputRes
  pointT = pred * 1.0 / ref.outputRes
  R, t, s = horn87(pointS.transpose(), pointT.transpose(), score)
  rotated_pred = s * np.dot(R, canonical.transpose()).transpose() + t * ref.outputRes
  debugger.addImg(inp, 'inp')
  debugger.addImg(star, 'star')
  debugger.addImg(img, 'nms')
  debugger.addPoint3D(canonical / ref.outputRes - 0.5, c=color, marker='^')
  debugger.addPoint3D(pred / ref.outputRes - 0.5, c=color, marker='x')
  debugger.addPoint3D(rotated_pred / ref.outputRes - 0.5, c=color, marker='*')
  debugger.showAllImg(pause=True)
  debugger.show3D()
Example 6: debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def debug(self, batch, output, iter_id):
  cfg = self.cfg
  reg = output[3] if cfg.LOSS.REG_OFFSET else None
  hm_hp = output[4] if cfg.LOSS.HM_HP else None
  hp_offset = output[5] if cfg.LOSS.REG_HP_OFFSET else None
  dets = multi_pose_decode(
    output[0], output[1], output[2],
    reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=cfg.TEST.TOPK)
  dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
  dets[:, :, :4] *= cfg.MODEL.INPUT_RES / cfg.MODEL.OUTPUT_RES
  dets[:, :, 5:39] *= cfg.MODEL.INPUT_RES / cfg.MODEL.OUTPUT_RES
  dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
  dets_gt[:, :, :4] *= cfg.MODEL.INPUT_RES / cfg.MODEL.OUTPUT_RES
  dets_gt[:, :, 5:39] *= cfg.MODEL.INPUT_RES / cfg.MODEL.OUTPUT_RES
  for i in range(1):
    debugger = Debugger(
      dataset=cfg.SAMPLE_METHOD, ipynb=(cfg.DEBUG == 3), theme=cfg.DEBUG_THEME)
    img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
    img = np.clip(((
      img * np.array(cfg.DATASET.STD).reshape(1, 1, 3).astype(np.float32)
      + cfg.DATASET.MEAN) * 255.), 0, 255).astype(np.uint8)
    pred = debugger.gen_colormap(output[0][i].detach().cpu().numpy())
    gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    debugger.add_blend_img(img, gt, 'gt_hm')
    debugger.add_img(img, img_id='out_pred')
    for k in range(len(dets[i])):
      if dets[i, k, 4] > cfg.MODEL.CENTER_THRESH:
        debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                               dets[i, k, 4], img_id='out_pred')
        debugger.add_coco_hp(dets[i, k, 5:39], img_id='out_pred')
    debugger.add_img(img, img_id='out_gt')
    for k in range(len(dets_gt[i])):
      if dets_gt[i, k, 4] > cfg.MODEL.CENTER_THRESH:
        debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                               dets_gt[i, k, 4], img_id='out_gt')
        debugger.add_coco_hp(dets_gt[i, k, 5:39], img_id='out_gt')
    if cfg.LOSS.HM_HP:
      pred = debugger.gen_colormap_hp(output[4][i].detach().cpu().numpy())
      gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hmhp')
      debugger.add_blend_img(img, gt, 'gt_hmhp')
    if cfg.DEBUG == 4:
      debugger.save_all_imgs(cfg.LOG_DIR, prefix='{}'.format(iter_id))
    else:
      debugger.show_all_imgs(pause=True)
Example 7: _debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def _debug(image, t_heat, l_heat, b_heat, r_heat, ct_heat):
  debugger = Debugger(num_classes=80)
  k = 0
  t_heat = torch.sigmoid(t_heat)
  l_heat = torch.sigmoid(l_heat)
  b_heat = torch.sigmoid(b_heat)
  r_heat = torch.sigmoid(r_heat)
  aggr_weight = 0.1
  t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
  l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
  b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
  r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)
  t_heat[t_heat > 1] = 1
  l_heat[l_heat > 1] = 1
  b_heat[b_heat > 1] = 1
  r_heat[r_heat > 1] = 1
  ct_heat = torch.sigmoid(ct_heat)
  t_hm = debugger.gen_colormap(t_heat[k].cpu().data.numpy())
  l_hm = debugger.gen_colormap(l_heat[k].cpu().data.numpy())
  b_hm = debugger.gen_colormap(b_heat[k].cpu().data.numpy())
  r_hm = debugger.gen_colormap(r_heat[k].cpu().data.numpy())
  ct_hm = debugger.gen_colormap(ct_heat[k].cpu().data.numpy())
  hms = np.maximum(np.maximum(t_hm, l_hm),
                   np.maximum(b_hm, r_hm))
  # debugger.add_img(hms, 'hms')
  if image is not None:
    mean = np.array([0.40789654, 0.44719302, 0.47026115],
                    dtype=np.float32).reshape(3, 1, 1)
    std = np.array([0.28863828, 0.27408164, 0.27809835],
                   dtype=np.float32).reshape(3, 1, 1)
    img = (image[k].cpu().data.numpy() * std + mean) * 255
    img = img.astype(np.uint8).transpose(1, 2, 0)
    debugger.add_img(img, 'img')
    # debugger.add_blend_img(img, t_hm, 't_hm')
    # debugger.add_blend_img(img, l_hm, 'l_hm')
    # debugger.add_blend_img(img, b_hm, 'b_hm')
    # debugger.add_blend_img(img, r_hm, 'r_hm')
    debugger.add_blend_img(img, hms, 'extreme')
    debugger.add_blend_img(img, ct_hm, 'center')
  debugger.show_all_imgs(pause=False)
Example 8: step
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def step(split, epoch, opt, dataLoader, model, criterion, optimizer=None):
  if split == 'train':
    model.train()
  else:
    model.eval()
  Loss, Acc = AverageMeter(), AverageMeter()
  preds = []
  nIters = len(dataLoader)
  bar = Bar('{}'.format(opt.expID), max=nIters)
  for i, (input, target, meta) in enumerate(dataLoader):
    input_var = torch.autograd.Variable(input).float().cuda()
    target_var = torch.autograd.Variable(target).float().cuda()
    # model = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
    output = model(input_var)
    # output = torch.nn.parallel.data_parallel(model, input_var, device_ids=[0, 1, 2, 3, 4, 5])
    if opt.DEBUG >= 2:
      gt = getPreds(target.cpu().numpy()) * 4
      pred = getPreds((output[opt.nStack - 1].data).cpu().numpy()) * 4
      debugger = Debugger()
      img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(np.uint8).copy()
      debugger.addImg(img)
      debugger.addPoint2D(pred[0], (255, 0, 0))
      debugger.addPoint2D(gt[0], (0, 0, 255))
      debugger.showAllImg(pause=True)
    loss = criterion(output[0], target_var)
    for k in range(1, opt.nStack):
      loss += criterion(output[k], target_var)
    # Warning: on PyTorch >= 0.5.0, use loss.item() instead of loss.data[0]
    Loss.update(loss.data[0], input.size(0))
    Acc.update(Accuracy((output[opt.nStack - 1].data).cpu().numpy(),
                        (target_var.data).cpu().numpy()))
    if split == 'train':
      # train
      optimizer.zero_grad()
      loss.backward()
      optimizer.step()
    else:
      input_ = input.cpu().numpy()
      input_[0] = Flip(input_[0]).copy()
      inputFlip_var = torch.autograd.Variable(
        torch.from_numpy(input_).view(1, input_.shape[1], ref.inputRes, ref.inputRes)).float().cuda()
      outputFlip = model(inputFlip_var)
      outputFlip = ShuffleLR(Flip((outputFlip[opt.nStack - 1].data).cpu().numpy()[0])).reshape(
        1, ref.nJoints, ref.outputRes, ref.outputRes)
      output_ = ((output[opt.nStack - 1].data).cpu().numpy() + outputFlip) / 2
      preds.append(finalPreds(output_, meta['center'], meta['scale'], meta['rotate'])[0])
    Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Acc {Acc.avg:.6f} ({Acc.val:.6f})'.format(
      epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, loss=Loss, Acc=Acc, split=split)
    bar.next()
  bar.finish()
  return {'Loss': Loss.avg, 'Acc': Acc.avg}, preds
Example 9: debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def debug(self, batch, output, iter_id):
  opt = self.opt
  wh = output['wh'] if opt.reg_bbox else None
  reg = output['reg'] if opt.reg_offset else None
  dets = ddd_decode(output['hm'], output['rot'], output['dep'],
                    output['dim'], wh=wh, reg=reg, K=opt.K)
  # x, y, score, r1-r8, depth, dim1-dim3, cls
  dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
  calib = batch['meta']['calib'].detach().numpy()
  # x, y, score, rot, depth, dim1, dim2, dim3
  # if opt.dataset == 'gta':
  #   dets[:, 12:15] /= 3
  dets_pred = ddd_post_process(
    dets.copy(), batch['meta']['c'].detach().numpy(),
    batch['meta']['s'].detach().numpy(), calib, opt)
  dets_gt = ddd_post_process(
    batch['meta']['gt_det'].detach().numpy().copy(),
    batch['meta']['c'].detach().numpy(),
    batch['meta']['s'].detach().numpy(), calib, opt)
  # for i in range(input.size(0)):
  for i in range(1):
    debugger = Debugger(dataset=opt.dataset, ipynb=(opt.debug == 3),
                        theme=opt.debugger_theme)
    img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
    img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)
    pred = debugger.gen_colormap(
      output['hm'][i].detach().cpu().numpy())
    gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'hm_pred')
    debugger.add_blend_img(img, gt, 'hm_gt')
    # decode
    debugger.add_ct_detection(
      img, dets[i], show_box=opt.reg_bbox, center_thresh=opt.center_thresh,
      img_id='det_pred')
    debugger.add_ct_detection(
      img, batch['meta']['gt_det'][i].cpu().numpy().copy(),
      show_box=opt.reg_bbox, img_id='det_gt')
    debugger.add_3d_detection(
      batch['meta']['image_path'][i], dets_pred[i], calib[i],
      center_thresh=opt.center_thresh, img_id='add_pred')
    debugger.add_3d_detection(
      batch['meta']['image_path'][i], dets_gt[i], calib[i],
      center_thresh=opt.center_thresh, img_id='add_gt')
    # debugger.add_bird_view(
    #   dets_pred[i], center_thresh=opt.center_thresh, img_id='bird_pred')
    # debugger.add_bird_view(dets_gt[i], img_id='bird_gt')
    debugger.add_bird_views(
      dets_pred[i], dets_gt[i],
      center_thresh=opt.center_thresh, img_id='bird_pred_gt')
    # debugger.add_blend_img(img, pred, 'out', white=True)
    debugger.compose_vis_add(
      batch['meta']['image_path'][i], dets_pred[i], calib[i],
      opt.center_thresh, pred, 'bird_pred_gt', img_id='out')
    # debugger.add_img(img, img_id='out')
    if opt.debug == 4:
      debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
    else:
      debugger.show_all_imgs(pause=True)
Example 10: debug
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def debug(self, batch, output, iter_id):
  opt = self.opt
  reg = output['reg'] if opt.reg_offset else None
  hm_hp = output['hm_hp'] if opt.hm_hp else None
  hp_offset = output['hp_offset'] if opt.reg_hp_offset else None
  dets = multi_pose_decode(
    output['hm'], output['wh'], output['hps'],
    reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=opt.K)
  dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
  dets[:, :, :4] *= opt.input_res / opt.output_res
  dets[:, :, 5:39] *= opt.input_res / opt.output_res
  dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
  dets_gt[:, :, :4] *= opt.input_res / opt.output_res
  dets_gt[:, :, 5:39] *= opt.input_res / opt.output_res
  for i in range(1):
    debugger = Debugger(
      dataset=opt.dataset, ipynb=(opt.debug == 3), theme=opt.debugger_theme)
    img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
    img = np.clip(((
      img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
    pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
    gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    debugger.add_blend_img(img, gt, 'gt_hm')
    debugger.add_img(img, img_id='out_pred')
    for k in range(len(dets[i])):
      if dets[i, k, 4] > opt.center_thresh:
        debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                               dets[i, k, 4], img_id='out_pred')
        debugger.add_coco_hp(dets[i, k, 5:39], img_id='out_pred')
    debugger.add_img(img, img_id='out_gt')
    for k in range(len(dets_gt[i])):
      if dets_gt[i, k, 4] > opt.center_thresh:
        debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                               dets_gt[i, k, 4], img_id='out_gt')
        debugger.add_coco_hp(dets_gt[i, k, 5:39], img_id='out_gt')
    if opt.hm_hp:
      pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())
      gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hmhp')
      debugger.add_blend_img(img, gt, 'gt_hmhp')
    if opt.debug == 4:
      debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
    else:
      debugger.show_all_imgs(pause=True)
Example 11: initLatent
# Required import: from utils import debugger [as alias]
# Or: from utils.debugger import Debugger [as alias]
def initLatent(loader, model, Y, nViews, S, AVG=False):
  model.eval()
  nIters = len(loader)
  N = loader.dataset.nImages
  M = np.zeros((N, ref.J, 3))
  bar = Bar('==>', max=nIters)
  sum_sigma2 = 0
  cnt_sigma2 = 1
  for i, (input, target, meta) in enumerate(loader):
    output = (model(torch.autograd.Variable(input)).data).cpu().numpy()
    G = output.shape[0] // nViews
    output = output.reshape(G, nViews, ref.J, 3)
    if AVG:
      for g in range(G):
        id = int(meta[g * nViews, 1])
        for j in range(nViews):
          RR, tt = horn87(output[g, j].transpose(), output[g, 0].transpose())
          MM = (np.dot(RR, output[g, j].transpose())).transpose().copy()
          M[id] += MM.copy() / nViews
    else:
      for g in range(G):
        # assert meta[g * nViews, 0] > 1 + ref.eps
        p = np.zeros(nViews)
        sigma2 = 0.1
        for j in range(nViews):
          for kk in range(Y.shape[0] // S):
            k = kk * S
            d = Dis(Y[k], output[g, j])
            sum_sigma2 += d
            cnt_sigma2 += 1
            p[j] += np.exp(-d / 2 / sigma2)
        id = int(meta[g * nViews, 1])
        M[id] = output[g, p.argmax()]
        if DEBUG and g == 0:
          print('M[id]', id, M[id], p.argmax())
          debugger = Debugger()
          for j in range(nViews):
            RR, tt = horn87(output[g, j].transpose(), output[g, p.argmax()].transpose())
            MM = (np.dot(RR, output[g, j].transpose())).transpose().copy()
            debugger.addPoint3D(MM, 'b')
            debugger.addImg(input[g * nViews + j].numpy().transpose(1, 2, 0), j)
          debugger.showAllImg()
          debugger.addPoint3D(M[id], 'r')
          debugger.show3D()
    Bar.suffix = 'Init: [{0:3}/{1:3}] | Total: {total:} | ETA: {eta:} | Dis: {dis:.6f}'.format(
      i, nIters, total=bar.elapsed_td, eta=bar.eta_td, dis=sum_sigma2 / cnt_sigma2)
    bar.next()
  bar.finish()
  # print('mean sigma2', sum_sigma2 / cnt_sigma2)
  return M