This article collects typical usage examples of the Python logger.AverageMeter method. If you have been wondering what exactly logger.AverageMeter does, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples from the logger module the method belongs to.
Below, 14 code examples of the logger.AverageMeter method are shown, sorted by popularity by default.
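None of the examples below defines AverageMeter itself, since each repository ships its own logger module. As a point of reference, here is a minimal sketch of the meter these snippets appear to assume: it tracks one or more running averages, exposes val (latest value) and avg (running mean), takes a vector size i and a display precision, and supports weighted updates. This is an illustrative reconstruction of the common SfmLearner-style implementation, not the exact class from any one of these projects; examples that format a meter with {loss.val:.4f} rely on a scalar variant instead.

class AverageMeter(object):
    """Computes and stores the current value and running average of one or more metrics."""

    def __init__(self, i=1, precision=3):
        self.meters = i              # number of values tracked in parallel
        self.precision = precision   # decimal places used when printing the meter
        self.reset(self.meters)

    def reset(self, i):
        self.val = [0] * i   # latest value(s)
        self.avg = [0] * i   # running average(s)
        self.sum = [0] * i   # weighted sum(s)
        self.count = 0       # total weight seen so far

    def update(self, val, n=1):
        # Accept a bare scalar for single-value meters, or a list for vector meters
        if not isinstance(val, list):
            val = [val]
        assert len(val) == self.meters
        self.count += n
        for i, v in enumerate(val):
            self.val[i] = v
            self.sum[i] += v * n
            self.avg[i] = self.sum[i] / self.count

    def __repr__(self):
        val = ' '.join(['{:.{}f}'.format(v, self.precision) for v in self.val])
        avg = ' '.join(['{:.{}f}'.format(a, self.precision) for a in self.avg])
        return '{} ({})'.format(val, avg)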
Example 1: main
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def main():
    global args
    args = parser.parse_args()
    args.output_dir = Path(args.output_dir)
    args.gt_dir = Path(args.gt_dir)

    error_names = ['epe_total', 'outliers']
    errors = AverageMeter(i=len(error_names))

    for i in tqdm(range(args.N)):
        gt_flow_path = args.gt_dir.joinpath(str(i).zfill(6) + '_10.png')
        output_flow_path = args.output_dir.joinpath(str(i).zfill(6) + '_10.png')
        u_gt, v_gt, valid_gt = flow_io.flow_read_png(gt_flow_path)
        u_pred, v_pred, valid_pred = flow_io.flow_read_png(output_flow_path)
        _errors = compute_err(u_gt, v_gt, valid_gt, u_pred, v_pred, valid_pred)
        errors.update(_errors)

    print("Results")
    print("\t {:>10}, {:>10} ".format(*error_names))
    print("Errors \t {:10.4f}, {:10.4f}".format(*errors.avg))
Example 2: validate
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate(val_loader, alice_net, bob_net, mod_net):
    global args
    accuracy = AverageMeter(i=3, precision=4)
    mod_count = AverageMeter()

    # switch to evaluate mode
    alice_net.eval()
    bob_net.eval()
    mod_net.eval()

    for i, (img, target) in enumerate(tqdm(val_loader)):
        img_var = Variable(img.cuda(), volatile=True)
        target_var = Variable(target.cuda(), volatile=True)

        pred_alice = alice_net(img_var)
        pred_bob = bob_net(img_var)
        pred_mod = F.sigmoid(mod_net(img_var))

        _, pred_alice_label = torch.max(pred_alice.data, 1)
        _, pred_bob_label = torch.max(pred_bob.data, 1)
        # Route each sample to alice or bob depending on the moderator's score
        pred_label = (pred_mod.squeeze().data > 0.5).type_as(pred_alice_label) * pred_alice_label \
                     + (pred_mod.squeeze().data <= 0.5).type_as(pred_bob_label) * pred_bob_label

        total_accuracy = (pred_label.cpu() == target).sum().item() / img.size(0)
        alice_accuracy = (pred_alice_label.cpu() == target).sum().item() / img.size(0)
        bob_accuracy = (pred_bob_label.cpu() == target).sum().item() / img.size(0)

        accuracy.update([total_accuracy, alice_accuracy, bob_accuracy])
        mod_count.update((pred_mod.cpu().data > 0.5).sum().item() / img.size(0))

    return list(map(lambda x: 1 - x, accuracy.avg)), ['Total', 'alice', 'bob'], mod_count.avg
Example 3: train
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def train(train_loader, model, criterion, optimizer, epoch, logger=None):
    # Switch to train mode
    model.train()

    batch_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    # Start counting time
    end = time.time()

    for i, (im, cl) in enumerate(train_loader):
        if torch.cuda.is_available():
            im, cl = im.cuda(), cl.cuda()

        op = model(im)
        loss = criterion(op, cl)
        acc = utils.accuracy(op.data, cl.data, topk=(1,))
        losses.update(loss.item(), cl.size(0))
        accuracies.update(acc[0].item(), cl.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.log_interval == 0:
            print('[Train] Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
                  .format(epoch + 1, i + 1, len(train_loader),
                          batch_time=batch_time, loss=losses, acc=accuracies))
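Note the second argument to update in the loop above: passing the batch size cl.size(0) weights each batch's mean loss by its number of samples, so losses.avg stays a true per-sample average even when the final batch is smaller. A quick sketch of the arithmetic with made-up values, again assuming the meter from the introduction:

losses = AverageMeter()
losses.update(0.50, n=32)   # mean loss over a full batch of 32 samples
losses.update(0.80, n=8)    # mean loss over a final, smaller batch of 8
# avg = (0.50 * 32 + 0.80 * 8) / (32 + 8) = 22.4 / 40 = 0.56
print(losses.avg[0])        # -> 0.56 (up to float rounding)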
Example 4: validate
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate(valid_loader, model, criterion, epoch, logger=None):
    # Switch to evaluate mode
    model.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    # Start counting time
    end = time.time()

    for i, (im, cl) in enumerate(valid_loader):
        if torch.cuda.is_available():
            im, cl = im.cuda(), cl.cuda()

        # compute output
        op = model(im)
        loss = criterion(op, cl)
        acc = utils.accuracy(op.data, cl.data, topk=(1,))
        losses.update(loss.item(), cl.size(0))
        accuracies.update(acc[0].item(), cl.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.log_interval == 0:
            print('[Validation] Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
                  .format(epoch + 1, i + 1, len(valid_loader),
                          batch_time=batch_time, loss=losses, acc=accuracies))

    return accuracies.avg
Example 5: validate_with_gt
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate_with_gt(args, val_loader, disp_net, epoch, logger, tb_writer, sample_nb_to_log=3):
    global device
    batch_time = AverageMeter()
    error_names = ['abs_diff', 'abs_rel', 'sq_rel', 'a1', 'a2', 'a3']
    errors = AverageMeter(i=len(error_names))
    log_outputs = sample_nb_to_log > 0

    # switch to evaluate mode
    disp_net.eval()

    end = time.time()
    logger.valid_bar.update(0)
    for i, (tgt_img, depth) in enumerate(val_loader):
        tgt_img = tgt_img.to(device)
        depth = depth.to(device)

        # compute output
        output_disp = disp_net(tgt_img)
        output_depth = 1 / output_disp[:, 0]

        if log_outputs and i < sample_nb_to_log:
            if epoch == 0:
                tb_writer.add_image('val Input/{}'.format(i), tensor2array(tgt_img[0]), 0)
                depth_to_show = depth[0]
                tb_writer.add_image('val target Depth Normalized/{}'.format(i),
                                    tensor2array(depth_to_show, max_value=None),
                                    epoch)
                depth_to_show[depth_to_show == 0] = 1000
                disp_to_show = (1 / depth_to_show).clamp(0, 10)
                tb_writer.add_image('val target Disparity Normalized/{}'.format(i),
                                    tensor2array(disp_to_show, max_value=None, colormap='magma'),
                                    epoch)

            tb_writer.add_image('val Dispnet Output Normalized/{}'.format(i),
                                tensor2array(output_disp[0], max_value=None, colormap='magma'),
                                epoch)
            tb_writer.add_image('val Depth Output Normalized/{}'.format(i),
                                tensor2array(output_depth[0], max_value=None),
                                epoch)

        errors.update(compute_errors(depth, output_depth))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        logger.valid_bar.update(i + 1)
        if i % args.print_freq == 0:
            logger.valid_writer.write('valid: Time {} Abs Error {:.4f} ({:.4f})'.format(
                batch_time, errors.val[0], errors.avg[0]))

    logger.valid_bar.update(len(val_loader))
    return errors.avg, error_names
Example 6: adjust_shifts
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def adjust_shifts(args, train_set, adjust_loader, pose_exp_net, epoch, logger, tb_writer):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    new_shifts = AverageMeter(args.sequence_length - 1)
    pose_exp_net.train()
    poses = np.zeros(((len(adjust_loader) - 1) * args.batch_size * (args.sequence_length - 1), 6))

    mid_index = (args.sequence_length - 1) // 2
    target_values = np.abs(np.arange(-mid_index, mid_index + 1)) * args.target_displacement
    target_values = np.concatenate([target_values[:mid_index], target_values[mid_index + 1:]])

    end = time.time()

    for i, (indices, tgt_img, ref_imgs, intrinsics, intrinsics_inv) in enumerate(adjust_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        tgt_img = tgt_img.to(device)
        ref_imgs = [img.to(device) for img in ref_imgs]

        # compute output
        explainability_mask, pose_batch = pose_exp_net(tgt_img, ref_imgs)

        if i < len(adjust_loader) - 1:
            step = args.batch_size * (args.sequence_length - 1)
            poses[i * step:(i + 1) * step] = pose_batch.cpu().reshape(-1, 6).numpy()

        for index, pose in zip(indices, pose_batch):
            displacements = pose[:, :3].norm(p=2, dim=1).cpu().numpy()
            ratio = target_values / displacements
            train_set.reset_shifts(index, ratio[:mid_index], ratio[mid_index:])
            new_shifts.update(train_set.get_shifts(index))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        logger.train_bar.update(i)
        if i % args.print_freq == 0:
            logger.train_writer.write('Adjustment: '
                                      'Time {} Data {} shifts {}'.format(batch_time, data_time, new_shifts))

    prefix = 'train poses'
    coeffs_names = ['tx', 'ty', 'tz']
    if args.rotation_mode == 'euler':
        coeffs_names.extend(['rx', 'ry', 'rz'])
    elif args.rotation_mode == 'quat':
        coeffs_names.extend(['qx', 'qy', 'qz'])
    for i in range(poses.shape[1]):
        tb_writer.add_histogram('{} {}'.format(prefix, coeffs_names[i]), poses[:, i], epoch)

    return new_shifts.avg
Example 7: main
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def main():
    global args
    args = parser.parse_args()

    normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                            std=[0.5, 0.5, 0.5])
    flow_loader_h, flow_loader_w = 256, 832
    valid_flow_transform = custom_transforms.Compose([custom_transforms.Scale(h=flow_loader_h, w=flow_loader_w),
                                                      custom_transforms.ArrayToTensor(), normalize])
    if args.dataset == "kitti2015":
        val_flow_set = ValidationFlow(root='/home/anuragr/datasets/kitti/kitti2015',
                                      sequence_length=5, transform=valid_flow_transform)
    elif args.dataset == "kitti2012":
        val_flow_set = ValidationFlowKitti2012(root='/is/ps2/aranjan/AllFlowData/kitti/kitti2012',
                                               sequence_length=5, transform=valid_flow_transform)

    val_flow_loader = torch.utils.data.DataLoader(val_flow_set, batch_size=1, shuffle=False,
                                                  num_workers=2, pin_memory=True, drop_last=True)

    flow_net = getattr(models, args.flownet)(nlevels=args.nlevels).cuda()

    if args.pretrained_flow:
        print("=> using pre-trained weights from {}".format(args.pretrained_flow))
        weights = torch.load(args.pretrained_flow)
        flow_net.load_state_dict(weights['state_dict'])  # , strict=False

    flow_net = flow_net.cuda()
    flow_net.eval()

    error_names = ['epe_total', 'epe_non_rigid', 'epe_rigid', 'outliers']
    errors = AverageMeter(i=len(error_names))

    for i, (tgt_img, ref_imgs, intrinsics, intrinsics_inv, flow_gt, obj_map) in enumerate(tqdm(val_flow_loader)):
        tgt_img_var = Variable(tgt_img.cuda(), volatile=True)
        if args.dataset == "kitti2015":
            ref_imgs_var = [Variable(img.cuda(), volatile=True) for img in ref_imgs]
            ref_img_var = ref_imgs_var[1:3]
        elif args.dataset == "kitti2012":
            ref_img_var = Variable(ref_imgs.cuda(), volatile=True)
        flow_gt_var = Variable(flow_gt.cuda(), volatile=True)

        # compute output
        flow_fwd, flow_bwd, occ = flow_net(tgt_img_var, ref_img_var)
        # epe = compute_epe(gt=flow_gt_var, pred=flow_fwd)
        obj_map_gt_var = Variable(obj_map.cuda(), volatile=True)
        obj_map_gt_var_expanded = obj_map_gt_var.unsqueeze(1).type_as(flow_fwd)

        epe = compute_all_epes(flow_gt_var, flow_fwd, flow_fwd, (1 - obj_map_gt_var_expanded))
        errors.update(epe)

    print("Average EPE", errors.avg)
Example 8: validate
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate(val_loader, alice_net, bob_net, mod_net, epoch, logger=None, output_writer=[]):
    global args
    batch_time = AverageMeter()
    accuracy = AverageMeter(i=3, precision=4)

    # switch to evaluate mode
    alice_net.eval()
    bob_net.eval()
    mod_net.eval()

    end = time.time()
    for i, (img, target) in enumerate(val_loader):
        img_var = Variable(img.cuda(), volatile=True)
        target_var = Variable(target.cuda(), volatile=True)

        pred_alice = alice_net(img_var)
        pred_bob = bob_net(img_var)
        pred_mod = F.sigmoid(mod_net(img_var))

        _, pred_alice_label = torch.max(pred_alice.data, 1)
        _, pred_bob_label = torch.max(pred_bob.data, 1)
        # Route each sample to alice or bob depending on the moderator's score
        pred_label = (pred_mod.squeeze().data > 0.5).type_as(pred_alice_label) * pred_alice_label \
                     + (pred_mod.squeeze().data <= 0.5).type_as(pred_bob_label) * pred_bob_label

        total_accuracy = (pred_label.cpu() == target).sum().item() / img.size(0)
        alice_accuracy = (pred_alice_label.cpu() == target).sum().item() / img.size(0)
        bob_accuracy = (pred_bob_label.cpu() == target).sum().item() / img.size(0)
        accuracy.update([total_accuracy, alice_accuracy, bob_accuracy])

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.log_terminal:
            logger.valid_bar.update(i)
            if i % args.print_freq == 0:
                logger.valid_writer.write('valid: Time {} Accuracy {}'.format(batch_time, accuracy))

    if args.log_output:
        output_writer.add_scalar('accuracy_alice', accuracy.avg[1], epoch)
        output_writer.add_scalar('accuracy_bob', accuracy.avg[2], epoch)
        output_writer.add_scalar('accuracy_total', accuracy.avg[0], epoch)

    if args.log_terminal:
        logger.valid_bar.update(len(val_loader))

    return list(map(lambda x: 1 - x, accuracy.avg)), ['Total loss', 'alice loss', 'bob loss']
Example 9: validate_depth_with_gt
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate_depth_with_gt(val_loader, disp_net, epoch, logger, output_writers=[]):
    global args
    batch_time = AverageMeter()
    error_names = ['abs_diff', 'abs_rel', 'sq_rel', 'a1', 'a2', 'a3']
    errors = AverageMeter(i=len(error_names))
    log_outputs = len(output_writers) > 0

    # switch to evaluate mode
    disp_net.eval()

    end = time.time()
    for i, (tgt_img, depth) in enumerate(val_loader):
        tgt_img_var = Variable(tgt_img.cuda(), volatile=True)
        output_disp = disp_net(tgt_img_var)
        if args.spatial_normalize:
            output_disp = spatial_normalize(output_disp)
        output_depth = 1 / output_disp
        depth = depth.cuda()

        # log sample outputs to tensorboard
        if log_outputs and i % 100 == 0 and i / 100 < len(output_writers):
            index = int(i // 100)
            if epoch == 0:
                output_writers[index].add_image('val Input', tensor2array(tgt_img[0]), 0)
                depth_to_show = depth[0].cpu()
                output_writers[index].add_image('val target Depth',
                                                tensor2array(depth_to_show, max_value=10), epoch)
                depth_to_show[depth_to_show == 0] = 1000
                disp_to_show = (1 / depth_to_show).clamp(0, 10)
                output_writers[index].add_image('val target Disparity Normalized',
                                                tensor2array(disp_to_show, max_value=None, colormap='bone'),
                                                epoch)
            output_writers[index].add_image('val Dispnet Output Normalized',
                                            tensor2array(output_disp.data[0].cpu(), max_value=None, colormap='bone'),
                                            epoch)
            output_writers[index].add_image('val Depth Output',
                                            tensor2array(output_depth.data[0].cpu(), max_value=10), epoch)

        errors.update(compute_errors(depth, output_depth.data.squeeze(1)))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.log_terminal:
            logger.valid_bar.update(i)
            if i % args.print_freq == 0:
                logger.valid_writer.write('valid: Time {} Abs Error {:.4f} ({:.4f})'.format(
                    batch_time, errors.val[0], errors.avg[0]))

    if args.log_terminal:
        logger.valid_bar.update(len(val_loader))

    return errors.avg, error_names
Example 10: train
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def train(train_loader, sem_pcyc_model, epoch, args):
    # Switch to train mode
    sem_pcyc_model.train()

    batch_time = AverageMeter()
    losses_gen_adv = AverageMeter()
    losses_gen_cyc = AverageMeter()
    losses_gen_cls = AverageMeter()
    losses_gen_reg = AverageMeter()
    losses_gen = AverageMeter()
    losses_disc_se = AverageMeter()
    losses_disc_sk = AverageMeter()
    losses_disc_im = AverageMeter()
    losses_disc = AverageMeter()
    losses_aut_enc = AverageMeter()

    # Start counting time
    time_start = time.time()

    for i, (sk, im, cl) in enumerate(train_loader):
        # Transfer sk and im to cuda
        if torch.cuda.is_available():
            sk, im = sk.cuda(), im.cuda()

        # Optimize parameters
        loss = sem_pcyc_model.optimize_params(sk, im, cl)

        # Store losses for visualization
        losses_aut_enc.update(loss['aut_enc'].item(), sk.size(0))
        losses_gen_adv.update(loss['gen_adv'].item(), sk.size(0))
        losses_gen_cyc.update(loss['gen_cyc'].item(), sk.size(0))
        losses_gen_cls.update(loss['gen_cls'].item(), sk.size(0))
        losses_gen_reg.update(loss['gen_reg'].item(), sk.size(0))
        losses_gen.update(loss['gen'].item(), sk.size(0))
        losses_disc_se.update(loss['disc_se'].item(), sk.size(0))
        losses_disc_sk.update(loss['disc_sk'].item(), sk.size(0))
        losses_disc_im.update(loss['disc_im'].item(), sk.size(0))
        losses_disc.update(loss['disc'].item(), sk.size(0))

        # time
        time_end = time.time()
        batch_time.update(time_end - time_start)
        time_start = time_end

        if (i + 1) % args.log_interval == 0:
            print('[Train] Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Gen. Loss {loss_gen.val:.4f} ({loss_gen.avg:.4f})\t'
                  'Disc. Loss {loss_disc.val:.4f} ({loss_disc.avg:.4f})\t'
                  .format(epoch + 1, i + 1, len(train_loader), batch_time=batch_time,
                          loss_gen=losses_gen, loss_disc=losses_disc))

    losses = {'aut_enc': losses_aut_enc, 'gen_adv': losses_gen_adv, 'gen_cyc': losses_gen_cyc,
              'gen_cls': losses_gen_cls, 'gen_reg': losses_gen_reg, 'gen': losses_gen,
              'disc_se': losses_disc_se, 'disc_sk': losses_disc_sk, 'disc_im': losses_disc_im,
              'disc': losses_disc}

    return losses
Example 11: train
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def train(epoch, data_loader, model, criterion, optimizer, args):
    # switch to train mode
    model.train()

    # average meters to record the training statistics
    losses = AverageMeter()
    losses_dict = dict()
    losses_dict['ranking_loss'] = AverageMeter()
    if args.div_weight > 0:
        losses_dict['div_loss'] = AverageMeter()
    if args.mmd_weight > 0:
        losses_dict['mmd_loss'] = AverageMeter()

    for itr, data in enumerate(data_loader):
        img, txt, txt_len, _ = data
        if torch.cuda.is_available():
            img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()

        # Forward pass and compute loss; _a: attention map, _r: residuals
        img_emb, txt_emb, img_a, txt_a, img_r, txt_r = model.forward(img, txt, txt_len)

        # Compute loss and update statistics
        loss, loss_dict = criterion(img_emb, txt_emb, img_r, txt_r)
        losses.update(loss.item())
        for key, val in loss_dict.items():
            losses_dict[key].update(val.item())

        # Backprop
        optimizer.zero_grad()
        loss.backward()
        if args.grad_clip > 0:
            nn.utils.clip_grad.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        # Print log info
        if itr > 0 and (itr % args.log_step == 0 or itr + 1 == len(data_loader)):
            log_msg = 'loss: %.4f (%.4f)' % (losses.val, losses.avg)
            for key, val in losses_dict.items():
                log_msg += ', %s: %.4f, (%.4f)' % (key.replace('_loss', ''), val.val, val.avg)
            n = int(math.ceil(math.log(len(data_loader) + 1, 10)))
            logging.info('[%d][%*d/%d] %s' % (epoch, n, itr, len(data_loader), log_msg))

    # Write an epoch-level summary to the shared log file
    log_msg = 'loss: %.4f' % losses.avg
    for key, val in losses_dict.items():
        log_msg += ', %s: %.4f' % (key.replace('_loss', ''), val.avg)
    exp_name = args.logger_name.split('/')[-1]
    lock_and_write_to_file(args.log_file, '[%s][%d] %s' % (exp_name, epoch, log_msg))

    # Free tensors from the last iteration
    del img_emb, txt_emb, img_a, txt_a, img_r, txt_r, loss

    return losses.avg
Example 12: validate_with_gt
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate_with_gt(args, val_loader, dpsnet, epoch, output_writers=[]):
    batch_time = AverageMeter()
    error_names = ['abs_rel', 'abs_diff', 'sq_rel', 'a1', 'a2', 'a3']
    errors = AverageMeter(i=len(error_names))
    log_outputs = len(output_writers) > 0

    # switch to evaluate mode
    dpsnet.eval()

    end = time.time()
    with torch.no_grad():
        for i, (tgt_img, ref_imgs, ref_poses, intrinsics, intrinsics_inv, tgt_depth) in enumerate(val_loader):
            tgt_img_var = Variable(tgt_img.cuda())
            ref_imgs_var = [Variable(img.cuda()) for img in ref_imgs]
            ref_poses_var = [Variable(pose.cuda()) for pose in ref_poses]
            intrinsics_var = Variable(intrinsics.cuda())
            intrinsics_inv_var = Variable(intrinsics_inv.cuda())
            tgt_depth_var = Variable(tgt_depth.cuda())

            pose = torch.cat(ref_poses_var, 1)

            output_depth = dpsnet(tgt_img_var, ref_imgs_var, pose, intrinsics_var, intrinsics_inv_var)
            output_disp = args.nlabel * args.mindepth / output_depth

            # mask out invalid ground truth (tgt_depth == tgt_depth filters NaNs) and out-of-range depths
            mask = (tgt_depth <= args.nlabel * args.mindepth) & (tgt_depth >= args.mindepth) & (tgt_depth == tgt_depth)

            output = torch.squeeze(output_depth.data.cpu(), 1)

            if log_outputs and i % 100 == 0 and i / 100 < len(output_writers):
                index = int(i // 100)
                if epoch == 0:
                    output_writers[index].add_image('val Input', tensor2array(tgt_img[0]), 0)
                    depth_to_show = tgt_depth_var.data[0].cpu()
                    depth_to_show[depth_to_show > args.nlabel * args.mindepth] = args.nlabel * args.mindepth
                    disp_to_show = (args.nlabel * args.mindepth / depth_to_show)
                    disp_to_show[disp_to_show > args.nlabel] = 0
                    output_writers[index].add_image('val target Disparity Normalized',
                                                    tensor2array(disp_to_show, max_value=args.nlabel, colormap='bone'),
                                                    epoch)
                    output_writers[index].add_image('val target Depth Normalized',
                                                    tensor2array(depth_to_show, max_value=args.nlabel*args.mindepth*0.3),
                                                    epoch)
                output_writers[index].add_image('val Dispnet Output Normalized',
                                                tensor2array(output_disp.data[0].cpu(), max_value=args.nlabel, colormap='bone'),
                                                epoch)
                output_writers[index].add_image('val Depth Output',
                                                tensor2array(output_depth.data[0].cpu(), max_value=args.nlabel*args.mindepth*0.3),
                                                epoch)

            errors.update(compute_errors_train(tgt_depth, output, mask))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('valid: Time {} Abs Error {:.4f} ({:.4f})'.format(batch_time, errors.val[0], errors.avg[0]))

    return errors.avg, error_names
Example 13: validate_without_gt
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate_without_gt(args, val_loader, disp_net, pose_net, epoch, logger, output_writers=[]):
    global device
    batch_time = AverageMeter()
    losses = AverageMeter(i=4, precision=4)
    log_outputs = len(output_writers) > 0

    # switch to evaluate mode
    disp_net.eval()
    pose_net.eval()

    end = time.time()
    logger.valid_bar.update(0)
    for i, (tgt_img, ref_imgs, intrinsics, intrinsics_inv) in enumerate(val_loader):
        tgt_img = tgt_img.to(device)
        ref_imgs = [img.to(device) for img in ref_imgs]
        intrinsics = intrinsics.to(device)
        intrinsics_inv = intrinsics_inv.to(device)

        # compute output
        tgt_depth = [1 / disp_net(tgt_img)]
        ref_depths = []
        for ref_img in ref_imgs:
            ref_depth = [1 / disp_net(ref_img)]
            ref_depths.append(ref_depth)

        if log_outputs and i < len(output_writers):
            if epoch == 0:
                output_writers[i].add_image('val Input', tensor2array(tgt_img[0]), 0)

            output_writers[i].add_image('val Dispnet Output Normalized',
                                        tensor2array(1 / tgt_depth[0][0], max_value=None, colormap='magma'),
                                        epoch)
            output_writers[i].add_image('val Depth Output',
                                        tensor2array(tgt_depth[0][0], max_value=10),
                                        epoch)

        poses, poses_inv = compute_pose_with_inv(pose_net, tgt_img, ref_imgs)

        loss_1, loss_3 = compute_photo_and_geometry_loss(tgt_img, ref_imgs, intrinsics, tgt_depth, ref_depths,
                                                         poses, poses_inv, args.num_scales, args.with_ssim,
                                                         args.with_mask, False, args.padding_mode)
        loss_2 = compute_smooth_loss(tgt_depth, tgt_img, ref_depths, ref_imgs)

        loss_1 = loss_1.item()
        loss_2 = loss_2.item()
        loss_3 = loss_3.item()

        loss = loss_1
        losses.update([loss, loss_1, loss_2, loss_3])

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        logger.valid_bar.update(i + 1)
        if i % args.print_freq == 0:
            logger.valid_writer.write('valid: Time {} Loss {}'.format(batch_time, losses))

    logger.valid_bar.update(len(val_loader))
    return losses.avg, ['Total loss', 'Photo loss', 'Smooth loss', 'Consistency loss']
Example 14: validate_with_gt
# Required import: import logger [as alias]
# Or: from logger import AverageMeter [as alias]
def validate_with_gt(args, val_loader, disp_net, epoch, logger, output_writers=[]):
    global device
    batch_time = AverageMeter()
    error_names = ['abs_diff', 'abs_rel', 'sq_rel', 'a1', 'a2', 'a3']
    errors = AverageMeter(i=len(error_names))
    log_outputs = len(output_writers) > 0

    # switch to evaluate mode
    disp_net.eval()

    end = time.time()
    logger.valid_bar.update(0)
    for i, (tgt_img, depth) in enumerate(val_loader):
        tgt_img = tgt_img.to(device)
        depth = depth.to(device)

        # check gt
        if depth.nelement() == 0:
            continue

        # compute output
        output_disp = disp_net(tgt_img)
        output_depth = 1 / output_disp[:, 0]

        if log_outputs and i < len(output_writers):
            if epoch == 0:
                output_writers[i].add_image('val Input', tensor2array(tgt_img[0]), 0)
                depth_to_show = depth[0]
                output_writers[i].add_image('val target Depth',
                                            tensor2array(depth_to_show, max_value=10),
                                            epoch)
                depth_to_show[depth_to_show == 0] = 1000
                disp_to_show = (1 / depth_to_show).clamp(0, 10)
                output_writers[i].add_image('val target Disparity Normalized',
                                            tensor2array(disp_to_show, max_value=None, colormap='magma'),
                                            epoch)

            output_writers[i].add_image('val Dispnet Output Normalized',
                                        tensor2array(output_disp[0], max_value=None, colormap='magma'),
                                        epoch)
            output_writers[i].add_image('val Depth Output',
                                        tensor2array(output_depth[0], max_value=10),
                                        epoch)

        # resize prediction to the ground-truth resolution if they differ
        if depth.nelement() != output_depth.nelement():
            b, h, w = depth.size()
            output_depth = torch.nn.functional.interpolate(output_depth.unsqueeze(1), [h, w]).squeeze(1)

        errors.update(compute_errors(depth, output_depth, args.dataset))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        logger.valid_bar.update(i + 1)
        if i % args.print_freq == 0:
            logger.valid_writer.write('valid: Time {} Abs Error {:.4f} ({:.4f})'.format(
                batch_time, errors.val[0], errors.avg[0]))

    logger.valid_bar.update(len(val_loader))
    return errors.avg, error_names