

Python metrics.AverageMeter Method Code Examples

This article collects typical usage examples of the Python method utils.metrics.AverageMeter. If you are unsure how metrics.AverageMeter is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also browse further usage examples from the containing module, utils.metrics.


The sections below present 11 code examples of the metrics.AverageMeter method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Python code samples.
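The exact AverageMeter implementation differs from project to project, so before the examples it may help to see a minimal sketch of the common definition (the variant popularized by the official PyTorch ImageNet example). This sketch is an assumption for orientation only, but it matches the update(value, n), reset(), .val and .avg calls used in the examples below:

class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0    # most recent value passed to update()
        self.sum = 0.0    # weighted sum of all values seen so far
        self.count = 0    # total weight (typically the number of samples)
        self.avg = 0.0    # running average: sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

With this definition, .val is the value from the most recent batch, while .avg is the weighted mean over everything recorded since the last reset().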

Example 1: validate

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def validate(self):
        """
        One epoch validation
        :return:
        """
        tqdm_batch = tqdm(self.data_loader.valid_loader, total=self.data_loader.valid_iterations,
                          desc="Validation at -{}-".format(self.current_epoch))

        # set the model in evaluation mode
        self.model.eval()

        epoch_loss = AverageMeter()
        top1_acc = AverageMeter()
        top5_acc = AverageMeter()

        for x, y in tqdm_batch:
            if self.cuda:
                x, y = x.cuda(non_blocking=self.config.async_loading), y.cuda(non_blocking=self.config.async_loading)

            x, y = Variable(x), Variable(y)
            # model
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during validation...')

            top1, top5 = cls_accuracy(pred.data, y.data, topk=(1, 5))
            epoch_loss.update(cur_loss.item())
            top1_acc.update(top1.item(), x.size(0))
            top5_acc.update(top5.item(), x.size(0))

        self.logger.info("Validation results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.avg) + "- Top1 Acc: " + str(top1_acc.avg) + "- Top5 Acc: " + str(top5_acc.avg))

        tqdm_batch.close()

        return top1_acc.avg 
Developer: moemen95, Project: Pytorch-Project-Template, Lines of code: 40, Source file: condensenet.py
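In the example above, update(top1.item(), x.size(0)) weights each batch by its size, so .avg stays a per-sample average even when the last batch is smaller than the rest. A hypothetical snippet (assuming the sketch from the introduction) illustrating the effect:

meter = AverageMeter()
meter.update(0.90, n=128)   # top-1 accuracy over a full batch of 128 samples
meter.update(0.60, n=32)    # accuracy over a smaller final batch
print(meter.val)            # 0.60  -> most recent batch only
print(meter.avg)            # 0.84  -> (0.90*128 + 0.60*32) / 160, the per-sample mean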

Example 2: _reset_metrics

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def _reset_metrics(self):
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.total_loss = AverageMeter()
        self.total_inter, self.total_union = 0, 0
        self.total_correct, self.total_label = 0, 0 
Developer: yassouali, Project: pytorch_segmentation, Lines of code: 8, Source file: trainer.py

Example 3: main

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir, opt.area, True, pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(DenseDeepGCN(opt)).to(opt.device)
    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    logging.info('Saving the final model.Finish!') 
Developer: lightaime, Project: deep_gcns_torch, Lines of code: 37, Source file: train.py

Example 4: main

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir, test_area=5, train=True, pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    else:
        train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = SparseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(SparseDeepGCN(opt)).to(opt.device)
    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    logging.info('Saving the final model.Finish!') 
Developer: lightaime, Project: deep_gcns_torch, Lines of code: 40, Source file: train.py

Example 5: train_one_epoch

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def train_one_epoch(self):
        """
        One epoch training function
        """
        # Initialize tqdm
        tqdm_batch = tqdm(self.data_loader.train_loader, total=self.data_loader.train_iterations,
                          desc="Epoch-{}-".format(self.current_epoch))
        # Set the model to be in training mode
        self.model.train()
        # Initialize your average meters
        epoch_loss = AverageMeter()
        top1_acc = AverageMeter()
        top5_acc = AverageMeter()

        current_batch = 0
        for x, y in tqdm_batch:
            if self.cuda:
                x, y = x.cuda(non_blocking=self.config.async_loading), y.cuda(non_blocking=self.config.async_loading)

            # current iteration over total iterations
            progress = float(self.current_epoch * self.data_loader.train_iterations + current_batch) / (
                    self.config.max_epoch * self.data_loader.train_iterations)
            # progress = float(self.current_iteration) / (self.config.max_epoch * self.data_loader.train_iterations)
            x, y = Variable(x), Variable(y)
            lr = adjust_learning_rate(self.optimizer, self.current_epoch, self.config, batch=current_batch,
                                      nBatch=self.data_loader.train_iterations)
            # model
            pred = self.model(x, progress)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during training...')
            # optimizer
            self.optimizer.zero_grad()
            cur_loss.backward()
            self.optimizer.step()

            top1, top5 = cls_accuracy(pred.data, y.data, topk=(1, 5))

            epoch_loss.update(cur_loss.item())
            top1_acc.update(top1.item(), x.size(0))
            top5_acc.update(top5.item(), x.size(0))

            self.current_iteration += 1
            current_batch += 1

            self.summary_writer.add_scalar("epoch/loss", epoch_loss.val, self.current_iteration)
            self.summary_writer.add_scalar("epoch/accuracy", top1_acc.val, self.current_iteration)
        tqdm_batch.close()

        self.logger.info("Training at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + "- Top1 Acc: " + str(top1_acc.val) + "- Top5 Acc: " + str(top5_acc.val)) 
Developer: moemen95, Project: Pytorch-Project-Template, Lines of code: 54, Source file: condensenet.py

Example 6: train_one_epoch

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def train_one_epoch(self):
        """
        One epoch training function
        """
        # Initialize tqdm
        tqdm_batch = tqdm(self.data_loader.train_loader, total=self.data_loader.train_iterations,
                          desc="Epoch-{}-".format(self.current_epoch))

        # Set the model to be in training mode (for batchnorm)
        self.model.train()
        # Initialize your average meters
        epoch_loss = AverageMeter()
        metrics = IOUMetric(self.config.num_classes)

        for x, y in tqdm_batch:
            if self.cuda:
                x, y = x.pin_memory().cuda(non_blocking=self.config.async_loading), y.cuda(non_blocking=self.config.async_loading)
            x, y = Variable(x), Variable(y)
            # model
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during training...')

            # optimizer
            self.optimizer.zero_grad()
            cur_loss.backward()
            self.optimizer.step()

            epoch_loss.update(cur_loss.item())
            _, pred_max = torch.max(pred, 1)
            metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())

            self.current_iteration += 1
            # exit(0)

        epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()
        self.summary_writer.add_scalar("epoch-training/loss", epoch_loss.val, self.current_iteration)
        self.summary_writer.add_scalar("epoch_training/mean_iou", epoch_mean_iou, self.current_iteration)
        tqdm_batch.close()

        print("Training Results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + " - acc-: " + str(
            epoch_acc) + "- mean_iou: " + str(epoch_mean_iou) + "\n iou per class: \n" + str(
            epoch_iou_class)) 
Developer: moemen95, Project: Pytorch-Project-Template, Lines of code: 48, Source file: erfnet.py

Example 7: validate

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def validate(self):
        """
        One epoch validation
        :return:
        """
        tqdm_batch = tqdm(self.data_loader.valid_loader, total=self.data_loader.valid_iterations,
                          desc="Validation at -{}-".format(self.current_epoch))

        # set the model in evaluation mode
        self.model.eval()

        epoch_loss = AverageMeter()
        metrics = IOUMetric(self.config.num_classes)

        for x, y in tqdm_batch:
            if self.cuda:
                x, y = x.pin_memory().cuda(non_blocking=self.config.async_loading), y.cuda(non_blocking=self.config.async_loading)
            x, y = Variable(x), Variable(y)
            # model
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)

            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during Validation.')

            _, pred_max = torch.max(pred, 1)
            metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())

            epoch_loss.update(cur_loss.item())

        epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()
        self.summary_writer.add_scalar("epoch_validation/loss", epoch_loss.val, self.current_iteration)
        self.summary_writer.add_scalar("epoch_validation/mean_iou", epoch_mean_iou, self.current_iteration)

        print("Validation Results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + " - acc-: " + str(
            epoch_acc) + "- mean_iou: " + str(epoch_mean_iou) + "\n iou per class: \n" + str(
            epoch_iou_class))

        tqdm_batch.close()

        return epoch_mean_iou, epoch_loss.val 
Developer: moemen95, Project: Pytorch-Project-Template, Lines of code: 45, Source file: erfnet.py

Example 8: train

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def train(model, train_loader, val_loader, test_loader, opt):
    logging.info('===> Init the optimizer ...')
    criterion = nn.NLLLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)  # weight_decay=1e-4
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)
    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()

    best_val_part_miou = 0.
    best_test_part_miou = 0.
    test_part_miou_val_best = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        # reset tracker
        opt.losses.reset()

        train_epoch(model, train_loader, optimizer, criterion, opt)
        val_part_iou, val_shape_mIoU = test(model, val_loader, opt)
        test_part_iou, test_shape_mIoU = test(model, test_loader, opt)

        scheduler.step()

        # ------------------  save ckpt
        if val_part_iou > best_val_part_miou:
            best_val_part_miou = val_part_iou
            test_part_miou_val_best = test_part_iou
            logging.info("Got a new best model on Validation with Part iou {:.4f}".format(best_val_part_miou))
            save_ckpt(model, optimizer, scheduler, opt, 'val_best')
        if test_part_iou > best_test_part_miou:
            best_test_part_miou = test_part_iou
            logging.info("Got a new best model on Test with Part iou {:.4f}".format(best_test_part_miou))
            save_ckpt(model, optimizer, scheduler, opt, 'test_best')

        # ------------------ show information
        logging.info(
            "===> Epoch {} Category {}-{}, Train Loss {:.4f}, mIoU on val {:.4f}, mIoU on test {:4f}, "
            "Best val mIoU {:.4f} Its test mIoU {:.4f}. Best test mIoU {:.4f}".format(
                opt.epoch, opt.category_no, opt.category, opt.losses.avg, val_part_iou, test_part_iou,
                best_val_part_miou, test_part_miou_val_best, best_test_part_miou))

        info = {
            'loss': opt.losses.avg,
            'val_part_miou': val_part_iou,
            'test_part_miou': test_part_iou,
            'lr': scheduler.get_lr()[0]
        }
        for tag, value in info.items():
            opt.logger.scalar_summary(tag, value, opt.step)

    save_ckpt(model, optimizer, scheduler, opt, 'last')
    logging.info(
        'Saving the final model.Finish! Category {}-{}. Best val part mIoU is {:.4f}. Its test mIoU is {:.4f}. '
        'Best test part mIoU is {:.4f}. Last test mIoU {:.4f} \n\n\n'.
            format(opt.category_no, opt.category, best_val_part_miou, test_part_miou_val_best,
                   best_test_part_miou, test_part_iou)) 
Developer: lightaime, Project: deep_gcns_torch, Lines of code: 60, Source file: main.py

Example 9: train

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def train():
    info_format = 'Epoch: [{}]\t loss: {: .6f} train mF1: {: .6f} \t val mF1: {: .6f}\t test mF1: {:.6f} \t ' \
                  'best val mF1: {: .6f}\t best test mF1: {:.6f}'
    opt.printer.info('===> Init the optimizer ...')
    criterion = torch.nn.BCEWithLogitsLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = ReduceLROnPlateau(optimizer, "min", patience=opt.lr_patience, verbose=True, factor=0.5, cooldown=30,
                                  min_lr=opt.lr/100)
    opt.scheduler = 'ReduceLROnPlateau'

    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()

    best_val_value = 0.
    best_test_value = 0.

    opt.printer.info('===> Start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        loss, train_value = train_step(model, train_loader, optimizer, criterion, opt)
        val_value = test(model, valid_loader, opt)
        test_value = test(model, test_loader, opt)

        if val_value > best_val_value:
            best_val_value = val_value
            save_ckpt(model, optimizer, scheduler, opt.epoch, opt.save_path, opt.post, name_post='val_best')
        if test_value > best_test_value:
            best_test_value = test_value
            save_ckpt(model, optimizer, scheduler, opt.epoch, opt.save_path, opt.post, name_post='test_best')

        opt.printer.info(info_format.format(opt.epoch, loss, train_value, val_value, test_value, best_val_value,
                                            best_test_value))

        if opt.scheduler == 'ReduceLROnPlateau':
            scheduler.step(opt.losses.avg)
        else:
            scheduler.step()

    opt.printer.info('Saving the final model.Finish!') 
Developer: lightaime, Project: deep_gcns_torch, Lines of code: 44, Source file: main.py

Example 10: train

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def train(train_loader, model, optimizer, epoch, mseloss, encoder_learn, gradient_clip):
    batch_time = metrics.AverageMeter()
    data_time = metrics.AverageMeter()
    losses = metrics.AverageMeter()
    psnr = metrics.AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (video_blocks, pad_block_size, block_shape) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        target = video_blocks.cuda(non_blocking=True)
        input_var = Variable(video_blocks.cuda())
        target_var = Variable(target)

        # compute output
        model.module.pad_frame_size = pad_block_size.numpy()
        model.module.patch_shape = block_shape.numpy()

        if encoder_learn:
            model.module.measurements.binarization()

        output, y = model(input_var)
        loss = mseloss.compute_loss(output, target_var)
        # record loss
        losses.update(loss.item(), video_blocks.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()

        if encoder_learn:
            # restore real-valued weights
            model.module.measurements.restore()
            nn.utils.clip_grad_norm_(model.module.parameters(), gradient_clip)
        else:
            nn.utils.clip_grad_norm_(
                model.module.reconstruction.parameters(), gradient_clip)

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            logging.info('Epoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                             epoch, i, len(train_loader), batch_time=batch_time,
                             data_time=data_time, loss=losses))
    return losses.avg 
Developer: miliadis, Project: DeepVideoCS, Lines of code: 58, Source file: train.py

Example 11: validate

# Required import: from utils import metrics [as alias]
# Or: from utils.metrics import AverageMeter [as alias]
def validate(val_loader, model, encoder_learn):
    batch_time = metrics.AverageMeter()
    psnr = metrics.AverageMeter()

    # switch to evaluate mode
    model.cuda()
    model.eval()

    # binarize weights
    if encoder_learn:
        model.module.measurements.binarization()

    end = time.time()
    for i, (video_frames, pad_frame_size, patch_shape) in enumerate(val_loader):
        video_input = Variable(video_frames.cuda(non_blocking=True), volatile=True)
        print(val_loader.dataset.videos[i])

        # compute output
        model.module.pad_frame_size = pad_frame_size.numpy()
        model.module.patch_shape = patch_shape.numpy()
        reconstructed_video, y = model(video_input)

        # original video
        reconstructed_video = reconstructed_video.cpu().data.numpy()
        original_video = video_input.cpu().data.numpy()

        # measure accuracy and record loss
        psnr_video = metrics.psnr_accuracy(reconstructed_video, original_video)
        psnr.update(psnr_video, video_frames.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        logging.info('Test: [{0}/{1}]\t'
                     'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                     'PSNR {psnr.val:.3f} ({psnr.avg:.3f})'.format(
                         i + 1, len(val_loader), batch_time=batch_time,
                         psnr=psnr))

    # restore real-valued weights
    if encoder_learn:
        model.module.measurements.restore()

    print(' * PSNR {psnr.avg:.3f}'.format(psnr=psnr))

    return psnr.avg 
Developer: miliadis, Project: DeepVideoCS, Lines of code: 49, Source file: train.py


Note: The utils.metrics.AverageMeter examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please follow each project's license when distributing or using the code; do not reproduce without permission.