

Python tensorboard_logger.log_value Method Code Examples

This article collects typical usage examples of the tensorboard_logger.log_value method in Python. If you are unsure how tensorboard_logger.log_value is called in practice, or what real-world uses of it look like, the curated examples below should help. You can also explore other usage examples from the tensorboard_logger module that this method belongs to.


The sections below present 15 code examples of the tensorboard_logger.log_value method, sorted by popularity by default.
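
Before the examples, here is a minimal, self-contained sketch of the typical setup, with the run directory, metric name, and loop chosen purely for illustration (they are not taken from any of the projects listed here): tensorboard_logger exposes a module-level configure() that is pointed at a log directory once per process, after which log_value() records a named scalar at a given step.

import tensorboard_logger

# Configure the module-level logger once; event files are written under
# this directory and can be visualized with TensorBoard.
tensorboard_logger.configure('runs/demo_run', flush_secs=5)

for step in range(100):
    loss = 1.0 / (step + 1)  # stand-in for a real training loss
    # Each call appends one point to the 'train_loss' curve at this step.
    tensorboard_logger.log_value('train_loss', loss, step)

The examples below follow this same pattern and differ mainly in where the step counter comes from: a per-iteration counter such as model.Eiters, an epoch index, or an elapsed-time offset.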

Example 1: validate_caption_only

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def validate_caption_only(opt, val_loader, model):
    # compute the encoding for all the validation images and captions
    img_embs, cap_embs = encode_data(
        model, val_loader, opt.log_step, logging.info)

    # caption retrieval
    (r1, r5, r10, medr, meanr) = i2t_text_only(img_embs, cap_embs, measure=opt.measure)
    logging.info("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
                 (r1, r5, r10, medr, meanr))
    # sum of recalls to be used for early stopping
    currscore = r1 + r5 + r10

    # record metrics in tensorboard
    tb_logger.log_value('r1', r1, step=model.Eiters)
    tb_logger.log_value('r5', r5, step=model.Eiters)
    tb_logger.log_value('r10', r10, step=model.Eiters)
    tb_logger.log_value('medr', medr, step=model.Eiters)
    tb_logger.log_value('meanr', meanr, step=model.Eiters)
    tb_logger.log_value('rsum', currscore, step=model.Eiters)

    return currscore 
Developer: ExplorerFreda, Project: VSE-C, Lines: 23, Source: train.py

Example 2: __call__

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def __call__(self, job):
        import tensorboard_logger as tl 
        # id = job.id
        budget = job.kwargs['budget']
        # config = job.kwargs['config']
        timestamps = job.timestamps
        result = job.result
        exception = job.exception

        time_step = int(timestamps['finished'] - self.start_time)

        if result is not None:
            tl.log_value('BOHB/all_results', result['loss'] * -1, time_step)
            if result['loss'] < self.incumbent:
                self.incumbent = result['loss']
            tl.log_value('BOHB/incumbent_results', self.incumbent * -1, time_step) 
Developer: automl, Project: Auto-PyTorch, Lines: 18, Source: optimization_algorithm.py

Example 3: optimize_pipeline

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def optimize_pipeline(self, config, config_id, budget, optimize_start_time):
        """Fit the pipeline using the sampled hyperparameter configuration.
        
        Arguments:
            config {dict} -- The sampled hyperparameter configuration.
            config_id {tuple} -- An ID for the configuration. Assigned by BOHB.
            budget {float} -- The budget to evaluate the hyperparameter configuration.
            optimize_start_time {float} -- The time when optimization started.
        
        Returns:
            dict -- The result of fitting the pipeline.
        """
        try:
            self.autonet_logger.info("Fit optimization pipeline")
            return self.pipeline.fit_pipeline(hyperparameter_config=config, pipeline_config=self.pipeline_config, 
                                            X_train=self.X_train, Y_train=self.Y_train, X_valid=self.X_valid, Y_valid=self.Y_valid, 
                                            budget=budget, budget_type=self.budget_type, max_budget=self.max_budget, optimize_start_time=optimize_start_time,
                                            refit=False, rescore=False, hyperparameter_config_id=config_id, dataset_info=self.dataset_info)
        except Exception as e:
            if 'use_tensorboard_logger' in self.pipeline_config and self.pipeline_config['use_tensorboard_logger']:            
                import tensorboard_logger as tl
                tl.log_value('Exceptions/' + str(e), budget, int(time.time()))
            self.autonet_logger.info(str(e))
            raise e 
Developer: automl, Project: Auto-PyTorch, Lines: 26, Source: worker.py

Example 4: optimize_pipeline

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def optimize_pipeline(self, config, budget, config_id, random_state):
        
        random.setstate(random_state)

        if self.permutations is not None:
            current_sh_run = config_id[0]
            self.pipeline_config["dataset_order"] = self.permutations[current_sh_run%len(self.permutations)].tolist()

        try:
            self.autonet_logger.info("Fit optimization pipeline")
            return self.pipeline.fit_pipeline(hyperparameter_config=config, pipeline_config=self.pipeline_config, 
                                            X_train=self.X_train, Y_train=self.Y_train, X_valid=self.X_valid, Y_valid=self.Y_valid, 
                                            budget=budget, budget_type=self.budget_type, max_budget=self.max_budget, 
                                            config_id=config_id, working_directory=self.working_directory), random.getstate()
        except Exception as e:
            if 'use_tensorboard_logger' in self.pipeline_config and self.pipeline_config['use_tensorboard_logger']:            
                import tensorboard_logger as tl
                tl.log_value('Exceptions/' + str(e), budget, int(time.time()))
            #self.autonet_logger.exception('Exception occurred')
            raise e 
Developer: automl, Project: Auto-PyTorch, Lines: 22, Source: worker_no_timelimit.py

Example 5: log_value

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def log_value(self, name, value):
        log_value(name, value, self.global_step)
        return self 
Developer: priba, Project: nmp_qc, Lines: 5, Source: LogMetric.py

Example 6: validate

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def validate(opt, val_loader, model):
    # compute the encoding for all the validation images and captions
    img_embs, cap_embs = encode_data(
        model, val_loader, opt.log_step, logging.info)

    # caption retrieval
    (r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, measure=opt.measure)
    logging.info("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" %
                 (r1, r5, r10, medr, meanr))
    # image retrieval
    (r1i, r5i, r10i, medri, meanri) = t2i(
        img_embs, cap_embs, measure=opt.measure)
    logging.info("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" %
                 (r1i, r5i, r10i, medri, meanri))
    # sum of recalls to be used for early stopping
    currscore = r1 + r5 + r10 + r1i + r5i + r10i

    # record metrics in tensorboard
    tb_logger.log_value('r1', r1, step=model.Eiters)
    tb_logger.log_value('r5', r5, step=model.Eiters)
    tb_logger.log_value('r10', r10, step=model.Eiters)
    tb_logger.log_value('medr', medr, step=model.Eiters)
    tb_logger.log_value('meanr', meanr, step=model.Eiters)
    tb_logger.log_value('r1i', r1i, step=model.Eiters)
    tb_logger.log_value('r5i', r5i, step=model.Eiters)
    tb_logger.log_value('r10i', r10i, step=model.Eiters)
    tb_logger.log_value('medri', medri, step=model.Eiters)
    tb_logger.log_value('meanri', meanri, step=model.Eiters)
    tb_logger.log_value('rsum', currscore, step=model.Eiters)

    return currscore 
Developer: ExplorerFreda, Project: VSE-C, Lines: 33, Source: train.py

Example 7: train

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def train(opt, train_loader, model, epoch):
    # average meters to record the training statistics
    batch_time = AverageMeter()
    data_time = AverageMeter()
    train_logger = LogCollector()

    # switch to train mode
    model.train_start()

    progbar = Progbar(len(train_loader.dataset))
    end = time.time()
    for i, train_data in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        # make sure train logger is used
        model.logger = train_logger

        # Update the model
        b_size, loss = model.train_emb(*train_data)

        progbar.add(b_size, values=[('loss', loss)])

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # Record logs in tensorboard
        tb_logger.log_value('epoch', epoch, step=model.Eiters)
        tb_logger.log_value('step', i, step=model.Eiters)
        tb_logger.log_value('batch_time', batch_time.val, step=model.Eiters)
        tb_logger.log_value('data_time', data_time.val, step=model.Eiters)
        model.logger.tb_log(tb_logger, step=model.Eiters) 
Developer: danieljf24, Project: dual_encoding, Lines: 36, Source: trainer.py

Example 8: log_value

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def log_value(harn, key, value, n_iter):
        if False:
            print('{}={} @ {}'.format(key, value, n_iter))
        if tensorboard_logger:
            tensorboard_logger.log_value(key, value, n_iter) 
Developer: Erotemic, Project: ibeis, Lines: 7, Source: fit_harness.py

Example 9: train_epoch

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def train_epoch(harn):
        ave_metrics = defaultdict(lambda: 0)

        # change learning rate (modified optimizer inplace)
        lr = harn.lr_scheduler(harn.epoch, harn.optimizer)

        # train batch
        for batch_idx, input_batch in enumerate(harn.train_loader):
            input_batch = harn._to_xpu(*input_batch)

            # print('Begin batch {}'.format(batch_idx))
            t_cur_metrics = harn.train_batch(input_batch)

            for k, v in t_cur_metrics.items():
                ave_metrics[k] += v

            # display training info
            if (batch_idx + 1) % harn.config['displayInterval'] == 0:
                for k in ave_metrics.keys():
                    ave_metrics[k] /= harn.config['displayInterval']

                n_train = len(harn.train_loader)
                harn.log('Epoch {0}: {1} / {2} | lr:{3} - tloss:{4:.5f} acc:{5:.2f} | sdis:{6:.3f} ddis:{7:.3f}'.format(
                    harn.epoch, batch_idx, n_train, lr,
                    ave_metrics['loss'], ave_metrics['accuracy'],
                    ave_metrics['pos_dist'], ave_metrics['neg_dist']))

                iter_idx = harn.epoch * n_train + batch_idx
                for key, value in ave_metrics.items():
                    harn.log_value('train ' + key, value, iter_idx)

                # diagnoseGradients(model.parameters())
                for k in ave_metrics.keys():
                    ave_metrics[k] = 0 
Developer: Erotemic, Project: ibeis, Lines: 36, Source: fit_harness.py

Example 10: validation_epoch

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def validation_epoch(harn):
        ave_metrics = defaultdict(lambda: 0)

        final_metrics = ave_metrics.copy()

        for vali_idx, input_batch in enumerate(harn.vali_loader):
            input_batch = harn._to_xpu(*input_batch)

            # print('Begin batch {}'.format(vali_idx))
            v_cur_metrics = harn.validation_batch(input_batch)

            for k, v in v_cur_metrics.items():
                ave_metrics[k] += v
                final_metrics[k] += v

            if (vali_idx + 1) % harn.config['vail_displayInterval'] == 0:
                for k in ave_metrics.keys():
                    ave_metrics[k] /= harn.config['displayInterval']

                harn.log('Epoch {0}: {1} / {2} | vloss:{3:.5f} acc:{4:.2f} | sdis:{5:.3f} ddis:{6:.3f}'.format(
                    harn.epoch, vali_idx, len(harn.vali_loader),
                    ave_metrics['loss'], ave_metrics['accuracy'],
                    ave_metrics['pos_dist'], ave_metrics['neg_dist']))

                for k in ave_metrics.keys():
                    ave_metrics[k] = 0

        for k in final_metrics.keys():
            final_metrics[k] /= len(harn.vali_loader)
        harn.log('Epoch {0}: final vloss:{1:.5f} acc:{2:.2f} | sdis:{3:.3f} ddis:{4:.3f}'.format(
            harn.epoch, final_metrics['loss'], final_metrics['accuracy'],
            final_metrics['pos_dist'], final_metrics['neg_dist']))

        iter_idx = harn.epoch * len(harn.vali_loader) + vali_idx
        for key, value in final_metrics.items():
            harn.log_value('validation ' + key, value, iter_idx)

    # def display_metrics():
    #     pass 
Developer: Erotemic, Project: ibeis, Lines: 41, Source: fit_harness.py

Example 11: train_one_epoch

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f):
    model.train()
    log_print('===============TRAIN EPOCH %d================' % epoch, log_f=log_f)
    loss_func = DiceLoss(ignore_target=-1)

    for it, batch in enumerate(train_loader):
        optimizer.zero_grad()

        pts_input, cls_labels = batch['pts_input'], batch['cls_labels']
        pts_input = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
        cls_labels = torch.from_numpy(cls_labels).cuda(non_blocking=True).long().view(-1)

        pred_cls = model(pts_input)
        pred_cls = pred_cls.view(-1)

        loss = loss_func(pred_cls, cls_labels)
        loss.backward()
        clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()

        total_it += 1

        pred_class = (torch.sigmoid(pred_cls) > FG_THRESH)
        fg_mask = cls_labels > 0
        correct = ((pred_class.long() == cls_labels) & fg_mask).float().sum()
        union = fg_mask.sum().float() + (pred_class > 0).sum().float() - correct
        iou = correct / torch.clamp(union, min=1.0)

        cur_lr = lr_scheduler.get_lr()[0]
        if tb_log is not None:
            tb_log.log_value('learning_rate', cur_lr, epoch)
            tb_log.log_value('train_loss', loss, total_it)
            tb_log.log_value('train_fg_iou', iou, total_it)

        log_print('training epoch %d: it=%d/%d, total_it=%d, loss=%.5f, fg_iou=%.3f, lr=%f' %
                  (epoch, it, len(train_loader), total_it, loss.item(), iou.item(), cur_lr), log_f=log_f)

    return total_it 
Developer: sshaoshuai, Project: Pointnet2.PyTorch, Lines: 40, Source: train_and_eval.py

Example 12: eval_one_epoch

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def eval_one_epoch(model, eval_loader, epoch, tb_log=None, log_f=None):
    model.train()
    log_print('===============EVAL EPOCH %d================' % epoch, log_f=log_f)

    iou_list = []
    for it, batch in enumerate(eval_loader):
        pts_input, cls_labels = batch['pts_input'], batch['cls_labels']
        pts_input = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
        cls_labels = torch.from_numpy(cls_labels).cuda(non_blocking=True).long().view(-1)

        pred_cls = model(pts_input)
        pred_cls = pred_cls.view(-1)

        pred_class = (torch.sigmoid(pred_cls) > FG_THRESH)
        fg_mask = cls_labels > 0
        correct = ((pred_class.long() == cls_labels) & fg_mask).float().sum()
        union = fg_mask.sum().float() + (pred_class > 0).sum().float() - correct
        iou = correct / torch.clamp(union, min=1.0)

        iou_list.append(iou.item())
        log_print('EVAL: it=%d/%d, iou=%.3f' % (it, len(eval_loader), iou), log_f=log_f)

    iou_list = np.array(iou_list)
    avg_iou = iou_list.mean()
    if tb_log is not None:
        tb_log.log_value('eval_fg_iou', avg_iou, epoch)

    log_print('\nEpoch %d: Average IoU (samples=%d): %.6f' % (epoch, iou_list.__len__(), avg_iou), log_f=log_f)
    return avg_iou 
Developer: sshaoshuai, Project: Pointnet2.PyTorch, Lines: 31, Source: train_and_eval.py

Example 13: run_epoch

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def run_epoch(model, optimizer, train_ldr, it, avg_loss):

    model_t = 0.0; data_t = 0.0
    end_t = time.time()
    tq = tqdm.tqdm(train_ldr)
    for batch in tq:
        start_t = time.time()
        optimizer.zero_grad()
        loss = model.loss(batch)
        loss.backward()

        # pre-0.4 PyTorch idioms: clip_grad_norm was later renamed
        # clip_grad_norm_, and loss.data[0] became loss.item()
        grad_norm = nn.utils.clip_grad_norm(model.parameters(), 200)
        loss = loss.data[0]

        optimizer.step()
        prev_end_t = end_t
        end_t = time.time()
        model_t += end_t - start_t
        data_t += start_t - prev_end_t

        exp_w = 0.99
        avg_loss = exp_w * avg_loss + (1 - exp_w) * loss
        tb.log_value('train_loss', loss, it)
        tq.set_postfix(iter=it, loss=loss,
                avg_loss=avg_loss, grad_norm=grad_norm,
                model_time=model_t, data_time=data_t)
        it += 1

    return it, avg_loss 
Developer: awni, Project: speech, Lines: 31, Source: train.py

Example 14: train

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def train(epoch):
    rnet.train()

    total = 0
    correct = 0
    train_loss = 0
    total_batch = 0

    for batch_idx, (inputs, targets) in tqdm.tqdm(enumerate(trainloader), total=len(trainloader)):
        inputs, targets = inputs.to(device), targets.to(device)

        probs = rnet(inputs, True)
        optimizer.zero_grad()
        loss = criterion(probs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = probs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        total_batch += 1

    print('E:%d Train Loss: %.3f Train Acc: %.3f LR %f'
          % (epoch,
             train_loss / total_batch,
             correct / total,
             optimizer.param_groups[0]['lr']))

    tensorboard_logger.log_value('train_acc', correct/total, epoch)
    tensorboard_logger.log_value('train_loss', train_loss / total_batch, epoch) 
Developer: jiweeo, Project: pytorch-stochastic-depth, Lines: 34, Source: train.py

Example 15: test

# Required module: import tensorboard_logger [as alias]
# Or: from tensorboard_logger import log_value [as alias]
def test(epoch):
    global best_test_acc
    rnet.eval()

    total = 0
    correct = 0
    test_loss = 0
    total_batch = 0

    for batch_idx, (inputs, targets) in tqdm.tqdm(enumerate(testloader), total=len(testloader)):
        inputs, targets = inputs.to(device), targets.to(device)

        probs = rnet(inputs)
        loss = criterion(probs, targets)

        test_loss += loss.item()
        _, predicted = probs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        total_batch += 1

    print('E:%d Test Loss: %.3f Test Acc: %.3f'
          % (epoch, test_loss / total_batch, correct / total))

    # save best model
    acc = 100.*correct/total

    if acc > best_test_acc:
        best_test_acc = acc
        print('saving best model...')
        state = {
            'net': rnet.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(state, 'resnet110.t7')
    tensorboard_logger.log_value('test_acc', acc, epoch)
    tensorboard_logger.log_value('test_loss', test_loss/total_batch, epoch) 
Developer: jiweeo, Project: pytorch-stochastic-depth, Lines: 41, Source: train.py


Note: The tensorboard_logger.log_value examples in this article were compiled by 純淨天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or reuse should follow the corresponding project's license.