Python logger.Logger Method Code Examples

This article collects typical usage examples of the Python utils.logger.Logger method. If you have been wondering how exactly to use logger.Logger in Python, how to call it, or what real-world examples look like, the hand-picked code samples below may help. You can also explore further usage examples from the containing module, utils.logger.


The following presents 11 code examples of the logger.Logger method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
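A note before the examples: utils.logger is a project-specific module, not part of the standard library, so its interface differs from repository to repository. Most of the examples below, however, rely on only three calls: scalar_summary, write, and close. Purely as orientation, here is a minimal sketch of such a logger built on torch.utils.tensorboard; it is an assumption for illustration, not the implementation of any of the projects shown below.

# Minimal illustrative Logger exposing the interface that Examples 2, 7 and 10
# rely on (scalar_summary / write / close). This is an assumed sketch built on
# torch.utils.tensorboard, not the actual utils.logger of any project below.
import os
from torch.utils.tensorboard import SummaryWriter


class Logger:
    def __init__(self, log_dir):
        os.makedirs(log_dir, exist_ok=True)
        self.writer = SummaryWriter(log_dir)                      # TensorBoard curves
        self.file = open(os.path.join(log_dir, 'log.txt'), 'a')   # plain-text log

    def scalar_summary(self, tag, value, step):
        self.writer.add_scalar(tag, value, step)

    def write(self, text):
        self.file.write(text)
        self.file.flush()

    def close(self):
        self.writer.close()
        self.file.close()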

Example 1: __init__

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def __init__(self, model, dataloader, run_id, config, ohe_headers):
        super(ESRNNTrainer, self).__init__()
        self.model = model.to(config['device'])
        self.config = config
        self.dl = dataloader
        self.ohe_headers = ohe_headers
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config['learning_rate'])
        # self.optimizer = torch.optim.ASGD(self.model.parameters(), lr=config['learning_rate'])
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                         step_size=config['lr_anneal_step'],
                                                         gamma=config['lr_anneal_rate'])
        self.criterion = PinballLoss(self.config['training_tau'],
                                     self.config['output_size'] * self.config['batch_size'], self.config['device'])
        self.epochs = 0
        self.max_epochs = config['num_of_train_epochs']
        self.run_id = str(run_id)
        self.prod_str = 'prod' if config['prod'] else 'dev'
        self.log = Logger("../logs/train%s%s%s" % (self.config['variable'], self.prod_str, self.run_id))
        self.csv_save_path = None 
Author: damitkwr | Project: ESRNN-GPU | Lines: 21 | Source: trainer.py

Example 2: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main():
	opt = opts().parse()
	now = datetime.datetime.now()
	logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))

	if opt.loadModel == 'none':

		model = inflate(opt).cuda()
	elif opt.loadModel == 'scratch':
		model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions, opt.nRegModules, opt.nRegFrames, ref.nJoints).cuda()
	else :
		model = torch.load(opt.loadModel).cuda()

	train_loader = torch.utils.data.DataLoader(
		h36m('train',opt),
		batch_size = opt.dataloaderSize,
		shuffle = False,
		num_workers = int(ref.nThreads)
	)

	optimizer = torch.optim.RMSprop(
		[{'params': model.parameters(), 'lr': opt.LRhg}], 
		alpha = ref.alpha, 
		eps = ref.epsilon, 
		weight_decay = ref.weightDecay, 
		momentum = ref.momentum
	)

	
	for epoch in range(1, opt.nEpochs + 1):
		loss_train, acc_train = train(epoch, opt, train_loader, model, optimizer)
		logger.scalar_summary('loss_train', loss_train, epoch)
		logger.scalar_summary('acc_train', acc_train, epoch)
		logger.write('{:8f} {:8f} \n'.format(loss_train, acc_train))

	logger.close() 
Author: Naman-ntc | Project: 3D-HourGlass-Network | Lines: 38 | Source: overfit.py

Example 3: build_tensorboard

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def build_tensorboard(self):
        """Build a tensorboard logger."""
        from utils.logger import Logger
        self.logger = Logger(config.log_dir) 
Author: ChenWu98 | Project: Point-Then-Operate | Lines: 6 | Source: Experiment.py
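The lazy import inside build_tensorboard keeps TensorBoard-style logging optional: the dependency is only pulled in when the experiment actually asks for it. A hypothetical call site is sketched below; the use_tensorboard flag and the Experiment constructor are assumptions, only build_tensorboard and scalar_summary appear in the sources.

# Hypothetical usage of the pattern in Example 3; use_tensorboard is an assumed
# config flag, and the Experiment class is only sketched by the example above.
experiment = Experiment(config)
if config.use_tensorboard:
    experiment.build_tensorboard()
    experiment.logger.scalar_summary('train/loss', 0.5, 1)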

Example 4: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data = DataGenerator(config)
    
    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    #load model if exists
    model.load(sess)
    # here you train your model
    trainer.train() 
Author: MrGemy95 | Project: Tensorflow-Project-Template | Lines: 30 | Source: example.py
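In this template the logger receives the live tf.Session and the parsed config, and the trainer reports its metrics through it rather than writing summaries by hand. What follows is a rough TF1-style sketch of such a session-based logger; the summarize method name and signature are assumptions for illustration, not necessarily the template's actual API.

# Rough sketch of a session-based TensorFlow 1.x logger compatible with the
# Logger(sess, config) call in Example 4. summarize() and its signature are
# assumptions; only the constructor arguments come from the example.
import tensorflow as tf


class Logger:
    def __init__(self, sess, config):
        self.sess = sess
        # one FileWriter for training curves; config.summary_dir is created by create_dirs above
        self.writer = tf.summary.FileWriter(config.summary_dir, sess.graph)

    def summarize(self, step, summaries_dict):
        # summaries_dict maps tag -> python scalar, e.g. {'loss': 0.42, 'acc': 0.91}
        for tag, value in summaries_dict.items():
            summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
            self.writer.add_summary(summary, step)
        self.writer.flush()

A trainer would then call something like logger.summarize(global_step, {'loss': cur_loss, 'acc': cur_acc}) after each epoch (again, an assumed call pattern).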

Example 5: __init__

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def __init__(self, time_grain=u'ms',
                 verbose_in=True, verbose_out=True, verbose=True,
                 msg_in=u'', msg_out=u'', msg=u'', logfile=None):
        self.time_grain = time_grain
        self.verbose_in = verbose_in
        self.verbose_out = verbose_out
        self.verbose = verbose
        self.msg_in = msg_in
        self.msg_out = msg_out
        self.msg = msg
        if logfile:
            self.logger = Logger('flogger', log2console=False, log2file=True,
                                 logfile=logfile).get_logger()
        else:
            self.logger = clogger 
Author: likaiguo | Project: simhashpy | Lines: 17 | Source: timer.py
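Here Logger acts as a thin factory around the standard logging module: get_logger() returns a configured logging.Logger that, in this example, writes to a file only. A minimal sketch of such a wrapper follows, using only the arguments visible in the call above; the internals are assumptions.

# Minimal sketch of a logging-module wrapper matching the call in Example 5:
# Logger('flogger', log2console=False, log2file=True, logfile=path).get_logger()
# Only the constructor arguments and get_logger() come from the example; the
# handler/formatter choices below are illustrative assumptions.
import logging


class Logger:
    def __init__(self, name, log2console=True, log2file=False, logfile=None):
        self._logger = logging.getLogger(name)
        self._logger.setLevel(logging.DEBUG)
        fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
        if log2console:
            handler = logging.StreamHandler()
            handler.setFormatter(fmt)
            self._logger.addHandler(handler)
        if log2file and logfile:
            handler = logging.FileHandler(logfile)
            handler.setFormatter(fmt)
            self._logger.addHandler(handler)

    def get_logger(self):
        return self._logger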

Example 6: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main():
    # Create the logging module
    logger = Logger()
    log_process = Process(target=run_log_process, args=(logger,))
    log_process.start()

    saveMainPid(os.getpid())

    # Check for software updates
    checkUpdate(logger)

    # Queue from the strategy engine to the UI, used to send fund/account data
    eg2ui_q = Queue(10000)
    # Queue from the UI to the strategy engine, used to send full strategy paths
    ui2eg_q = Queue(10000)

    # Create the strategy engine
    engine = StrategyEngine(logger, eg2ui_q, ui2eg_q)
    engine_process = Process(target=run_engine_process, args=(engine,))
    engine_process.start()

    control = Controller(logger, ui2eg_q, eg2ui_q)
    control.run()
    time.sleep(3)
    import atexit
    def exitHandler():
        control.receiveEgThread.stop()
        # 1. Stop the strategy processes first; for now they become zombie processes
        # TODO: the engine's terminate() needs to be overridden here
        # 2. Shut down the engine process
        engine_process.terminate()
        engine_process.join()
        log_process.terminate()
        log_process.join()
    atexit.register(exitHandler) 
Author: epolestar | Project: equant | Lines: 37 | Source: equant.py

Example 7: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main():
  now = datetime.datetime.now()
  logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
  model, optimizer = getModel(opt)

  criterion = torch.nn.MSELoss()
  
  if opt.GPU > -1:
    print('Using GPU', opt.GPU)
    model = model.cuda(opt.GPU)
    criterion = criterion.cuda(opt.GPU)
  
  val_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'val'), 
      batch_size = 1, 
      shuffle = True if opt.DEBUG > 1 else False,
      num_workers = 1
  )

  if opt.test:
    _, preds = val(0, opt, val_loader, model, criterion)
    torch.save({'opt': opt, 'preds': preds}, os.path.join(opt.saveDir, 'preds.pth'))
    return

  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'train'), 
      batch_size = opt.trainBatch, 
      shuffle = True,
      num_workers = int(opt.nThreads)
  )

  for epoch in range(1, opt.nEpochs + 1):
    mark = epoch if opt.saveAllModels else 'last'
    log_dict_train, _ = train(epoch, opt, train_loader, model, criterion, optimizer)
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if epoch % opt.valIntervals == 0:
      log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
      saveModel(os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(mark)), model) # optimizer
    logger.write('\n')
    if epoch % opt.dropLR == 0:
      lr = opt.LR * (0.1 ** (epoch // opt.dropLR))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
          param_group['lr'] = lr
  logger.close()
  torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth')) 
Author: xingyizhou | Project: StarMap | Lines: 53 | Source: main.py

Example 8: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main(args):
    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    model = network.__dict__[cfg.model](cfg.output_shape, cfg.num_class, pretrained = True)
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion1 = torch.nn.MSELoss().cuda() # for Global loss
    criterion2 = torch.nn.MSELoss(reduce=False).cuda() # for refine loss
    optimizer = torch.optim.Adam(model.parameters(),
                                lr = cfg.lr,
                                weight_decay=cfg.weight_decay)
    
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            pretrained_dict = checkpoint['state_dict']
            model.load_state_dict(pretrained_dict)
            args.start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'), resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:        
        logger = Logger(join(args.checkpoint, 'log.txt'))
        logger.set_names(['Epoch', 'LR', 'Train Loss'])

    cudnn.benchmark = True
    print('    Total params: %.2fMB' % (sum(p.numel() for p in model.parameters())/(1024*1024)*4))

    train_loader = torch.utils.data.DataLoader(
        MscocoMulti(cfg),
        batch_size=cfg.batch_size*args.num_gpus, shuffle=True,
        num_workers=args.workers, pin_memory=True) 

    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, cfg.lr_dec_epoch, cfg.lr_gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr)) 

        # train for one epoch
        train_loss = train(train_loader, model, [criterion1, criterion2], optimizer)
        print('train_loss: ',train_loss)

        # append logger file
        logger.append([epoch + 1, lr, train_loss])

        save_model({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer' : optimizer.state_dict(),
        }, checkpoint=args.checkpoint)

    logger.close() 
Author: GengDavid | Project: pytorch-cpn | Lines: 61 | Source: train.py
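Unlike the TensorBoard-style loggers in the earlier examples, the Logger used here writes a column-oriented text file: set_names fixes the column headers, append adds one row per epoch, and resume=True reopens an existing log without rewriting the header. A minimal sketch compatible with exactly those calls is shown below; it is illustrative, not the project's actual class.

# Minimal text-file logger compatible with the calls in Example 8:
# Logger(path, resume=...), set_names([...]), append([...]), close().
# Tab-separated formatting is an assumption chosen for readability.
class Logger:
    def __init__(self, fpath, resume=False):
        # append to an existing log when resuming, otherwise start fresh
        self.file = open(fpath, 'a' if resume else 'w')
        self.names = []

    def set_names(self, names):
        self.names = names
        self.file.write('\t'.join(names) + '\n')
        self.file.flush()

    def append(self, values):
        row = ['{:.6f}'.format(v) if isinstance(v, float) else str(v) for v in values]
        self.file.write('\t'.join(row) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()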

Example 9: train

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def train(self, data_train, data_valid, enable_es=1):
        
        with tf.Session(graph=self.graph) as session:
            tf.set_random_seed(1234)
            
            logger = Logger(session, self.summary_dir)
            # here you initialize the tensorflow saver that will be used in saving the checkpoints.
            # max_to_keep: defaults to keeping the 5 most recent checkpoints of your model
            saver = tf.train.Saver()
            early_stopping = EarlyStopping()
            
            if(self.restore==1 and self.load(session, saver) ):
                num_epochs_trained = self.vae_graph.cur_epoch_tensor.eval(session)
                print('EPOCHS trained: ', num_epochs_trained)      
            else:
                print('Initializing Variables ...')
                tf.global_variables_initializer().run()
                
                   
            if(self.vae_graph.cur_epoch_tensor.eval(session) ==  self.epochs):
                return
            
            for cur_epoch in range(self.vae_graph.cur_epoch_tensor.eval(session), self.epochs + 1, 1):
        
                print('EPOCH: ', cur_epoch)
                self.current_epoch = cur_epoch
                # beta=utils.sigmoid(cur_epoch- 50)
                beta = 1.
                loss_tr, recons_tr, cond_prior_tr, L2_loss = self.train_epoch(session, logger, data_train, beta=beta)
                if np.isnan(loss_tr):
                    print ('Encountered NaN, stopping training. Please check the learning_rate settings and the momentum.')
                    print('Recons: ', recons_tr)
                    print('KL: ', cond_prior_tr)
                    sys.exit()
                    
                loss_val, recons_val, cond_prior_val = self.valid_epoch(session, logger, data_valid, beta=beta)
                
                print('TRAIN | Loss: ', loss_tr, ' | Recons: ', recons_tr, ' | KL: ', cond_prior_tr, ' | L2_loss: ', L2_loss)
                print('VALID | Loss: ', loss_val, ' | Recons: ', recons_val, ' | KL: ', cond_prior_val)
                
                if(cur_epoch>0 and cur_epoch % 10 == 0):
                    self.save(session, saver, self.vae_graph.global_step_tensor.eval(session))
                    z_matrix = self.vae_graph.get_z_matrix(session, data_valid.random_batch(self.batch_size))
                    np.savez(self.z_file, z_matrix)
                    
                session.run(self.vae_graph.increment_cur_epoch_tensor)
                
                #Early stopping
                if(enable_es==1 and early_stopping.stop(loss_val)):
                    print('Early Stopping!')
                    break
                    
        
            self.save(session,saver, self.vae_graph.global_step_tensor.eval(session))
            z_matrix = self.vae_graph.get_z_matrix(session, data_valid.random_batch(self.batch_size))
            np.savez(self.z_file, z_matrix)
        return 
Author: psanch21 | Project: VAE-GMVAE | Lines: 59 | Source: VAE_model.py

Example 10: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir, now.isoformat())
    model, optimizer = getModel(opt)
    criterion = torch.nn.MSELoss().cuda()

    # if opt.GPU > -1:
    #     print('Using GPU {}',format(opt.GPU))
    #     model = model.cuda(opt.GPU)
    #     criterion = criterion.cuda(opt.GPU)
    # dev = opt.device
    model = model.cuda()

    val_loader = torch.utils.data.DataLoader(
            MPII(opt, 'val'), 
            batch_size = 1, 
            shuffle = False,
            num_workers = int(ref.nThreads)
    )

    if opt.test:
        log_dict_train, preds = val(0, opt, val_loader, model, criterion)
        sio.savemat(os.path.join(opt.saveDir, 'preds.mat'), mdict = {'preds': preds})
        return
    # Pretrain pyramidnet once; first define the training data loader for gen
    train_loader = torch.utils.data.DataLoader(
            MPII(opt, 'train'), 
            batch_size = opt.trainBatch, 
            shuffle = True if opt.DEBUG == 0 else False,
            num_workers = int(ref.nThreads)
    )
    # Call the train method
    for epoch in range(1, opt.nEpochs + 1):
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion, optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            #saveModel(model, optimizer, os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(epoch)))
            torch.save(model, os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            sio.savemat(os.path.join(opt.saveDir, 'preds_{}.mat'.format(epoch)), mdict = {'preds': preds})
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1 ** (epoch // opt.dropLR))
            print('Drop LR to {}'.format(lr))
            adjust_learning_rate(optimizer, lr)
    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth')) 
Author: IcewineChen | Project: pytorch-PyraNet | Lines: 55 | Source: main.py

Example 11: main

# Required import: from utils import logger [as alias]
# Or: from utils.logger import Logger [as alias]
def main():
    log = logger.Logger(args.savemodel, name=args.logname)
    total_iters = 0

    for epoch in range(1, args.epochs+1):
        total_train_loss = 0
        adjust_learning_rate(optimizer,epoch)
        
        ## training ##
        for batch_idx, (imgL_crop, imgR_crop, disp_crop_L) in enumerate(TrainImgLoader):
            start_time = time.time() 
            loss,vis = train(imgL_crop,imgR_crop, disp_crop_L)
            print('Iter %d training loss = %.3f , time = %.2f' %(batch_idx, loss, time.time() - start_time))
            total_train_loss += loss

            if total_iters %10 == 0:
                log.scalar_summary('train/loss_batch',loss, total_iters)
            if total_iters %100 == 0:
                log.image_summary('train/left',imgL_crop[0:1],total_iters)
                log.image_summary('train/right',imgR_crop[0:1],total_iters)
                log.image_summary('train/gt0',disp_crop_L[0:1],total_iters)
                log.image_summary('train/entropy',vis['entropy'][0:1],total_iters)
                log.histo_summary('train/disparity_hist',vis['output3'], total_iters)
                log.histo_summary('train/gt_hist',np.asarray(disp_crop_L), total_iters)
                log.image_summary('train/output3',vis['output3'][0:1],total_iters)
                log.image_summary('train/output4',vis['output4'][0:1],total_iters)
                log.image_summary('train/output5',vis['output5'][0:1],total_iters)
                log.image_summary('train/output6',vis['output6'][0:1],total_iters)
                    
            total_iters += 1

            if (total_iters + 1)%2000==0:
                #SAVE
                savefilename = args.savemodel+'/'+args.logname+'/finetune_'+str(total_iters)+'.tar'
                torch.save({
                    'iters': total_iters,
                    'state_dict': model.state_dict(),
                    'train_loss': total_train_loss/len(TrainImgLoader),
                }, savefilename)

        log.scalar_summary('train/loss',total_train_loss/len(TrainImgLoader), epoch)
        torch.cuda.empty_cache() 
Author: gengshan-y | Project: high-res-stereo | Lines: 44 | Source: train.py
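This last logger also records images and histograms, not just scalars. If an equivalent is needed today, the three calls used above map directly onto torch.utils.tensorboard.SummaryWriter; the sketch below is such a mapping, not the repository's own implementation, and it assumes image batches shaped [N, C, H, W].

# Rough equivalent of the scalar/image/histogram interface used in Example 11,
# built on torch.utils.tensorboard.SummaryWriter. The constructor mirrors
# logger.Logger(args.savemodel, name=args.logname); everything else is an
# assumed mapping, and images are assumed to be [N, C, H, W] tensors.
from torch.utils.tensorboard import SummaryWriter


class Logger:
    def __init__(self, save_dir, name='run'):
        self.writer = SummaryWriter('{}/{}'.format(save_dir, name))

    def scalar_summary(self, tag, value, step):
        self.writer.add_scalar(tag, value, step)

    def image_summary(self, tag, images, step):
        self.writer.add_images(tag, images, step)   # expects an [N, C, H, W] batch

    def histo_summary(self, tag, values, step):
        self.writer.add_histogram(tag, values, step)

    def close(self):
        self.writer.close()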


Note: the utils.logger.Logger method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow the corresponding project's License when distributing or using the code, and do not reproduce this article without permission.