

Python utils.adjust_learning_rate Method Code Examples

This article collects typical usage examples of the utils.adjust_learning_rate method in Python. If you have been wondering what utils.adjust_learning_rate does or how to call it, the curated examples below should help. You can also explore further usage examples from the utils module.


Three code examples of the utils.adjust_learning_rate method are shown below, ordered by popularity by default.
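Before diving into the examples, note what adjust_learning_rate typically does: it recomputes the learning rate for the current epoch and writes it into every parameter group of the optimizer. The utils module itself is not reproduced on this page, so the following is only a minimal sketch of a step-decay variant matching the (opt, optimizer, epoch) signature used in Examples 1 and 3; the decay rule and the opt.maxlr / opt.decay_every attributes are assumptions for illustration.

# A minimal sketch, not the actual utils implementation: step decay,
# with opt.maxlr and opt.decay_every assumed for illustration.
def adjust_learning_rate(opt, optimizer, epoch):
    """Decay the learning rate by 10x every opt.decay_every epochs."""
    lr = opt.maxlr * (0.1 ** (epoch // opt.decay_every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    opt.lr = lr  # keep the options object in sync for logging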

Example 1: main

# Required import: import utils [as alias]
# Or: from utils import adjust_learning_rate [as alias]
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000  # sentinel; any real validation error will be lower
    print(opt)

    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    # 'writer' is assumed to be defined at module level (e.g. a summary writer)
    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)

    # Seed the RNGs for reproducibility
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True  # note: autotuning undoes the deterministic setting above

    dataloader = ld.GazeFollow(opt)

    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch+1, "Learning rate:", optimizer.param_groups[0]["lr"])

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)

        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)

        if epoch % 10 == 0:  # checkpoint every 10 epochs
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)

        print('Best error: [{0:.3f}]'.format(best_err1))
Developer ID: rohitgajawada, Project: Where-are-they-looking-PyTorch, Lines of code: 46, Source: main.py
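One caveat about the snippet above: it sets cudnn.deterministic = True and then cudnn.benchmark = True, but benchmark mode lets cuDNN autotune its kernels, which reintroduces nondeterminism. A sketch of a fully reproducible setup (a hypothetical helper, keeping benchmark off) would look like this:

import random
import torch
import torch.backends.cudnn as cudnn

def seed_everything(seed):
    # Hypothetical helper: keeps cudnn.benchmark disabled because
    # autotuned kernel selection is nondeterministic.
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.deterministic = True
    cudnn.benchmark = False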

Example 2: train_splitted

# Required import: import utils [as alias]
# Or: from utils import adjust_learning_rate [as alias]
def train_splitted(num_tasks, bayesian=True, net_type='lenet'):
    assert 10 % num_tasks == 0  # MNIST's 10 classes must split evenly across tasks

    # Hyper Parameter settings
    train_ens = cfg.train_ens
    valid_ens = cfg.valid_ens
    n_epochs = cfg.n_epochs
    lr_start = cfg.lr_start

    if bayesian:
        ckpt_dir = f"checkpoints/MNIST/bayesian/splitted/{num_tasks}-tasks/"
    else:
        ckpt_dir = f"checkpoints/MNIST/frequentist/splitted/{num_tasks}-tasks/"
    os.makedirs(ckpt_dir, exist_ok=True)  # exist_ok makes an explicit existence check unnecessary

    loaders, datasets = mix_utils.get_splitmnist_dataloaders(num_tasks, return_datasets=True)
    models = mix_utils.get_splitmnist_models(num_tasks, bayesian=bayesian, pretrained=False, net_type=net_type)

    for task in range(1, num_tasks + 1):
        print(f"Training task-{task}..")
        trainset, testset, _, _ = datasets[task-1]
        train_loader, valid_loader, _ = loaders[task-1]
        net = models[task-1]
        net = net.to(device)
        ckpt_name = ckpt_dir + f"model_{net_type}_{num_tasks}.{task}.pt"

        criterion = (metrics.ELBO(len(trainset)) if bayesian else nn.CrossEntropyLoss()).to(device)
        optimizer = Adam(net.parameters(), lr=lr_start)
        valid_loss_min = np.inf  # lowest validation loss seen so far
        for epoch in range(n_epochs):  # loop over the dataset multiple times
            utils.adjust_learning_rate(optimizer, metrics.lr_linear(epoch, 0, n_epochs, lr_start))

            if bayesian:
                train_loss, train_acc, train_kl = train_bayesian(net, optimizer, criterion, train_loader, num_ens=train_ens)
                valid_loss, valid_acc = validate_bayesian(net, criterion, valid_loader, num_ens=valid_ens)
                print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f} \ttrain_kl_div: {:.4f}'.format(
                    epoch, train_loss, train_acc, valid_loss, valid_acc, train_kl))
            else:
                train_loss, train_acc = train_frequentist(net, optimizer, criterion, train_loader)
                valid_loss, valid_acc = validate_frequentist(net, criterion, valid_loader)
                print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f}'.format(
                    epoch, train_loss, train_acc, valid_loss, valid_acc))

            # save the model if validation loss has decreased
            if valid_loss <= valid_loss_min:
                print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                    valid_loss_min, valid_loss))
                torch.save(net.state_dict(), ckpt_name)
                valid_loss_min = valid_loss

        print(f"Done training task-{task}") 
Developer ID: kumar-shridhar, Project: PyTorch-BayesianCNN, Lines of code: 54, Source: train_splitted.py
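Unlike Examples 1 and 3, this example passes a precomputed learning rate straight into utils.adjust_learning_rate(optimizer, lr), with metrics.lr_linear supplying a linearly annealed value. Neither module is shown on this page, so the function bodies below are assumptions; only the call signatures are taken from the example.

def adjust_learning_rate(optimizer, lr):
    """Set a precomputed learning rate on every parameter group."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def lr_linear(epoch, start_epoch, total_epochs, lr_start):
    """Linearly anneal the learning rate from lr_start toward zero."""
    progress = (epoch - start_epoch) / float(total_epochs - start_epoch)
    return lr_start * (1.0 - progress)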

Example 3: main

# Required import: import utils [as alias]
# Or: from utils import adjust_learning_rate [as alias]
def main():
    global opt, best_prec1

    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir); must be instantiated when opt.tensorboard is set
    opt.lr = opt.maxlr

    print(opt)
    best_prec1 = 0
    cudnn.benchmark = True
    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    model, criterion, optimizer = init_model.setup(model, opt)

    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init_model.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    #print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:",epoch,"Learning rate:", opt.lr)

        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard and logger is not None:
            logger.scalar_summary('learning_rate', opt.lr, epoch)

        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)

        print('Best Prec@1: [{0:.3f}]'.format(best_prec1))
Developer ID: drimpossible, Project: Deep-Expander-Networks, Lines of code: 50, Source: main.py
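Examples 1 and 3 both lean on checkpoint helpers (init.save_checkpoint / init_model.resumer) that are not reproduced on this page. Below is a hypothetical sketch consistent with their call signatures; the checkpoint file layout and path are assumptions.

import torch

def save_checkpoint(opt, model, optimizer, best_metric, epoch):
    # Hypothetical: bundles everything resumer() needs into one file.
    torch.save({
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_metric': best_metric,
    }, opt.logdir + '/checkpoint.pth.tar')

def resumer(opt, model, optimizer):
    # Hypothetical: restores the state saved above and returns the
    # four values unpacked by the examples.
    checkpoint = torch.load(opt.resume)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    opt.start_epoch = checkpoint['epoch']
    return model, optimizer, opt, checkpoint['best_metric']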


Note: The utils.adjust_learning_rate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and use or redistribution should follow each project's license. Do not reproduce without permission.