This article collects typical usage examples of the utils.adjust_learning_rate method in Python. If you have been wondering how utils.adjust_learning_rate is used in practice, or what a concrete call looks like, the curated examples below may help. You can also explore the utils module that the method belongs to.
The following presents 3 code examples of the utils.adjust_learning_rate method, ordered by popularity.
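Each repository below defines its own adjust_learning_rate inside a project-local utils module, so the exact signature varies from project to project. As a point of reference, here is a minimal sketch of the step-decay pattern such helpers commonly implement, matching the three-argument call sites in Examples 1 and 3; the attribute names opt.maxlr and opt.decayinterval are assumptions for illustration, not taken from any of the repositories:

def adjust_learning_rate(opt, optimizer, epoch):
    # Hypothetical sketch: halve the learning rate every opt.decayinterval epochs.
    lr = opt.maxlr * (0.5 ** (epoch // opt.decayinterval))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    opt.lr = lr  # keep opt in sync; Example 3 prints opt.lr after this call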
Example 1: main
# Required import: import utils [as alias]
# Or: from utils import adjust_learning_rate [as alias]
def main():
    global opt, best_err1
    opt = parser.parse_args()
    best_err1 = 1000000  # running best (lowest) validation error
    print(opt)

    model = init.load_model(opt)
    model, criterion, optimizer = init.setup(model, opt)
    print(model)

    trainer = train.Trainer(model, criterion, optimizer, opt, writer)
    validator = train.Validator(model, criterion, opt, writer)

    # Seed RNGs for reproducibility
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.deterministic = True

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_err1 = init.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    cudnn.benchmark = True

    dataloader = ld.GazeFollow(opt)
    train_loader = dataloader.train_loader
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch + 1, "Learning rate:", optimizer.param_groups[0]["lr"])
        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        err = validator.validate(val_loader, epoch, opt)
        best_err1 = min(err, best_err1)
        if epoch % 10 == 0:
            init.save_checkpoint(opt, model, optimizer, best_err1, epoch)
        print('Best error: [{0:.3f}]\t'.format(best_err1))
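Example 1 delegates checkpointing to init.save_checkpoint and init.resumer, whose implementations are not shown on this page. Below is a minimal sketch of what such a pair typically looks like in PyTorch, assuming the checkpoint is a dict with 'state_dict', 'optimizer', 'best_err1', and 'epoch' keys (all assumed names, as is the 'checkpoint.pth' filename):

import torch

def save_checkpoint(opt, model, optimizer, best_err1, epoch):
    # Bundle everything needed to resume training into one file.
    torch.save({
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_err1': best_err1,
        'epoch': epoch,
    }, 'checkpoint.pth')

def resumer(opt, model, optimizer):
    # Restore model/optimizer state and pick up at the following epoch.
    ckpt = torch.load(opt.resume)
    model.load_state_dict(ckpt['state_dict'])
    optimizer.load_state_dict(ckpt['optimizer'])
    opt.start_epoch = ckpt['epoch'] + 1
    return model, optimizer, opt, ckpt['best_err1']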
Example 2: train_splitted
# Required import: import utils [as alias]
# Or: from utils import adjust_learning_rate [as alias]
def train_splitted(num_tasks, bayesian=True, net_type='lenet'):
    assert 10 % num_tasks == 0

    # Hyperparameter settings
    train_ens = cfg.train_ens
    valid_ens = cfg.valid_ens
    n_epochs = cfg.n_epochs
    lr_start = cfg.lr_start

    if bayesian:
        ckpt_dir = f"checkpoints/MNIST/bayesian/splitted/{num_tasks}-tasks/"
    else:
        ckpt_dir = f"checkpoints/MNIST/frequentist/splitted/{num_tasks}-tasks/"
    os.makedirs(ckpt_dir, exist_ok=True)

    loaders, datasets = mix_utils.get_splitmnist_dataloaders(num_tasks, return_datasets=True)
    models = mix_utils.get_splitmnist_models(num_tasks, bayesian=bayesian, pretrained=False, net_type=net_type)

    for task in range(1, num_tasks + 1):
        print(f"Training task-{task}..")
        trainset, testset, _, _ = datasets[task - 1]
        train_loader, valid_loader, _ = loaders[task - 1]
        net = models[task - 1]
        net = net.to(device)
        ckpt_name = ckpt_dir + f"model_{net_type}_{num_tasks}.{task}.pt"
        criterion = (metrics.ELBO(len(trainset)) if bayesian else nn.CrossEntropyLoss()).to(device)
        optimizer = Adam(net.parameters(), lr=lr_start)
        valid_loss_min = np.inf  # running lowest validation loss

        for epoch in range(n_epochs):  # loop over the dataset multiple times
            utils.adjust_learning_rate(optimizer, metrics.lr_linear(epoch, 0, n_epochs, lr_start))

            if bayesian:
                train_loss, train_acc, train_kl = train_bayesian(net, optimizer, criterion, train_loader, num_ens=train_ens)
                valid_loss, valid_acc = validate_bayesian(net, criterion, valid_loader, num_ens=valid_ens)
                print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f} \ttrain_kl_div: {:.4f}'.format(
                    epoch, train_loss, train_acc, valid_loss, valid_acc, train_kl))
            else:
                train_loss, train_acc = train_frequentist(net, optimizer, criterion, train_loader)
                valid_loss, valid_acc = validate_frequentist(net, criterion, valid_loader)
                print('Epoch: {} \tTraining Loss: {:.4f} \tTraining Accuracy: {:.4f} \tValidation Loss: {:.4f} \tValidation Accuracy: {:.4f}'.format(
                    epoch, train_loss, train_acc, valid_loss, valid_acc))

            # Save the model if validation loss has decreased
            if valid_loss <= valid_loss_min:
                print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                    valid_loss_min, valid_loss))
                torch.save(net.state_dict(), ckpt_name)
                valid_loss_min = valid_loss

        print(f"Done training task-{task}")
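Unlike Examples 1 and 3, this example computes the new rate itself with metrics.lr_linear and hands it to a two-argument adjust_learning_rate(optimizer, lr). Below is a minimal sketch of that pairing, assuming lr_linear decays the rate linearly from lr_start toward zero over the run (an assumption consistent with the call lr_linear(epoch, 0, n_epochs, lr_start) above):

def lr_linear(epoch, decay_start, total_epochs, start_value):
    # Hypothetical sketch: linear decay from start_value toward 0
    # between decay_start and total_epochs.
    if epoch < decay_start:
        return start_value
    return start_value * float(total_epochs - epoch) / (total_epochs - decay_start)

def adjust_learning_rate(optimizer, lr):
    # Overwrite the learning rate of every parameter group in place.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr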
Example 3: main
# Required import: import utils [as alias]
# Or: from utils import adjust_learning_rate [as alias]
def main():
    global opt, best_prec1
    opt = parser.parse_args()
    opt.logdir = opt.logdir + '/' + opt.name
    logger = None  # Logger(opt.logdir); required when opt.tensorboard is set
    opt.lr = opt.maxlr
    print(opt)
    best_prec1 = 0
    cudnn.benchmark = True

    model = init_model.load_model(opt)
    if opt.model_def.startswith('alexnet') or opt.model_def.startswith('vgg'):
        # AlexNet/VGG: parallelize only the convolutional features
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    elif opt.ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    print(model)

    model, criterion, optimizer = init_model.setup(model, opt)
    trainer = train.Trainer(model, criterion, optimizer, opt, logger)
    validator = train.Validator(model, criterion, opt, logger)

    if opt.resume:
        if os.path.isfile(opt.resume):
            model, optimizer, opt, best_prec1 = init_model.resumer(opt, model, optimizer)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    dataloader = init_data.load_data(opt)
    train_loader = dataloader.train_loader
    # print(utils.get_mean_and_std(train_loader))
    val_loader = dataloader.val_loader

    for epoch in range(opt.start_epoch, opt.epochs):
        utils.adjust_learning_rate(opt, optimizer, epoch)
        print("Starting epoch number:", epoch, "Learning rate:", opt.lr)
        if not opt.testOnly:
            trainer.train(train_loader, epoch, opt)
        if opt.tensorboard:
            logger.scalar_summary('learning_rate', opt.lr, epoch)
        prec1 = validator.validate(val_loader, epoch, opt)
        best_prec1 = max(prec1, best_prec1)
        init_model.save_checkpoint(opt, model, optimizer, best_prec1, epoch)
        print('Best Prec@1: [{0:.3f}]\t'.format(best_prec1))
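Note that utils.adjust_learning_rate is a project-local helper rather than a shared library function: Examples 1 and 3 call it as adjust_learning_rate(opt, optimizer, epoch) and let the helper derive the rate from the epoch, while Example 2 computes the rate first and calls adjust_learning_rate(optimizer, lr). When reusing one of these examples, make sure the call matches the signature defined in the utils module you actually import.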