本文整理汇总了Python中optimizers.get_optimizer方法的典型用法代码示例。如果您正苦于以下问题:Python optimizers.get_optimizer方法的具体用法?Python optimizers.get_optimizer怎么用?Python optimizers.get_optimizer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块optimizers的用法示例。
在下文中一共展示了optimizers.get_optimizer方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# Required module: import optimizers [as alias]
# or: from optimizers import get_optimizer [as alias]
def run(config):
    """Train a model end-to-end, resuming from the latest checkpoint if any.

    Builds the model, loss, optimizer, scheduler and train/val dataloaders
    from ``config``, restores the most recent checkpoint when one exists,
    then hands everything to ``train``.

    Args:
        config: project config object (attribute access, e.g. ``config.train.dir``).
    """
    train_dir = config.train.dir
    model = get_model(config).cuda()  # NOTE(review): assumes CUDA is available
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())

    # Resume when a checkpoint exists; -1/-1 means "nothing trained yet".
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            model, optimizer, checkpoint)
    else:
        last_epoch, step = -1, -1
    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))

    scheduler = get_scheduler(config, optimizer, last_epoch)
    dataloaders = {split: get_dataloader(config, split,
                                         get_transform(config, split))
                   for split in ['train', 'val']}
    # Fix: reuse the already-bound train_dir instead of re-reading
    # config.train.dir (the local was previously bound but never used).
    writer = SummaryWriter(train_dir)
    train(config, model, dataloaders, criterion, optimizer, scheduler,
          writer, last_epoch + 1)
示例2: run
# Required module: import optimizers [as alias]
# or: from optimizers import get_optimizer [as alias]
def run(config):
    """Train a task end-to-end, resuming from the latest checkpoint if any.

    Task-based variant of the training entry point: the model comes from
    ``task.get_model()`` and per-task preprocessing options are forwarded
    to ``get_transform``. Uses 'train'/'dev' splits.

    Args:
        config: project config object (attribute access, e.g. ``config.train.dir``).
    """
    train_dir = config.train.dir
    task = get_task(config)
    optimizer = get_optimizer(config, task.get_model().parameters())

    # Resume when a checkpoint exists; -1/-1 means "nothing trained yet".
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(task.get_model(),
                                                            optimizer,
                                                            checkpoint)
    else:
        last_epoch, step = -1, -1
    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))

    scheduler = get_scheduler(config, optimizer, last_epoch)
    preprocess_opt = task.get_preprocess_opt()
    dataloaders = {split: get_dataloader(config, split,
                                         get_transform(config, split,
                                                       **preprocess_opt))
                   for split in ['train', 'dev']}
    # Fix: reuse the already-bound train_dir instead of re-reading
    # config.train.dir (the local was previously bound but never used).
    writer = SummaryWriter(train_dir)
    train(config, task, dataloaders, optimizer, scheduler,
          writer, last_epoch + 1)
示例3: __init__
# Required module: import optimizers [as alias]
# or: from optimizers import get_optimizer [as alias]
def __init__(self, args, logger):
    """Build the model and, depending on ``args.mode``, either the full
    training stack (optimizer, LR scheduler, loss, optional resume) or
    just load pretrained weights for validation/testing.

    Args:
        args: experiment configuration (attribute access).
        logger: logger instance, stored for later use.
    """
    self.args = args
    self.logger = logger
    self.writer = SummaryWriter(args.log_dir)
    cudnn.enabled = True
    # set up model
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # aux_classes + 1: presumably an extra "none"/background auxiliary label — TODO confirm
    self.model = get_aux_net(args.network.arch)(aux_classes=args.aux_classes + 1, classes=args.n_classes)
    self.model = self.model.to(self.device)
    if args.mode == 'train':
        # set up optimizer, lr scheduler and loss functions
        # get_optimizer returns the optimizer *class*; every config entry
        # except "name" is forwarded as a constructor keyword argument.
        optimizer = get_optimizer(self.args.training.optimizer)
        optimizer_params = {k: v for k, v in self.args.training.optimizer.items() if k != "name"}
        self.optimizer = optimizer(self.model.parameters(), **optimizer_params)
        self.scheduler = get_scheduler(self.optimizer, self.args.training.lr_scheduler)
        self.class_loss_func = nn.CrossEntropyLoss()
        self.start_iter = 0
        # resume
        if args.training.resume:
            self.load(args.model_dir + '/' + args.training.resume)
        # NOTE(review): source indentation was lost; benchmark is placed here so
        # it applies to every training run, not only on resume — confirm nesting.
        cudnn.benchmark = True
    elif args.mode == 'val':
        self.load(os.path.join(args.model_dir, args.validation.model))
    else:
        self.load(os.path.join(args.model_dir, args.testing.model))
示例4: search_once
# Required module: import optimizers [as alias]
# or: from optimizers import get_optimizer [as alias]
def search_once(config, policy):
    """Run one training trial with the candidate augmentation ``policy``
    applied to the train split and return its macro-averaged F1 score.

    Args:
        config: project config object.
        policy: augmentation policies handed to the train transform.

    Returns:
        float: the ``'f1_mavg'`` entry of the score dict produced by ``train``.
    """
    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())
    # last_epoch = -1: scheduler starts from its initial learning rate.
    scheduler = get_scheduler(config, optimizer, -1)

    # Only the training transform carries the candidate policies.
    train_tf = get_transform(config, 'train', params={'policies': policy})
    val_tf = get_transform(config, 'val')
    dataloaders = {
        'train': get_dataloader(config, 'train', train_tf),
        'val': get_dataloader(config, 'val', val_tf),
    }

    # No summary writer (None) and start epoch 0: a single short search run.
    scores = train(config, model, dataloaders, criterion, optimizer,
                   scheduler, None, 0)
    return scores['f1_mavg']