This article collects typical usage examples of the Python method torch.optim.lr_scheduler.ExponentialLR. If you have been wondering what lr_scheduler.ExponentialLR does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the containing module, torch.optim.lr_scheduler.
The following shows 15 code examples of lr_scheduler.ExponentialLR, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
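Before the examples, here is a minimal, self-contained sketch of the API they all build on: ExponentialLR multiplies each parameter group's learning rate by gamma every time scheduler.step() is called. The tiny model and the gamma value below are illustrative placeholders, not taken from any of the examples.

from torch import nn, optim
from torch.optim.lr_scheduler import ExponentialLR

# Hypothetical model and optimizer, for illustration only.
model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)

# The learning rate is multiplied by gamma after every scheduler.step().
scheduler = ExponentialLR(optimizer, gamma=0.9)

for epoch in range(3):
    # ... forward/backward pass would go here ...
    optimizer.step()
    scheduler.step()
    print(epoch, scheduler.get_last_lr())  # lr shrinks: 0.09, 0.081, 0.0729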
Example 1: load_opt_sched
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def load_opt_sched(train_config, model):
    inf_params = model.inference_parameters()
    gen_params = model.generative_parameters()
    inf_opt = Optimizer(train_config['optimizer'], inf_params,
                        lr=train_config['inference_learning_rate'],
                        clip_grad_norm=train_config['clip_grad_norm'])
    inf_sched = ExponentialLR(inf_opt.opt, 0.999)
    gen_opt = Optimizer(train_config['optimizer'], gen_params,
                        lr=train_config['generation_learning_rate'],
                        clip_grad_norm=train_config['clip_grad_norm'])
    gen_sched = ExponentialLR(gen_opt.opt, 0.999)
    return (inf_opt, gen_opt), (inf_sched, gen_sched)
Example 2: set_params
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def set_params(self, transformer, validation_datagen):
    self.validation_datagen = validation_datagen
    self.model = transformer.model
    self.optimizer = transformer.optimizer
    self.loss_function = transformer.loss_function
    self.lr_scheduler = ExponentialLR(self.optimizer, self.gamma, last_epoch=-1)
Example 3: configure_lr_scheduler
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def configure_lr_scheduler(self, optimizer, cfg):
    if cfg.SCHEDULER == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.STEPS[0], gamma=cfg.GAMMA)
    elif cfg.SCHEDULER == 'multi_step':
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=cfg.STEPS, gamma=cfg.GAMMA)
    elif cfg.SCHEDULER == 'exponential':
        scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg.GAMMA)
    elif cfg.SCHEDULER == 'SGDR':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.MAX_EPOCHS)
    else:
        # fail loudly on unrecognized scheduler names
        raise AssertionError('scheduler can not be recognized.')
    return scheduler
Example 4: exponential
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def exponential(optimizer, last_epoch, gamma=0.995, **_):
    return lr_scheduler.ExponentialLR(optimizer, gamma=gamma, last_epoch=last_epoch)
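A possible call site for this factory, assuming an optimizer has already been constructed; the trailing **_ lets the function silently ignore extra keyword arguments coming from a config dictionary (the unused_key below is a made-up illustration):

# Hypothetical usage: extra config entries are swallowed by **_.
scheduler = exponential(optimizer, last_epoch=-1, gamma=0.99, unused_key=123)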
Example 5: set_params
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def set_params(self, transformer, validation_datagen, *args, **kwargs):
    self.validation_datagen = validation_datagen
    self.model = transformer.model
    self.optimizer = transformer.optimizer
    self.loss_function = transformer.loss_function
    self.lr_scheduler = ExponentialLR(self.optimizer, self.gamma, last_epoch=-1)
Example 6: __init__
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def __init__(self, gamma, step_on_iteration=False):
    super().__init__(
        lambda opt: _scheduler.ExponentialLR(opt, gamma),
        step_on_iteration=step_on_iteration
    )
Example 7: __init__
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def __init__(self, net_path=None, **kwargs):
    super(TrackerSiamFC, self).__init__('SiamFC', True)
    self.cfg = self.parse_args(**kwargs)

    # setup GPU device if available
    self.cuda = torch.cuda.is_available()
    self.device = torch.device('cuda:0' if self.cuda else 'cpu')

    # setup model
    self.net = Net(
        backbone=AlexNetV1(),
        head=SiamFC(self.cfg.out_scale))
    ops.init_weights(self.net)

    # load checkpoint if provided
    if net_path is not None:
        self.net.load_state_dict(torch.load(
            net_path, map_location=lambda storage, loc: storage))
    self.net = self.net.to(self.device)

    # setup criterion
    self.criterion = BalancedLoss()

    # setup optimizer
    self.optimizer = optim.SGD(
        self.net.parameters(),
        lr=self.cfg.initial_lr,
        weight_decay=self.cfg.weight_decay,
        momentum=self.cfg.momentum)

    # setup lr scheduler: decay from initial_lr to ultimate_lr over epoch_num epochs
    gamma = np.power(
        self.cfg.ultimate_lr / self.cfg.initial_lr,
        1.0 / self.cfg.epoch_num)
    self.lr_scheduler = ExponentialLR(self.optimizer, gamma)
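The gamma computed above makes the learning rate decay geometrically from initial_lr down to ultimate_lr over epoch_num epochs, because initial_lr * gamma ** epoch_num == ultimate_lr. A quick sanity check with made-up values (not the tracker's actual configuration):

import numpy as np

initial_lr, ultimate_lr, epoch_num = 1e-2, 1e-5, 50  # illustrative values only
gamma = np.power(ultimate_lr / initial_lr, 1.0 / epoch_num)
print(initial_lr * gamma ** epoch_num)  # ~1e-5, i.e. ultimate_lr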
Example 8: load_sched
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def load_sched(optimizers, last_epoch):
    inf_opt, gen_opt = optimizers
    inf_sched = ExponentialLR(inf_opt.opt, 0.999, last_epoch=last_epoch)
    gen_sched = ExponentialLR(gen_opt.opt, 0.999, last_epoch=last_epoch)
    return (inf_sched, gen_sched)
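Recreating the scheduler with last_epoch set is how this code resumes the decay schedule mid-training; note that recent PyTorch versions expect an 'initial_lr' entry in each optimizer param group when last_epoch != -1, which is normally satisfied by restoring the optimizer state first. A hedged sketch of the more common alternative (not the pattern used by this repository): round-trip the scheduler's own state_dict.

# Assumed names: optimizer and scheduler were created earlier in training.
checkpoint = {'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict()}
# ... later, when resuming ...
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler = ExponentialLR(optimizer, gamma=0.999)
scheduler.load_state_dict(checkpoint['scheduler'])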
Example 9: configure_optimizers
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def configure_optimizers(self):
    optimizer = optim.SGD(
        self.parameters(),
        lr=self.lr,
        momentum=self.momentum,
        weight_decay=self.weight_decay
    )
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
    return [optimizer], [scheduler]
Example 10: lr_decay
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def lr_decay(cfg, optimizer):
    if cfg.SIAMRPN.TRAIN.LR_POLICY == 'exp':
        scheduler = ExponentialLR(optimizer, gamma=0.8685)
    elif cfg.SIAMRPN.TRAIN.LR_POLICY == 'cos':
        scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)
    elif cfg.SIAMRPN.TRAIN.LR_POLICY == 'Reduce':
        scheduler = ReduceLROnPlateau(optimizer, patience=5)
    elif cfg.SIAMRPN.TRAIN.LR_POLICY == 'log':
        # a plain numpy array of per-epoch learning rates, not a torch scheduler
        scheduler = np.logspace(math.log10(cfg.SIAMRPN.TRAIN.LR), math.log10(cfg.SIAMRPN.TRAIN.LR_END), cfg.SIAMRPN.TRAIN.END_EPOCH)
    else:
        raise ValueError('unsupported learning rate scheduler')
    return scheduler
Example 11: lr_decay
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def lr_decay(cfg, optimizer):
    if cfg.SIAMFC.TRAIN.LR_POLICY == 'exp':
        scheduler = ExponentialLR(optimizer, gamma=0.8685)
    elif cfg.SIAMFC.TRAIN.LR_POLICY == 'cos':
        scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)
    elif cfg.SIAMFC.TRAIN.LR_POLICY == 'Reduce':
        scheduler = ReduceLROnPlateau(optimizer, patience=5)
    elif cfg.SIAMFC.TRAIN.LR_POLICY == 'log':
        # a plain numpy array of per-epoch learning rates, not a torch scheduler
        scheduler = np.logspace(math.log10(cfg.SIAMFC.TRAIN.LR), math.log10(cfg.SIAMFC.TRAIN.LR_END), cfg.SIAMFC.TRAIN.END_EPOCH)
    else:
        raise ValueError('unsupported learning rate scheduler')
    return scheduler
Example 12: build_lr_scheduler
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def build_lr_scheduler(optimizer: Optimizer, args: Namespace, total_epochs: List[int] = None) -> _LRScheduler:
    """
    Builds a learning rate scheduler.

    :param optimizer: The Optimizer whose learning rate will be scheduled.
    :param args: Arguments.
    :param total_epochs: The number of epochs over which each learning rate schedule runs.
    :return: An initialized learning rate scheduler.
    """
    # Learning rate scheduler
    if args.scheduler == 'noam':
        return NoamLR(
            optimizer=optimizer,
            warmup_epochs=args.warmup_epochs,
            total_epochs=total_epochs or [args.epochs] * args.num_lrs,
            steps_per_epoch=args.train_data_size // args.batch_size,
            init_lr=args.init_lr,
            max_lr=args.max_lr,
            final_lr=args.final_lr
        )

    if args.scheduler == 'none':
        return MockLR(optimizer=optimizer, lr=args.init_lr)

    if args.scheduler == 'decay':
        return ExponentialLR(optimizer, args.lr_decay_rate)

    raise ValueError(f'Learning rate scheduler "{args.scheduler}" not supported.')
Example 13: ExponentialLR
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def ExponentialLR(gamma,
                  last_epoch=-1):
    # returns a factory that builds torch's ExponentialLR once an optimizer is supplied
    return partial(_lr_scheduler.ExponentialLR, **locals())
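A hedged sketch of how such a partial-based factory is typically consumed; the optimizer below is a placeholder that would be built elsewhere:

# Build the factory first, attach it to an optimizer later.
sched_fn = ExponentialLR(gamma=0.95)
scheduler = sched_fn(optimizer)  # equivalent to torch's ExponentialLR(optimizer, gamma=0.95)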
Example 14: __init__
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def __init__(self, *, gamma, last_epoch=-1):
    """Decays the learning rate of each parameter group by gamma every epoch.

    When last_epoch=-1, sets initial lr as lr.

    Args:
        gamma (float): Multiplicative factor of learning rate decay.
        last_epoch (int): The index of last epoch. Default: -1.
    """
    super().__init__(lr_scheduler.ExponentialLR, gamma=gamma, last_epoch=last_epoch)
Example 15: _get_scheduler
# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import ExponentialLR [as alias]
def _get_scheduler(self, optimizer, config):
    return lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=config['gamma'], last_epoch=-1)