This article collects typical usage examples of Python's torch.optim.optimizer.Optimizer. If you are wondering what optimizer.Optimizer does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also browse further usage examples from the containing module, torch.optim.optimizer.
The following shows 15 code examples of optimizer.Optimizer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, batches, epochs, base_lr,
             target_lr=0, warmup_epochs=0, warmup_lr=0, last_iter=-1):
    if not isinstance(optimizer, Optimizer):
        raise TypeError('{} is not an Optimizer'.format(
            type(optimizer).__name__))
    self.optimizer = optimizer
    if last_iter == -1:
        # Fresh start: remember each param group's starting lr.
        for group in optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
        last_iter = 0
    else:
        # Resuming: every param group must already carry its initial lr.
        for i, group in enumerate(optimizer.param_groups):
            if 'initial_lr' not in group:
                raise KeyError(
                    "param 'initial_lr' is not specified "
                    "in param_groups[{}] when resuming an optimizer".format(i))
    self.baselr = base_lr
    self.learning_rate = base_lr
    self.niters = epochs * batches
    self.targetlr = target_lr
    self.warmup_iters = batches * warmup_epochs
    self.warmup_lr = warmup_lr
    self.last_iter = last_iter
    self.step()
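The constructor above follows the usual torch LR-scheduler convention of stashing each param group's starting learning rate under the 'initial_lr' key. A minimal, self-contained sketch of just that bookkeeping with a plain SGD optimizer (the toy model and lr value are made up for illustration):

import torch
from torch.optim import SGD
from torch.optim.optimizer import Optimizer

model = torch.nn.Linear(4, 2)
opt = SGD(model.parameters(), lr=0.1)
assert isinstance(opt, Optimizer)

# Fresh-start branch: record the starting lr of every param group.
for group in opt.param_groups:
    group.setdefault('initial_lr', group['lr'])
print([g['initial_lr'] for g in opt.param_groups])  # [0.1]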
Example 2: restore
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def restore(self, modules, ckpt_p, strict=True, restore_restart=False):
    print('Restoring {}... (strict={})'.format(ckpt_p, strict))
    map_location = None if pe.CUDA_AVAILABLE else 'cpu'
    state_dicts = torch.load(ckpt_p, map_location=map_location)
    # ---
    for key, m in modules.items():
        # Optimizer implements its own load_state_dict, which does not accept the `strict` keyword...
        if isinstance(m, optimizer.Optimizer):
            if restore_restart:
                print('Not restoring optimizer, --restore_restart given...')
            else:
                try:
                    m.load_state_dict(state_dicts[key])
                except ValueError as e:
                    raise ValueError('Error while restoring Optimizer:', str(e))
        else:
            try:
                m.load_state_dict(state_dicts[key], strict=strict)
            except RuntimeError as e:  # loading error
                for n, module in sorted(m.named_modules()):
                    print(n, module)
                raise e
    return self.get_itr_from_ckpt_p(ckpt_p)
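For context, a stripped-down, runnable version of the same save/restore round trip using only plain torch objects (the checkpoint path and the 'net'/'opt' keys are invented for illustration):

import torch
from torch.optim import optimizer as optim_mod

model = torch.nn.Linear(4, 2)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
torch.save({'net': model.state_dict(), 'opt': opt.state_dict()}, '/tmp/ckpt.pt')

state_dicts = torch.load('/tmp/ckpt.pt', map_location='cpu')
for key, m in {'net': model, 'opt': opt}.items():
    if isinstance(m, optim_mod.Optimizer):
        m.load_state_dict(state_dicts[key])              # optimizers have no `strict` kwarg
    else:
        m.load_state_dict(state_dicts[key], strict=True)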
Example 3: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, solver, start_iter=1, iter_per_epoch=-1, local_rank=0):
    if not isinstance(optimizer, Optimizer):
        raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
    self.optimizer = optimizer
    self.solver = solver
    assert self.solver.LR_POLICY in ['STEP', 'COSINE', 'STEP_COSINE', 'POLY']
    assert self.solver.WARM_UP_METHOD in ['CONSTANT', 'LINEAR']
    self.base_lr = self.solver.BASE_LR
    self.new_lr = self.base_lr
    self.iteration = start_iter
    self.iter_per_epoch = iter_per_epoch
    self.local_rank = local_rank
    self.info = dict(best_acc=0.0, best_epoch=1, cur_acc=0.0, cur_epoch=1)
    if 'MAX_ITER' in self.solver:
        self.max_iter = self.solver.MAX_ITER
        self.warm_up_iters = self.solver.WARM_UP_ITERS
        self.steps = self.solver.STEPS  # only used by the STEP policy
    else:
        assert self.iter_per_epoch > 0  # iter_per_epoch must be specified in this case
        self.conver_epoch2iter()
Example 4: checkpoint_model
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def checkpoint_model(
    self,
    model: EmmentalModel,
    optimizer: Optimizer,
    lr_scheduler: _LRScheduler,
    metric_dict: Dict[str, float],
) -> None:
    """Checkpoint the model.

    Args:
        model: The model to checkpoint.
        optimizer: The optimizer used during the training process.
        lr_scheduler: The learning rate scheduler.
        metric_dict: The metric dict.
    """
    self.checkpointer.checkpoint(
        self.unit_total, model, optimizer, lr_scheduler, metric_dict
    )
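The Emmental checkpointer itself is not shown here, but the payload such a call typically ends up persisting can be sketched with plain torch only (the file path and metric value are placeholders, not Emmental's actual layout):

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)

checkpoint = {
    'model': model.state_dict(),
    'optimizer': opt.state_dict(),        # Optimizer state serializes as an ordinary dict
    'lr_scheduler': sched.state_dict(),
    'metric_dict': {'model/train/all/loss': 0.42},
}
torch.save(checkpoint, '/tmp/checkpoint.pt')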
Example 5: serialize_torch_obj
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def serialize_torch_obj(
    model: nn.Module,
    criterion: Any,
    optimizer: Type[Optimizer],
    **kwargs
) -> str:
    model_encoded = torch_encoder(
        TorchObj(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            optimizer_params=kwargs,
            is_lazy=False,
            model_parameters=None
        )
    )
    return json.dumps({
        'torch_obj': model_encoded,
        'shapes': [list(ps.shape) for ps in model.parameters()]
    })
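Note that serialize_torch_obj takes the optimizer class (Type[Optimizer]), not an instance; the class and its keyword arguments travel separately and are only combined later. A self-contained sketch of that pattern (build_optimizer is a hypothetical helper written for this illustration, not part of the library above):

import torch
import torch.nn as nn
from typing import Type
from torch.optim.optimizer import Optimizer

def build_optimizer(model: nn.Module, opt_cls: Type[Optimizer], **kwargs) -> Optimizer:
    # The optimizer is instantiated lazily from the class plus its saved kwargs.
    return opt_cls(model.parameters(), **kwargs)

model = nn.Linear(4, 2)
opt = build_optimizer(model, torch.optim.Adam, lr=1e-3, weight_decay=1e-4)
print(type(opt).__name__)  # Adam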
Example 6: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, last_step=-1):
    if not isinstance(optimizer, Optimizer):
        raise TypeError('{} is not an Optimizer'.format(
            type(optimizer).__name__))
    self.optimizer = optimizer
    if last_step == -1:
        for group in optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
    else:
        for i, group in enumerate(optimizer.param_groups):
            if 'initial_lr' not in group:
                raise KeyError(
                    "param 'initial_lr' is not specified "
                    "in param_groups[{}] when resuming an optimizer".format(i))
    self.base_lrs = list(
        map(lambda group: group['initial_lr'], optimizer.param_groups))
    self.step(last_step + 1)
    self.last_step = last_step
Example 7: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
             verbose=0, epsilon=1e-4, cooldown=0, min_lr=0, eps=1e-8):
    super(ReduceLROnPlateau, self).__init__()
    assert isinstance(optimizer, Optimizer)
    if factor >= 1.0:
        raise ValueError('ReduceLROnPlateau '
                         'does not support a factor >= 1.0.')
    self.factor = factor
    self.min_lr = min_lr
    self.epsilon = epsilon
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.monitor_op = None
    self.wait = 0
    self.best = 0
    self.mode = mode
    self.optimizer = optimizer
    self.eps = eps
    self._reset()
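This snippet re-implements a Keras-style ReduceLROnPlateau on top of a torch Optimizer. For comparison, the scheduler that ships with torch is driven like this (the validation losses below are dummies):

import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = ReduceLROnPlateau(opt, mode='min', factor=0.1, patience=10)

for epoch in range(3):
    val_loss = 1.0 / (epoch + 1)   # placeholder for a real validation loss
    scheduler.step(val_loss)       # lr is cut by `factor` after `patience` bad epochs
    print(opt.param_groups[0]['lr'])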
Example 8: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, last_epoch=-1):
    if not isinstance(optimizer, Optimizer):
        raise TypeError('{} is not an Optimizer'.format(
            type(optimizer).__name__))
    self.optimizer = optimizer
    if last_epoch == -1:
        for group in optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
    else:
        for i, group in enumerate(optimizer.param_groups):
            if 'initial_lr' not in group:
                raise KeyError("param 'initial_lr' is not specified "
                               "in param_groups[{}] when resuming an optimizer".format(i))
    self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
    self.step(last_epoch + 1)
    self.last_epoch = last_epoch
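This constructor mirrors torch's own _LRScheduler base class, so a custom scheduler built on the real base class only needs to provide get_lr. A small sketch, assuming the _LRScheduler import path below (newer releases also expose it as LRScheduler); the halve-every-epoch rule is made up for illustration:

import torch
from torch.optim.lr_scheduler import _LRScheduler

class HalveEveryEpoch(_LRScheduler):
    """Toy scheduler: lr_t = initial_lr * 0.5 ** last_epoch."""
    def get_lr(self):
        return [base_lr * (0.5 ** self.last_epoch) for base_lr in self.base_lrs]

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = HalveEveryEpoch(opt)
for _ in range(3):
    opt.step()
    sched.step()
    print(opt.param_groups[0]['lr'])   # 0.05, 0.025, 0.0125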
Example 9: on_before_zero_grad
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def on_before_zero_grad(self, optimizer: Optimizer) -> None:
    """
    Called after optimizer.step() and before optimizer.zero_grad().

    Called in the training loop after taking an optimizer step and before zeroing grads.
    A good place to inspect weight information with the weights already updated.

    This is where it is called::

        for optimizer in optimizers:
            optimizer.step()
            model.on_before_zero_grad(optimizer)  # <---- called here
            optimizer.zero_grad()

    Args:
        optimizer: The optimizer for which grads should be zeroed.
    """
    # do something with the optimizer or inspect it.
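This hook belongs to a PyTorch Lightning LightningModule. A rough sketch of overriding it in a minimal module, assuming a reasonably recent pytorch_lightning installation (the inspected quantity is arbitrary):

import torch
import pytorch_lightning as pl

class LitModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 2)

    def training_step(self, batch, batch_idx):
        return self.layer(batch).sum()

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def on_before_zero_grad(self, optimizer):
        # Weights were just updated by optimizer.step(); grads are not yet zeroed.
        print('weight norm:', self.layer.weight.norm().item())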
Example 10: backward
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def backward(self, trainer, loss: Tensor, optimizer: Optimizer, optimizer_idx: int) -> None:
    """
    Override backward with your own implementation if you need to.

    Args:
        trainer: Pointer to the trainer
        loss: Loss is already scaled by accumulated grads
        optimizer: Current optimizer being used
        optimizer_idx: Index of the current optimizer being used

    Called to perform the backward step.
    Feel free to override as needed.
    The loss passed in has already been scaled for accumulated gradients if requested.

    Example::

        def backward(self, trainer, loss, optimizer, optimizer_idx):
            loss.backward()
    """
    loss.backward()
Example 11: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, base_optimizer, sync_rate=0.5, sync_period=6):
    if sync_rate < 0 or sync_rate > 1:
        raise ValueError(f'expected a float between 0 and 1 as sync_rate, received: {sync_rate}')
    if not isinstance(sync_period, int) or sync_period < 1:
        raise ValueError(f'expected a positive integer as sync_period, received: {sync_period}')
    # Optimizer attributes
    self.defaults = dict(sync_rate=sync_rate, sync_period=sync_period)
    self.state = defaultdict(dict)
    # Base optimizer attributes
    self.base_optimizer = base_optimizer
    # Wrapper attributes
    self.fast_steps = 0
    self.param_groups = []
    for group in self.base_optimizer.param_groups:
        self._add_param_group(group)
    # Buffer for scouting
    self.buffer = []
    for group in self.param_groups:
        for p in group['params']:
            self.buffer.append(p.data.unsqueeze(0))
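The wrapper mirrors the base optimizer's param_groups and snapshots every parameter into a flat buffer. The same bookkeeping, isolated with plain torch (the shapes correspond to the toy Linear layer used below):

import torch

model = torch.nn.Linear(4, 2)
base = torch.optim.SGD(model.parameters(), lr=0.1)

buffer = [p.data.unsqueeze(0)
          for group in base.param_groups
          for p in group['params']]
print(len(buffer), buffer[0].shape)  # 2 tensors; the weight snapshot has shape (1, 2, 4)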
Example 12: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
             verbose=0, epsilon=1e-4, cooldown=0, min_lr=0):
    super(ReduceLROnPlateau, self).__init__()
    if factor >= 1.0:
        raise ValueError('ReduceLROnPlateau '
                         'does not support a factor >= 1.0.')
    self.factor = factor
    self.min_lr = min_lr
    self.epsilon = epsilon
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.monitor_op = None
    self.wait = 0
    self.best = 0
    self.mode = mode
    assert isinstance(optimizer, Optimizer)
    self.optimizer = optimizer
    self._reset()
Example 13: __init__
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, last_epoch=-1):
    if not isinstance(optimizer, Optimizer):
        raise TypeError('{} is not an Optimizer'.format(
            type(optimizer).__name__))
    self.optimizer = optimizer
    if last_epoch == -1:
        for group in optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
    else:
        for i, group in enumerate(optimizer.param_groups):
            if 'initial_lr' not in group:
                raise KeyError("param 'initial_lr' is not specified "
                               "in param_groups[{}] when resuming an optimizer".format(i))
    self.base_lrs = list(
        map(lambda group: group['initial_lr'], optimizer.param_groups))
    self.step(last_epoch + 1)
    self.last_epoch = last_epoch
Example 14: setup_tb_logging
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
    """Method to set up TensorBoard logging on a trainer and a list of evaluators. Logged metrics are:

    - Training metrics, e.g. running average loss values
    - Learning rate(s)
    - Evaluation metrics

    Args:
        output_path (str): logging directory path
        trainer (Engine): trainer engine
        optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
            torch optimizers. If a dictionary, keys are used as tag arguments for logging.
        evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
            keys are used as tag arguments for logging.
        log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
            the value can be set to 1 or None.
        **kwargs: optional keyword args to be passed to construct the logger.

    Returns:
        TensorboardLogger
    """
    logger = TensorboardLogger(log_dir=output_path, **kwargs)
    _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
    return logger
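Assuming this helper is the one exposed in ignite.contrib.engines.common (as the docstring suggests), a typical call looks roughly like the following; the training step, log directory, and batch source are placeholders, and pytorch-ignite with TensorBoard support is assumed to be installed:

import torch
from ignite.engine import Engine
from ignite.contrib.engines import common

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

def train_step(engine, batch):
    opt.zero_grad()
    loss = model(batch).sum()
    loss.backward()
    opt.step()
    return loss.item()

trainer = Engine(train_step)
tb_logger = common.setup_tb_logging("/tmp/tb_logs", trainer,
                                    optimizers=opt, log_every_iters=1)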
Example 15: setup_visdom_logging
# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
    """Method to set up Visdom logging on a trainer and a list of evaluators. Logged metrics are:

    - Training metrics, e.g. running average loss values
    - Learning rate(s)
    - Evaluation metrics

    Args:
        trainer (Engine): trainer engine
        optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
            torch optimizers. If a dictionary, keys are used as tag arguments for logging.
        evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
            keys are used as tag arguments for logging.
        log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
            the value can be set to 1 or None.
        **kwargs: optional keyword args to be passed to construct the logger.

    Returns:
        VisdomLogger
    """
    logger = VisdomLogger(**kwargs)
    _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
    return logger