

Python optim.Optimizer Method Code Examples

This article collects typical usage examples of torch.optim.Optimizer in Python. If you are wondering what optim.Optimizer is for, how to use it, or what real-world examples of it look like, the curated code samples below may help. You can also explore further usage examples from the torch.optim module it belongs to.


Below are 15 code examples of optim.Optimizer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
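
Before the examples, here is a minimal self-contained sketch of the two most common roles torch.optim.Optimizer plays in the snippets below: as the target of an isinstance check and as the object driving a standard training step. The model and data are made up purely for illustration.

import torch
from torch import nn
from torch.optim import Optimizer, SGD

def require_optimizer(opt):
    # Validation pattern used by many of the schedulers and helpers below.
    if not isinstance(opt, Optimizer):
        raise TypeError('{} is not an Optimizer'.format(type(opt).__name__))
    return opt

model = nn.Linear(4, 2)
optimizer = require_optimizer(SGD(model.parameters(), lr=0.1))

# The canonical zero_grad / backward / step cycle.
loss = model(torch.randn(8, 4)).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()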

Example 1: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(self, optimizer, last_epoch=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch 
Author: mapillary, Project: seamseg, Lines of code: 18, Source file: scheduler.py

Example 2: load

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def load(self, path_to_checkpoint: str, optimizer: Optimizer = None, scheduler: _LRScheduler = None) -> 'Model':
        checkpoint = torch.load(path_to_checkpoint)
        self.load_state_dict(checkpoint['state_dict'])

        # model_dict = self.state_dict()
        # pretrained_dict = {k: v for k, v in checkpoint.items() if k in model_dict}  # filter out unnecessary keys
        # model_dict.update(pretrained_dict)
        # self.load_state_dict(model_dict)
        # torch.nn.DataParallel(self).cuda()
        # step = checkpoint['step']
        step = 0  # the step counter is deliberately not restored in this variant
        # if optimizer is not None:
        #     optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # if scheduler is not None:
        #     scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        return step 
Author: MagicChuyi, Project: SlowFast-Network-pytorch, Lines of code: 18, Source file: model.py

Example 3: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(
        self,
        model,
        optimizer,
        checkpoint_dirpath,
        step_size=1,
        last_epoch=-1,
        **kwargs,
    ):

        if not isinstance(model, nn.Module):
            raise TypeError("{} is not a Module".format(type(model).__name__))

        if not isinstance(optimizer, optim.Optimizer):
            raise TypeError(
                "{} is not an Optimizer".format(type(optimizer).__name__)
            )

        self.model = model
        self.optimizer = optimizer
        self.ckpt_dirpath = Path(checkpoint_dirpath)
        self.step_size = step_size
        self.last_epoch = last_epoch
        self.init_directory(**kwargs) 
Author: batra-mlp-lab, Project: visdial-challenge-starter-pytorch, Lines of code: 26, Source file: checkpointing.py
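
In the originating project this constructor belongs to a checkpoint-manager class. A hedged usage sketch, assuming the class is called CheckpointManager (the name is not shown in the snippet), would be:

import torch
from torch import nn, optim

model = nn.Linear(10, 10)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# CheckpointManager is the assumed name of the class whose __init__ is shown above.
ckpt_manager = CheckpointManager(model, optimizer, "checkpoints/", step_size=1)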

Example 4: gradient_step

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def gradient_step(model: Module, optimiser: Optimizer, loss_fn: Callable, x: torch.Tensor, y: torch.Tensor, **kwargs):
    """Takes a single gradient step.

    # Arguments
        model: Model to be fitted
        optimiser: Optimiser to calculate gradient step from loss
        loss_fn: Loss function to calculate between predictions and outputs
        x: Input samples
        y: Input targets
    """
    model.train()
    optimiser.zero_grad()
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    loss.backward()
    optimiser.step()

    return loss, y_pred 
Author: oscarknagg, Project: few-shot, Lines of code: 20, Source file: train.py
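
A minimal, self-contained way to call gradient_step (the model, loss function, and batch below are invented for illustration):

import torch
from torch import nn
from torch.optim import SGD

model = nn.Linear(10, 2)
optimiser = SGD(model.parameters(), lr=0.01)
loss_fn = nn.CrossEntropyLoss()

x = torch.randn(32, 10)           # a batch of 32 input samples
y = torch.randint(0, 2, (32,))    # matching integer class targets

loss, y_pred = gradient_step(model, optimiser, loss_fn, x, y)
print(loss.item(), y_pred.shape)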

Example 5: optimizer_step

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def optimizer_step(optimizer: Optimizer, loss: torch.Tensor, **params) -> torch.Tensor:
    """
    Performs the backward pass with respect to ``loss``, as well as a gradient step.

    ``params`` is used to change the optimizer's parameters.

    Examples
    --------
    >>> optimizer = Adam(model.parameters(), lr=1)
    >>> optimizer_step(optimizer, loss) # perform a gradient step
    >>> optimizer_step(optimizer, loss, lr=1e-3) # set lr to 1e-3 and perform a gradient step
    >>> optimizer_step(optimizer, loss, betas=(0, 0)) # set betas to 0 and perform a gradient step

    Notes
    -----
    The incoming ``optimizer``'s parameters are not restored to their original values.
    """
    set_params(optimizer, **params)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss 
Author: neuro-ml, Project: deep_pipe, Lines of code: 24, Source file: model.py
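
The snippet relies on a set_params helper from the same project that is not shown here. A plausible sketch, an assumption rather than the deep_pipe implementation, would simply push the keyword arguments into every param group:

def set_params(optimizer, **params):
    # Assumed behaviour: overwrite hyperparameters (lr, betas, ...) in place
    # for every param group of the optimizer.
    for group in optimizer.param_groups:
        group.update(params)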

Example 6: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(
        self,
        optimizer: Optimizer,
        total_steps: int,
        warmup_steps: int,
        milestones: List[int],
        gamma: float = 0.1,
        last_epoch: int = -1,
    ):
        self.wsteps = warmup_steps
        self.milestones = milestones
        self.gamma = gamma

        # Keep a track of number of milestones encountered.
        self.milestones_so_far = 0

        # Common sanity checks.
        assert milestones == sorted(milestones), "milestones must be increasing"
        assert milestones[0] > warmup_steps, "first milestone must be after warmup"
        assert (
            milestones[-1] < total_steps
        ), "last milestone must be less than total steps"

        super().__init__(optimizer, self._lr_multiplier, last_epoch) 
Author: kdexd, Project: virtex, Lines of code: 26, Source file: lr_scheduler.py
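
The constructor hands self._lr_multiplier to the parent class (presumably a LambdaLR-style scheduler), but that method is not shown. A hedged sketch of what such a multiplier typically computes, linear warmup followed by a gamma decay at each milestone, might look like this (an illustration, not necessarily the virtex implementation):

def _lr_multiplier(self, step: int) -> float:
    # Linear warmup over the first `wsteps` steps ...
    if step < self.wsteps:
        return step / max(1, self.wsteps)
    # ... then multiply by gamma once for every milestone already passed.
    passed = sum(1 for milestone in self.milestones if step >= milestone)
    return self.gamma ** passed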

Example 7: current_lr

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def current_lr(self):
        """Get current learning rates.

        Returns:
            list[float] | dict[str, list[float]]: Current learning rates of all
                param groups. If the runner has a dict of optimizers, this
                method will return a dict.
        """
        if isinstance(self.optimizer, torch.optim.Optimizer):
            lr = [group['lr'] for group in self.optimizer.param_groups]
        elif isinstance(self.optimizer, dict):
            lr = dict()
            for name, optim in self.optimizer.items():
                lr[name] = [group['lr'] for group in optim.param_groups]
        else:
            raise RuntimeError(
                'lr is not applicable because optimizer does not exist.')
        return lr 
Author: open-mmlab, Project: mmcv, Lines of code: 20, Source file: base_runner.py

Example 8: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(self, hidden_size: int, optimizer: torch.optim.Optimizer,
                 factor: float = 1, warmup: int = 4000):
        """
        Warm-up, followed by learning rate decay.

        :param hidden_size:
        :param optimizer:
        :param factor: decay factor
        :param warmup: number of warmup steps
        """
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.hidden_size = hidden_size
        self._rate = 0 
Author: joeynmt, Project: joeynmt, Lines of code: 18, Source file: builders.py
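
The attributes suggest the classic "Noam" schedule from the Transformer paper. A hedged sketch of the step/rate update that usually accompanies such a constructor (an illustration, not necessarily the joeynmt code):

def step(self):
    # Advance one step and write the new learning rate into every param group.
    self._step += 1
    rate = self.factor * (self.hidden_size ** -0.5 *
                          min(self._step ** -0.5, self._step * self.warmup ** -1.5))
    for group in self.optimizer.param_groups:
        group['lr'] = rate
    self._rate = rate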

Example 9: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(self, optimizer, last_epoch=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            last_epoch = 0
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.last_epoch = last_epoch
        self.step(last_epoch) 
Author: iotayo, Project: aivivn-tone, Lines of code: 19, Source file: cyclic_lr.py

Example 10: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(self, model: Module, train_stages: [], loss: Module, optimizer: Optimizer):
        self._train_stages = train_stages
        self._loss = loss
        self._optimizer = optimizer
        self._model = model 
Author: toodef, Project: neural-pipeline, Lines of code: 7, Source file: train_config.py

Example 11: optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def optimizer(self) -> Optimizer:
        """
        Get optimizer object

        :return: optimizer object
        """
        return self._optimizer 
Author: toodef, Project: neural-pipeline, Lines of code: 9, Source file: train_config.py

Example 12: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(self, optimizer: Optimizer, milestones: List[int], gamma: float = 0.1,
                 factor: float = 0.3333, num_iters: int = 500, last_epoch: int = -1):
        self.factor = factor
        self.num_iters = num_iters
        super().__init__(optimizer, milestones, gamma, last_epoch) 
Author: potterhsu, Project: easy-faster-rcnn.pytorch, Lines of code: 7, Source file: lr_scheduler.py
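
Only the constructor is shown; the warmup behaviour itself would live in an overridden get_lr. A hedged sketch of one common formulation, a linear ramp from factor to 1 over the first num_iters iterations followed by the usual MultiStepLR decay, is given purely for illustration:

def get_lr(self):
    if self.last_epoch < self.num_iters:
        # Warmup: linearly ramp the multiplier from `factor` up to 1.
        alpha = self.last_epoch / self.num_iters
        multiplier = self.factor * (1 - alpha) + alpha
    else:
        # After warmup: decay by `gamma` once per milestone already passed.
        passed = sum(1 for m in self.milestones if self.last_epoch >= m)
        multiplier = self.gamma ** passed
    return [base_lr * multiplier for base_lr in self.base_lrs]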

Example 13: save

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def save(self, path_to_checkpoints_dir: str, step: int, optimizer: Optimizer, scheduler: _LRScheduler) -> str:
        path_to_checkpoint = os.path.join(path_to_checkpoints_dir, f'model-{step}.pth')
        checkpoint = {
            'state_dict': self.state_dict(),
            'step': step,
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict()
        }
        torch.save(checkpoint, path_to_checkpoint)
        return path_to_checkpoint 
Author: potterhsu, Project: easy-faster-rcnn.pytorch, Lines of code: 12, Source file: model.py

Example 14: load

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def load(self, path_to_checkpoint: str, optimizer: Optimizer = None, scheduler: _LRScheduler = None) -> 'Model':
        checkpoint = torch.load(path_to_checkpoint)
        self.load_state_dict(checkpoint['state_dict'])
        step = checkpoint['step']
        if optimizer is not None:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        if scheduler is not None:
            scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        return step 
Author: potterhsu, Project: easy-faster-rcnn.pytorch, Lines of code: 11, Source file: model.py
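
Taken together with example 13, a minimal save/load round trip might look like this (MyModel stands in for the project's Model subclass and is purely a placeholder):

import torch

model = MyModel()  # hypothetical subclass defining save()/load() as above
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

path = model.save('./checkpoints', step=1000, optimizer=optimizer, scheduler=scheduler)
restored_step = model.load(path, optimizer=optimizer, scheduler=scheduler)
assert restored_step == 1000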

Example 15: __init__

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Optimizer [as alias]
def __init__(self, model: Union[StateSpaceModel, StochasticProcess], samples=4, approx: BaseApproximation = None,
                 optimizer: Type[Optimizer] = Adam, max_iter=30e3, optkwargs=None, use_filter=True):
        """
        Implements Variational Bayes for stochastic processes implementing either `StateSpaceModel` or
        `StochasticProcess`.
        :param model: The model
        :param samples: The number of samples
        :param approx: The variational approximation to use for the latent space
        :param optimizer: The optimizer
        :param max_iter: The maximum number of iterations
        :param optkwargs: Any optimizer specific kwargs
        """

        super().__init__(max_iter)
        self._model = model
        self._ns = samples

        # ===== Approximations ===== #
        self._is_ssm = isinstance(model, StateSpaceModel)
        self._s_approx = None

        if self._is_ssm:
            self._s_approx = approx or StateMeanField(model.hidden)

        self._p_approx = ParameterMeanField()

        # ===== Helpers ===== #
        self._mask = None
        self._runavg = 0.
        self._decay = 0.975
        self._use_filter = use_filter

        # ===== Optimization stuff ===== #
        self._opt_type = optimizer
        self._optimizer = None
        self.optkwargs = optkwargs or dict() 
Author: tingiskhan, Project: pyfilter, Lines of code: 38, Source file: vb.py
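
Note that optimizer is stored here as a class (Type[Optimizer]) rather than an instance, so its construction is deferred until the learnable tensors of the approximations exist. In isolation, the deferred-construction pattern looks like this (a generic sketch, not the pyfilter internals):

import torch
from torch.optim import Adam

# Keep the optimizer class and its kwargs, build the instance later.
opt_type = Adam
optkwargs = {'lr': 1e-2}

params = [torch.zeros(3, requires_grad=True)]  # stand-in for the approximations' parameters
optimizer = opt_type(params, **optkwargs)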


Note: The torch.optim.Optimizer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before redistributing or using the code, and do not republish this article without permission.