

Python optimizer.Optimizer Code Examples

This article collects typical usage examples of torch.optim.optimizer.Optimizer in Python, the base class of every optimizer in torch.optim. If you are wondering how optimizer.Optimizer is used in practice, or what real code that type-checks against it or annotates with it looks like, the curated examples below may help. You can also explore further usage examples from the torch.optim.optimizer module.


The following presents 15 code examples of optimizer.Optimizer, drawn from open-source projects and ordered by popularity.
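
Before the examples, here is a minimal sketch (not taken from any of the projects below) of the pattern most of them share: checking an object against the `Optimizer` base class and reading its `param_groups`. The model and learning rate are placeholders.

from torch import nn
from torch.optim import SGD
from torch.optim.optimizer import Optimizer

model = nn.Linear(4, 2)                          # placeholder model
opt = SGD(model.parameters(), lr=0.1)

assert isinstance(opt, Optimizer)                # every torch.optim optimizer subclasses Optimizer
for group in opt.param_groups:                   # per-group hyperparameters live here
    group.setdefault('initial_lr', group['lr'])  # the bookkeeping most schedulers below perform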

Example 1: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, batches, epochs, base_lr,
                 target_lr=0, warmup_epochs=0, warmup_lr=0, last_iter=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_iter == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            last_iter = 0
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError(
                        "param 'initial_lr' is not specified "
                        "in param_groups[{}] when resuming an optimizer".format(i))

        self.baselr = base_lr
        self.learning_rate = base_lr
        self.niters = epochs * batches
        self.targetlr = target_lr
        self.warmup_iters = batches * warmup_epochs
        self.warmup_lr = warmup_lr
        self.last_iter = last_iter
        self.step() 
Author: PistonY, Project: torch-toolbox, Lines: 27, Source: lr_scheduler.py

Example 2: restore

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def restore(self, modules, ckpt_p, strict=True, restore_restart=False):
        print('Restoring {}... (strict={})'.format(ckpt_p, strict))
        map_location = None if pe.CUDA_AVAILABLE else 'cpu'
        state_dicts = torch.load(ckpt_p, map_location=map_location)
        # ---
        for key, m in modules.items():
            # optim implements its own load_state_dict which does not have the `strict` keyword...
            if isinstance(m, optimizer.Optimizer):
                if restore_restart:
                    print('Not restoring optimizer, --restore_restart given...')
                else:
                    try:
                        m.load_state_dict(state_dicts[key])
                    except ValueError as e:
                        raise ValueError('Error while restoring Optimizer:', str(e))
            else:
                try:
                    m.load_state_dict(state_dicts[key], strict=strict)
                except RuntimeError as e:  # loading error
                    for n, module in sorted(m.named_modules()):
                        print(n, module)
                    raise e
        return self.get_itr_from_ckpt_p(ckpt_p) 
Author: fab-jul, Project: L3C-PyTorch, Lines: 25, Source: saver.py
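
As the inline comment notes, `Optimizer.load_state_dict` takes no `strict` keyword, unlike `nn.Module.load_state_dict`, which is why the two branches above differ. A minimal sketch of that distinction, with a placeholder checkpoint path and key names:

import torch
from torch import nn
from torch.optim import Adam

model = nn.Linear(4, 2)
opt = Adam(model.parameters())

ckpt = torch.load('checkpoint.pt', map_location='cpu')  # placeholder path and keys
model.load_state_dict(ckpt['model'], strict=False)      # nn.Module accepts strict=
opt.load_state_dict(ckpt['optim'])                      # Optimizer does not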

Example 3: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, solver, start_iter=1, iter_per_epoch=-1, local_rank=0):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
        self.optimizer = optimizer

        self.solver = solver
        assert self.solver.LR_POLICY in ['STEP', 'COSINE', 'STEP_COSINE', 'POLY']
        assert self.solver.WARM_UP_METHOD in ['CONSTANT', 'LINEAR']
        self.base_lr = self.solver.BASE_LR
        self.new_lr = self.base_lr

        self.iteration = start_iter
        self.iter_per_epoch = iter_per_epoch
        self.local_rank = local_rank

        self.info = dict(best_acc=0.0, best_epoch=1, cur_acc=0.0, cur_epoch=1)

        if 'MAX_ITER' in self.solver:
            self.max_iter = self.solver.MAX_ITER
            self.warm_up_iters = self.solver.WARM_UP_ITERS
            self.steps = self.solver.STEPS  # only useful for step policy
        else:
            assert self.iter_per_epoch > 0  # need to specify the iter_per_epoch
            self.conver_epoch2iter() 
Author: soeaver, Project: Parsing-R-CNN, Lines: 26, Source: lr_scheduler.py

Example 4: checkpoint_model

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def checkpoint_model(
        self,
        model: EmmentalModel,
        optimizer: Optimizer,
        lr_scheduler: _LRScheduler,
        metric_dict: Dict[str, float],
    ) -> None:
        """Checkpoint the model.

        Args:
          model: The model to checkpoint.
          optimizer: The optimizer used during training process.
          lr_scheduler: Learning rate scheduler.
          metric_dict: the metric dict.
        """
        self.checkpointer.checkpoint(
            self.unit_total, model, optimizer, lr_scheduler, metric_dict
        ) 
Author: SenWu, Project: emmental, Lines: 20, Source: logging_manager.py

Example 5: serialize_torch_obj

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def serialize_torch_obj(
        model: nn.Module,
        criterion: Any,
        optimizer: Type[Optimizer],
        **kwargs
) -> str:
    model_encoded = torch_encoder(
        TorchObj(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            optimizer_params=kwargs,
            is_lazy=False,
            model_parameters=None
        )
    )
    return json.dumps({
        'torch_obj': model_encoded,
        'shapes': [list(ps.shape) for ps in model.parameters()]
    }) 
Author: dmmiller612, Project: sparktorch, Lines: 22, Source: util.py
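
A hedged usage sketch of `serialize_torch_obj`, assuming sparktorch exposes it at the package top level; the network, loss, and learning rate are placeholders, and extra keyword arguments are forwarded as `optimizer_params`:

import torch
import torch.nn as nn
from sparktorch import serialize_torch_obj  # assumed top-level export

network = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 1))
torch_obj = serialize_torch_obj(
    model=network,
    criterion=nn.MSELoss(),
    optimizer=torch.optim.Adam,  # the optimizer class, not an instance
    lr=0.001                     # collected into optimizer_params via **kwargs
)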

Example 6: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, last_step=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_step == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError(
                        "param 'initial_lr' is not specified "
                        "in param_groups[{}] when resuming an optimizer".
                        format(i))
        self.base_lrs = list(
            map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.step(last_step + 1)
        self.last_step = last_step 
Author: traveller59, Project: second.pytorch, Lines: 21, Source: learning_schedules.py

Example 7: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
                 verbose=0, epsilon=1e-4, cooldown=0, min_lr=0,eps=1e-8):

        super(ReduceLROnPlateau, self).__init__()
        assert isinstance(optimizer, Optimizer)
        if factor >= 1.0:
            raise ValueError('ReduceLROnPlateau '
                             'does not support a factor >= 1.0.')
        self.factor = factor
        self.min_lr = min_lr
        self.epsilon = epsilon
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.monitor_op = None
        self.wait = 0
        self.best = 0
        self.mode = mode
        self.optimizer = optimizer
        self.eps = eps
        self._reset() 
Author: lonePatient, Project: Bert-Multi-Label-Text-Classification, Lines: 24, Source: lr_schedulers.py
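
For comparison, PyTorch ships a built-in scheduler with the same name and a very similar constructor (its `threshold` argument plays the role of `epsilon` here). A minimal sketch of its typical use, with a placeholder validation metric:

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = nn.Linear(4, 2)
opt = SGD(model.parameters(), lr=0.1)
scheduler = ReduceLROnPlateau(opt, mode='min', factor=0.1, patience=10)

for epoch in range(30):
    val_loss = 1.0            # placeholder validation metric
    scheduler.step(val_loss)  # lowers the lr after `patience` epochs without improvement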

Example 8: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, last_epoch=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch 
Author: lancopku, Project: Global-Encoding, Lines: 18, Source: lr_scheduler.py
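
The constructor above mirrors the initialization logic of PyTorch's scheduler base class. A minimal sketch of how the same bookkeeping is normally obtained by subclassing `torch.optim.lr_scheduler._LRScheduler` (a private but widely used base class); the decay rule is an arbitrary placeholder:

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import _LRScheduler

class HalveEveryTenEpochs(_LRScheduler):  # hypothetical scheduler
    def get_lr(self):
        # one learning rate per param group, derived from base_lrs and last_epoch
        return [base_lr * (0.5 ** (self.last_epoch // 10)) for base_lr in self.base_lrs]

model = nn.Linear(4, 2)
opt = SGD(model.parameters(), lr=0.1)
scheduler = HalveEveryTenEpochs(opt)      # the base class records initial_lr for each group
scheduler.step()                          # advance one epoch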

Example 9: on_before_zero_grad

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def on_before_zero_grad(self, optimizer: Optimizer) -> None:
        """
        Called after optimizer.step() and before optimizer.zero_grad().

        Called in the training loop after taking an optimizer step and before zeroing grads.
        Good place to inspect weight information with weights updated.

        This is where it is called::

            for optimizer in optimizers:
                optimizer.step()
                model.on_before_zero_grad(optimizer) # < ---- called here
                optimizer.zero_grad()

        Args:
            optimizer: The optimizer for which grads should be zeroed.
        """
        # do something with the optimizer or inspect it. 
Author: PyTorchLightning, Project: pytorch-lightning, Lines: 20, Source: hooks.py
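
A hedged sketch of overriding this hook in a `LightningModule`, here to inspect gradient norms right before they are zeroed; the module body is a placeholder:

import torch
from torch import nn
from torch.optim.optimizer import Optimizer
import pytorch_lightning as pl

class GradNormLogger(pl.LightningModule):  # hypothetical module
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(4, 2)

    def on_before_zero_grad(self, optimizer: Optimizer) -> None:
        # optimizer.step() has run and zero_grad() has not, so .grad is still populated
        grads = [p.grad.norm() for p in self.parameters() if p.grad is not None]
        if grads:
            print(f'grad norm before zeroing: {torch.stack(grads).norm():.4f}')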

Example 10: backward

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def backward(self, trainer, loss: Tensor, optimizer: Optimizer, optimizer_idx: int) -> None:
        """
        Override backward with your own implementation if you need to.

        Args:
            trainer: Pointer to the trainer
            loss: Loss is already scaled by accumulated grads
            optimizer: Current optimizer being used
            optimizer_idx: Index of the current optimizer being used

        Called to perform backward step.
        Feel free to override as needed.

        The loss passed in has already been scaled for accumulated gradients if requested.

        Example::

            def backward(self, trainer, loss, optimizer, optimizer_idx):
                loss.backward()

        """
        loss.backward() 
Author: PyTorchLightning, Project: pytorch-lightning, Lines: 24, Source: hooks.py

Example 11: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
# Also requires: from collections import defaultdict (used for self.state below)
def __init__(self, base_optimizer, sync_rate=0.5, sync_period=6):
        if sync_rate < 0 or sync_rate > 1:
            raise ValueError(f'expected positive float lower than 1 as sync_rate, received: {sync_rate}')
        if not isinstance(sync_period, int) or sync_period < 1:
            raise ValueError(f'expected positive integer as sync_period, received: {sync_period}')
        # Optimizer attributes
        self.defaults = dict(sync_rate=sync_rate, sync_period=sync_period)
        self.state = defaultdict(dict)
        # Base optimizer attributes
        self.base_optimizer = base_optimizer
        # Wrapper attributes
        self.fast_steps = 0
        self.param_groups = []
        for group in self.base_optimizer.param_groups:
            self._add_param_group(group)
        # Buffer for scouting
        self.buffer = []
        for group in self.param_groups:
            for p in group['params']:
                self.buffer.append(p.data.unsqueeze(0)) 
Author: frgfm, Project: Holocron, Lines: 22, Source: wrapper.py

Example 12: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
                 verbose=0, epsilon=1e-4, cooldown=0, min_lr=0):
        super(ReduceLROnPlateau, self).__init__()

        if factor >= 1.0:
            raise ValueError('ReduceLROnPlateau '
                             'does not support a factor >= 1.0.')
        self.factor = factor
        self.min_lr = min_lr
        self.epsilon = epsilon
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.monitor_op = None
        self.wait = 0
        self.best = 0
        self.mode = mode
        assert isinstance(optimizer, Optimizer)
        self.optimizer = optimizer
        self._reset() 
Author: Jiaming-Liu, Project: pytorch-lr-scheduler, Lines: 23, Source: lr_scheduler.py

Example 13: __init__

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def __init__(self, optimizer, last_epoch=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(
            map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch 
Author: THUDM, Project: KOBE, Lines: 19, Source: lr_scheduler.py

Example 14: setup_tb_logging

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
    """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
        - Training metrics, e.g. running average loss values
        - Learning rate(s)
        - Evaluation metrics

    Args:
        output_path (str): logging directory path
        trainer (Engine): trainer engine
        optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
            torch optimizers. If a dictionary, keys are used as tags arguments for logging.
        evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
            keys are used as tags arguments for logging.
        log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
            value can be set to 1 or None.
        **kwargs: optional keyword args to be passed to construct the logger.

    Returns:
        TensorboardLogger
    """
    logger = TensorboardLogger(log_dir=output_path, **kwargs)
    _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
    return logger 
Author: pytorch, Project: ignite, Lines: 25, Source: common.py
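
A hedged usage sketch, assuming the helper lives in `ignite.contrib.engines.common` (consistent with the source file above); `trainer`, `evaluator`, and `opt` stand for an already-built ignite training engine, an evaluation engine, and a torch optimizer:

from ignite.contrib.engines.common import setup_tb_logging  # assumed import path

tb_logger = setup_tb_logging(
    output_path='tb_logs',
    trainer=trainer,                       # ignite Engine driving the training loop
    optimizers=opt,                        # or a dict, e.g. {'gen': opt_g, 'disc': opt_d}
    evaluators={'validation': evaluator},  # dict keys become logging tags
    log_every_iters=100,
)
tb_logger.close()                          # flush and release the underlying writer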

Example 15: setup_visdom_logging

# Required import: from torch.optim import optimizer [as alias]
# Or: from torch.optim.optimizer import Optimizer [as alias]
def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
    """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
        - Training metrics, e.g. running average loss values
        - Learning rate(s)
        - Evaluation metrics

    Args:
        trainer (Engine): trainer engine
        optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
            torch optimizers. If a dictionary, keys are used as tags arguments for logging.
        evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
            keys are used as tags arguments for logging.
        log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
            value can be set to 1 or None.
        **kwargs: optional keyword args to be passed to construct the logger.

    Returns:
        VisdomLogger
    """
    logger = VisdomLogger(**kwargs)
    _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
    return logger 
Author: pytorch, Project: ignite, Lines: 24, Source: common.py


Note: The torch.optim.optimizer.Optimizer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult the corresponding project's license before redistributing or reusing the code, and do not republish without permission.