

Python lr_scheduler.CosineAnnealingLR Method Code Examples

This article collects typical usage examples of the Python method torch.optim.lr_scheduler.CosineAnnealingLR. If you are wondering what lr_scheduler.CosineAnnealingLR does or how to use it, the curated examples below may help. You can also explore other usage examples from the torch.optim.lr_scheduler module.


The following presents 15 code examples of lr_scheduler.CosineAnnealingLR, sorted by popularity by default.
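Before the collected examples, here is a minimal self-contained sketch (not taken from any of the projects below) of how CosineAnnealingLR is typically wired up. The scheduler anneals each parameter group's learning rate from its initial value toward eta_min along half a cosine period over T_max steps, i.e. eta_t = eta_min + (eta_0 - eta_min) * (1 + cos(pi * t / T_max)) / 2:

import torch
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR

model = nn.Linear(10, 2)
optimizer = SGD(model.parameters(), lr=0.1)
# Anneal from 0.1 down to 1e-5 over 100 epochs (one scheduler.step() per epoch).
scheduler = CosineAnnealingLR(optimizer, T_max=100, eta_min=1e-5)

for epoch in range(100):
    # ... one epoch of training: forward, backward, optimizer.step() per batch ...
    optimizer.step()    # stands in for the real per-batch updates
    scheduler.step()    # advance the cosine schedule once per epoch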

Example 1: create_lr_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def create_lr_scheduler(optimizer, config):
    if config.lr_scheduler == 'cos':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
                                                   T_max=config.epochs,
                                                   eta_min=config.min_lr)
    elif config.lr_scheduler == 'multistep':
        if config.steps is None:
            return None
        if isinstance(config.steps, int):
            config.steps = [config.steps]
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             milestones=config.steps,
                                             gamma=config.gamma)
    elif config.lr_scheduler == 'exp-warmup':
        lr_lambda = exp_warmup(config.rampup_length,
                               config.rampdown_length,
                               config.epochs)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    elif config.lr_scheduler == 'none':
        scheduler = None
    else:
        raise ValueError("No such scheduler: {}".format(config.lr_scheduler))
    return scheduler 
Author: iBelieveCJM, Project: Tricks-of-Semi-supervisedDeepLeanring-Pytorch, Lines: 23, Source: main.py
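A short usage sketch for the factory above; SimpleNamespace stands in for the project's real config object, and the attribute values are illustrative:

from types import SimpleNamespace
from torch import nn
from torch.optim import SGD

model = nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
# Hypothetical config carrying the attributes create_lr_scheduler reads.
config = SimpleNamespace(lr_scheduler='cos', epochs=90, min_lr=1e-5)
scheduler = create_lr_scheduler(optimizer, config)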

Example 2: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: bj80heyue, Project: One_Shot_Face_Reenactment, Lines: 21, Source: base_net.py
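One caveat for factories like this that can return ReduceLROnPlateau: unlike the time-based schedulers, its step() expects the monitored metric. A hedged sketch of a stepping helper (step_scheduler is a hypothetical name):

from torch.optim import lr_scheduler

def step_scheduler(scheduler, val_loss):
    # ReduceLROnPlateau monitors a metric; the other schedulers take no argument.
    if isinstance(scheduler, lr_scheduler.ReduceLROnPlateau):
        scheduler.step(val_loss)
    else:
        scheduler.step()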

Example 3: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.nepoch) / float(opt.nepoch_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.nepoch, eta_min=0)
    elif opt.lr_policy == 'cyclic':
        # CyclicLR is presumably the project's own implementation, imported elsewhere
        scheduler = CyclicLR(optimizer, base_lr=opt.learning_rate / 10, max_lr=opt.learning_rate,
                             step_size=opt.nepoch_decay, mode='triangular2')
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


# learning rate schedules 
Author: oxai, Project: deepsaber, Lines: 23, Source: networks.py

Example 4: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler
    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: Boyiliee, Project: PONO, Lines: 27, Source: networks_pono.py
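To make the 'linear' policy concrete, here is a small worked check of lambda_rule; the parameter values are illustrative, not from the project:

niter, niter_decay, epoch_count = 100, 100, 1

def lambda_rule(epoch):
    return 1.0 - max(0, epoch + epoch_count - niter) / float(niter_decay + 1)

print(lambda_rule(0))    # 1.0     -> lr held at its initial value
print(lambda_rule(99))   # 1.0     -> last constant epoch
print(lambda_rule(149))  # ~0.505  -> halfway through the linear decay
print(lambda_rule(199))  # ~0.0099 -> essentially zero at the end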

Example 5: create_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def create_scheduler(args, optimizer, datasets):
    if args.scheduler == 'step':
        # args.milestones is presumably a string like "[30, 60]"; ast.literal_eval would be safer than eval
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=eval(args.milestones), gamma=args.lr_decay)
    elif args.scheduler == 'poly':
        total_step = (len(datasets['train']) / args.batch + 1) * args.epochs
        scheduler = lr_scheduler.LambdaLR(optimizer, lambda x: (1-x/total_step) ** args.power)
    elif args.scheduler == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=args.lr_decay, patience=args.patience)
    elif args.scheduler == 'constant':
        scheduler = lr_scheduler.LambdaLR(optimizer, lambda x: 1)
    elif args.scheduler == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, args.T_max, args.min_lr)
    return scheduler 
Author: miraiaroha, Project: ACAN, Lines: 15, Source: scheduler.py
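The 'poly' branch above decays the multiplier from 1 toward 0 once per optimizer step; a quick numeric check with illustrative values:

total_step, power = 1000, 0.9

def poly(x):
    return (1 - x / total_step) ** power

print(poly(0))    # 1.0
print(poly(500))  # ~0.536 at the halfway point
print(poly(999))  # ~0.002, almost fully decayed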

Example 6: configure_lr_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def configure_lr_scheduler(self, optimizer, cfg):
        if cfg.SCHEDULER == 'step':
            scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.STEPS[0], gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'multi_step':
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=cfg.STEPS, gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'exponential':
            scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'SGDR':
            scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.MAX_EPOCHS)
        else:
            raise ValueError('scheduler cannot be recognized.')
        return scheduler 
Author: ShuangXieIrene, Project: ssds.pytorch, Lines: 14, Source: ssds_train.py
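A note on the 'SGDR' branch above: plain CosineAnnealingLR anneals once, without the warm restarts the SGDR paper describes. PyTorch ships the restarting variant as CosineAnnealingWarmRestarts; a minimal sketch:

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
# First restart after 10 epochs, then periods of 20, 40, ... (T_mult=2).
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=1e-5)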

Example 7: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 29, Source: networks.py

Example 8: cosine

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def cosine(optimizer, last_epoch, T_max=50, eta_min=0.00001, **_):
  print('cosine annealing, T_max: {}, eta_min: {}, last_epoch: {}'.format(T_max, eta_min, last_epoch))
  return lr_scheduler.CosineAnnealingLR(optimizer, T_max=T_max, eta_min=eta_min,
                                        last_epoch=last_epoch) 
Author: pudae, Project: kaggle-humpback, Lines: 6, Source: scheduler_factory.py
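The last_epoch argument above is what makes this factory resume-friendly: passing the epoch index already completed puts the cosine curve back at that point. A hedged sketch (note that PyTorch requires an 'initial_lr' entry in each param group when last_epoch >= 0):

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR

model = nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
# Resume at epoch 30 of a 50-epoch cosine schedule.
for group in optimizer.param_groups:
    group.setdefault('initial_lr', group['lr'])
scheduler = CosineAnnealingLR(optimizer, T_max=50, eta_min=1e-5, last_epoch=30)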

Example 9: __init__

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
        scheduler = CosineAnnealingLR(
            optimizer,
            T_max=T_max,
            eta_min=eta_min,
            last_epoch=last_epoch,
        )
        super().__init__(scheduler) 
Author: PavelOstyakov, Project: pipeline, Lines: 10, Source: cyclical_lr_scheduler.py

Example 10: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l

        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: ermongroup, Project: ncsn, Lines: 30, Source: pix2pix.py

Example 11: build_lr_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def build_lr_scheduler(self):
        """Build cosine learning rate scheduler."""
        self.G_scheduler = lr_scheduler.CosineAnnealingLR(
            self.G_optimizer,
            T_max=self.train_config.total_step - self.train_config.warmup_step) 
Author: Yaoyi-Li, Project: GCA-Matting, Lines: 7, Source: trainer.py
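Since T_max here is measured in training steps (total_step - warmup_step) rather than epochs, the matching loop presumably calls scheduler.step() once per iteration after warmup ends; a hedged sketch (train_one_step is a hypothetical helper, not project code):

def run_training(trainer, total_step, warmup_step):
    for step in range(total_step):
        trainer.train_one_step()        # hypothetical per-batch update
        if step >= warmup_step:
            trainer.G_scheduler.step()  # advance the cosine curve per step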

Example 12: restore_model

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def restore_model(self, resume_checkpoint):
        """
        Restore the trained generator and discriminator.
        :param resume_checkpoint: File name of checkpoint
        :return:
        """
        pth_path = os.path.join(self.log_config.checkpoint_path, '{}.pth'.format(resume_checkpoint))
        checkpoint = torch.load(pth_path, map_location = lambda storage, loc: storage.cuda(CONFIG.gpu))
        self.resume_step = checkpoint['iter']
        self.logger.info('Loading the trained models from step {}...'.format(self.resume_step))
        self.G.load_state_dict(checkpoint['state_dict'], strict=True)

        if not self.train_config.reset_lr:
            if 'opt_state_dict' in checkpoint.keys():
                try:
                    self.G_optimizer.load_state_dict(checkpoint['opt_state_dict'])
                except ValueError as ve:
                    self.logger.error("{}".format(ve))
            else:
                self.logger.info('No Optimizer State Loaded!!')

            if 'lr_state_dict' in checkpoint.keys():
                try:
                    self.G_scheduler.load_state_dict(checkpoint['lr_state_dict'])
                except ValueError as ve:
                    self.logger.error("{}".format(ve))
        else:
            self.G_scheduler = lr_scheduler.CosineAnnealingLR(self.G_optimizer,
                                                              T_max=self.train_config.total_step - self.resume_step - 1)

        if 'loss' in checkpoint.keys():
            self.best_loss = checkpoint['loss'] 
Author: Yaoyi-Li, Project: GCA-Matting, Lines: 34, Source: trainer.py
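The saving side of this restore logic is not shown in the snippet; a hedged sketch of a checkpoint writer matching the keys restore_model reads (save_checkpoint is a hypothetical name):

import torch

def save_checkpoint(path, step, model, optimizer, scheduler, loss):
    # Keys mirror those read by restore_model above.
    torch.save({
        'iter': step,
        'state_dict': model.state_dict(),
        'opt_state_dict': optimizer.state_dict(),
        'lr_state_dict': scheduler.state_dict(),
        'loss': loss,
    }, path)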

Example 13: __init__

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def __init__(self, T_max, eta_min=0, step_on_iteration=False):
        super().__init__(
            lambda opt: _scheduler.CosineAnnealingLR(opt,
                                                     T_max,
                                                     eta_min=eta_min),
            step_on_iteration=step_on_iteration
        ) 
Author: lRomul, Project: argus, Lines: 9, Source: lr_schedulers.py

Example 14: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option dicts) -- stores all the experiment flags;
                              opt['lr_policy'] is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt['nepoch']> epochs
    and linearly decay the rate to zero over the next <opt['nepoch_decay']> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt['lr_policy'] == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt['epoch_count'] - opt['nepoch']) / float(opt['nepoch_decay'] + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt['lr_policy'] == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt['lr_decay_iters'], gamma=0.1)
    elif opt['lr_policy'] == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt['lr_policy'] == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt['nepoch'], eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt['lr_policy'])
    return scheduler 
Author: liweileev, Project: FET-GAN, Lines: 29, Source: blocks.py

Example 15: DelayedCosineAnnealingLR

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import CosineAnnealingLR [as alias]
def DelayedCosineAnnealingLR(optimizer, delay_iters, max_iters, eta_min_lr, warmup_factor,
                             warmup_iters, warmup_method, **kwargs):
    cosine_annealing_iters = max_iters - delay_iters
    base_scheduler = CosineAnnealingLR(optimizer, cosine_annealing_iters, eta_min_lr)
    return DelayedScheduler(optimizer, delay_iters, base_scheduler, warmup_factor, warmup_iters, warmup_method) 
Author: JDAI-CV, Project: fast-reid, Lines: 7, Source: lr_scheduler.py
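DelayedScheduler above is fast-reid project code, but recent PyTorch (1.10+) can express the same "hold, then anneal" shape with SequentialLR; a hedged equivalent that ignores the warmup arguments:

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import ConstantLR, CosineAnnealingLR, SequentialLR

model = nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)

delay_iters, max_iters, eta_min_lr = 1000, 5000, 1e-6
scheduler = SequentialLR(
    optimizer,
    schedulers=[
        ConstantLR(optimizer, factor=1.0, total_iters=delay_iters),   # hold the base lr
        CosineAnnealingLR(optimizer, T_max=max_iters - delay_iters,
                          eta_min=eta_min_lr),                        # then anneal
    ],
    milestones=[delay_iters],
)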


Note: The torch.optim.lr_scheduler.CosineAnnealingLR examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.