

Python lr_scheduler.LambdaLR Method Code Examples

This article collects typical usage examples of the Python method torch.optim.lr_scheduler.LambdaLR. If you are wondering what lr_scheduler.LambdaLR does, how to call it, or want to see it used in real projects, the curated examples below should help. You can also explore further usage examples from its containing module, torch.optim.lr_scheduler.


The following presents 15 code examples of lr_scheduler.LambdaLR, ordered by popularity by default.
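
Before diving into the collected examples, a minimal self-contained sketch of the core idea may help: LambdaLR calls your lambda with the current epoch index and multiplies the optimizer's initial learning rate by the returned factor. The toy model and all numbers below are illustrative only.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(10, 2)
optimizer = SGD(model.parameters(), lr=0.1)

# Effective lr at epoch e is initial_lr * 0.95**e: exponential decay via a lambda.
scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 0.95 ** epoch)

for epoch in range(5):
    # ... one epoch of training would go here ...
    optimizer.step()   # normally called once per batch
    scheduler.step()   # then advance the schedule once per epoch
    print(epoch, optimizer.param_groups[0]['lr'])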

Example 1: create_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def create_scheduler(optimizer, total_steps, last_epoch):
    # cfg, lsf, lr_sched, train_utils, and model are module-level names in the
    # source project's train_rcnn.py.
    def lr_lbmd(cur_epoch):
        cur_decay = 1
        for decay_step in cfg.TRAIN.DECAY_STEP_LIST:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * cfg.TRAIN.LR_DECAY
        return max(cur_decay, cfg.TRAIN.LR_CLIP / cfg.TRAIN.LR)

    def bnm_lmbd(cur_epoch):
        cur_decay = 1
        for decay_step in cfg.TRAIN.BN_DECAY_STEP_LIST:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * cfg.TRAIN.BN_DECAY
        return max(cfg.TRAIN.BN_MOMENTUM * cur_decay, cfg.TRAIN.BNM_CLIP)

    if cfg.TRAIN.OPTIMIZER == 'adam_onecycle':
        lr_scheduler = lsf.OneCycle(
            optimizer, total_steps, cfg.TRAIN.LR, list(cfg.TRAIN.MOMS), cfg.TRAIN.DIV_FACTOR, cfg.TRAIN.PCT_START
        )
    else:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)

    bnm_scheduler = train_utils.BNMomentumScheduler(model, bnm_lmbd, last_epoch=last_epoch)
    return lr_scheduler, bnm_scheduler 
Author: sshaoshuai, Project: PointRCNN, Lines: 26, Source: train_rcnn.py

Example 2: create_lr_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def create_lr_scheduler(optimizer, config):
    if config.lr_scheduler == 'cos':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
                                                   T_max=config.epochs,
                                                   eta_min=config.min_lr)
    elif config.lr_scheduler == 'multistep':
        if config.steps is None:
            return None
        if isinstance(config.steps, int):
            config.steps = [config.steps]
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             milestones=config.steps,
                                             gamma=config.gamma)
    elif config.lr_scheduler == 'exp-warmup':
        lr_lambda = exp_warmup(config.rampup_length,
                               config.rampdown_length,
                               config.epochs)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    elif config.lr_scheduler == 'none':
        scheduler = None
    else:
        raise ValueError("No such scheduler: {}".format(config.lr_scheduler))
    return scheduler 
Author: iBelieveCJM, Project: Tricks-of-Semi-supervisedDeepLeanring-Pytorch, Lines: 23, Source: main.py
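
Note: exp_warmup is defined elsewhere in the project and is not shown here. A plausible sketch of such a factory (an assumption for illustration, not the repository's actual code) would ramp the factor up exponentially and then back down near the end of training:

import math

def exp_warmup(rampup_length, rampdown_length, num_epochs):
    # Hypothetical helper: returns an lr_lambda suitable for LambdaLR.
    def lr_lambda(epoch):
        factor = 1.0
        if epoch < rampup_length:  # exponential ramp-up: ~0.007 at epoch 0, 1.0 at rampup_length
            phase = 1.0 - epoch / rampup_length
            factor = math.exp(-5.0 * phase * phase)
        if epoch >= num_epochs - rampdown_length:  # linear ramp-down toward 0 at the last epoch
            factor *= (num_epochs - epoch) / rampdown_length
        return factor
    return lr_lambda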

Example 3: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_scheduler(args, optimizer):
    # parse_dict_args and get_lr are helpers defined elsewhere in utils.py;
    # note that this rebinds `args` to the parsed scheduler options.
    name, args = parse_dict_args(args.scheduler)
    if name == "fix":
        return None
    elif name == "inverse_sqrt":
        warmup = args.get('warmup', 4000)
        lr = get_lr(optimizer)
        lr_step = lr / warmup
        decay = lr * warmup ** 0.5

        def warm_decay(n):
            if n < warmup:
                return lr_step * n
            return decay * n ** -0.5

        return LambdaLR(optimizer, warm_decay)
    elif name == 'decay':
        return LambdaLR(optimizer, lambda ep: 1 / (1 + 0.05 * ep)) 
Author: choosewhatulike, Project: sparse-sharing, Lines: 20, Source: utils.py

Example 4: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: bj80heyue, Project: One_Shot_Face_Reenactment, Lines: 21, Source: base_net.py

Example 5: get_cosine_with_hard_restarts_schedule_with_warmup

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1
):
    """ Create a schedule with a learning rate that decreases following the
    values of the cosine function with several hard restarts, after a warmup
    period during which it increases linearly between 0 and 1.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch) 
Author: bhoov, Project: exbert, Lines: 19, Source: optimization.py
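
The shape of this schedule is easiest to check by stepping a throwaway optimizer and printing the resulting learning rates. A minimal sketch, assuming the function above (and math) is in scope; all numbers are illustrative:

import torch
from torch.optim import SGD

param = torch.nn.Parameter(torch.zeros(1))
optimizer = SGD([param], lr=0.1)
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps=10, num_training_steps=100, num_cycles=2.0
)

for step in range(100):
    optimizer.step()
    scheduler.step()
    if step in (0, 9, 10, 54, 55, 99):  # around the warmup boundary and the hard restart
        print(step, optimizer.param_groups[0]['lr'])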

Example 6: _create_lr_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def _create_lr_scheduler(self, state_dict=None):
    if self._learning_rate_scheduler_type is None:  # happens when loading pre-generated network
        return
    learning_rate_scheduler_type = self._learning_rate_scheduler_type
    iter_end = self._total_train_traces_end
    lr_init = self._learning_rate_init
    lr_end = self._learning_rate_end

    def _poly_decay(iter, power):
        return (lr_init - lr_end) * ((1 - iter / iter_end) ** power) + lr_end

    if self._optimizer is None:
        self._learning_rate_scheduler = None
    elif learning_rate_scheduler_type == LearningRateScheduler.POLY1:
        self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=1.) / lr_init)
    elif learning_rate_scheduler_type == LearningRateScheduler.POLY2:
        self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=2.) / lr_init)
    else:
        self._learning_rate_scheduler = None
    if self._learning_rate_scheduler is not None and state_dict is not None:
        self._learning_rate_scheduler.load_state_dict(state_dict)
Author: pyprob, Project: pyprob, Lines: 25, Source: inference_network.py

Example 7: get_lr_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_lr_scheduler(optimizer_conf, scheduler_name, optimizer, initial_epoch=-1):
  if scheduler_name == 'multistep':
    return lr_scheduler.MultiStepLR(optimizer,
                                    optimizer_conf.decay_steps,
                                    optimizer_conf.decay_factor,
                                    initial_epoch)
  elif scheduler_name == 'linear' or scheduler_name == 'polynomial':
    power = 1.0 if scheduler_name == 'linear' else optimizer_conf.decay_power
    lr_lambda = _get_polynomial_decay(optimizer_conf.learning_rate,
                                      optimizer_conf.end_learning_rate,
                                      optimizer_conf.decay_steps,
                                      optimizer_conf.get_attr('start_decay',
                                                              default=0),
                                      power)
    return lr_scheduler.LambdaLR(optimizer, lr_lambda, initial_epoch)
  else:
    raise ValueError('Unknown learning rate scheduler {}'.format(scheduler_name)) 
Author: mseitzer, Project: srgan, Lines: 19, Source: lr_schedulers.py
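
_get_polynomial_decay is defined elsewhere in lr_schedulers.py and is not shown here. A plausible sketch of such a factory (an assumption for illustration, not the project's actual code):

def _get_polynomial_decay(lr, end_lr, decay_steps, start_decay=0, power=1.0):
    # Hypothetical helper: returns an lr_lambda whose factor takes the
    # effective rate from lr down to end_lr over decay_steps steps.
    def lr_lambda(step):
        if step < start_decay:
            return 1.0
        progress = min(1.0, (step - start_decay) / float(decay_steps))
        decayed = (lr - end_lr) * (1.0 - progress) ** power + end_lr
        return decayed / lr  # LambdaLR multiplies the initial lr by this factor
    return lr_lambda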

Example 8: train_and_eval

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def train_and_eval(model, train_loader, eval_loader, tb_log, ckpt_dir, log_f):
    # args, train_one_epoch, eval_one_epoch, and save_checkpoint are defined
    # elsewhere in the source script.
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    def lr_lbmd(cur_epoch):
        cur_decay = 1
        for decay_step in args.decay_step_list:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * args.lr_decay
        return max(cur_decay, args.lr_clip / args.lr)

    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)

    total_it = 0
    for epoch in range(1, args.epochs + 1):
        lr_scheduler.step(epoch)
        total_it = train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f)

        if epoch % args.ckpt_save_interval == 0:
            with torch.no_grad():
                avg_iou = eval_one_epoch(model, eval_loader, epoch, tb_log, log_f)
                ckpt_name = os.path.join(ckpt_dir, 'checkpoint_epoch_%d' % epoch)
                save_checkpoint(model, epoch, ckpt_name) 
Author: sshaoshuai, Project: Pointnet2.PyTorch, Lines: 25, Source: train_and_eval.py
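
Note: calling lr_scheduler.step(epoch) at the top of each epoch reflects the pre-1.1 PyTorch convention; since PyTorch 1.1 the expected order is optimizer.step() first, then scheduler.step() with no epoch argument.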

Example 9: make_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:  # e.g. 'step_200_400' -> milestones [200, 400]
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        print(milestones)
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
        
    if args.decay_type == 'restart':
        scheduler = lrs.LambdaLR(my_optimizer, lambda epoch: multistep_restart(args.period, epoch))

    return scheduler 
Author: ChaofWang, Project: AWSRN, Lines: 24, Source: utility.py

Example 10: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.nepoch) / float(opt.nepoch_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.nepoch, eta_min=0)
    elif opt.lr_policy == 'cyclic':
        scheduler = CyclicLR(optimizer, base_lr=opt.learning_rate / 10, max_lr=opt.learning_rate,
                             step_size=opt.nepoch_decay, mode='triangular2')
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


# learning rate schedules 
Author: oxai, Project: deepsaber, Lines: 23, Source: networks.py

Example 11: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler
    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: Boyiliee, Project: PONO, Lines: 27, Source: networks_pono.py
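
To see what the 'linear' policy above produces, a small worked sketch (the values chosen for epoch_count, niter, and niter_decay are hypothetical):

# Hold the base LR for the first 100 epochs, then decay linearly toward zero
# over the next 100. Assumed values: epoch_count=1, niter=100, niter_decay=100.
epoch_count, niter, niter_decay = 1, 100, 100

def lambda_rule(epoch):
    return 1.0 - max(0, epoch + epoch_count - niter) / float(niter_decay + 1)

for epoch in (0, 99, 100, 150, 199):
    print(epoch, round(lambda_rule(epoch), 3))
# -> 1.0, 1.0, 0.99, 0.495, 0.01: flat, then a straight line toward zero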

Example 12: __init__

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def __init__(self, *, lr_lambda, last_epoch=-1):
    """Sets the learning rate of each parameter group to the initial lr
    times a given function. When last_epoch=-1, sets initial lr as lr.

    Args:
        lr_lambda (function or list): A function which computes a multiplicative
            factor given an integer parameter epoch, or a list of such
            functions, one for each group in optimizer.param_groups.
        last_epoch (int): The index of last epoch. Default: -1.

    Example:
        >>> # Assuming optimizer has two groups.
        >>> lambda1 = lambda epoch: epoch // 30
        >>> lambda2 = lambda epoch: 0.95 ** epoch
        >>> scheduler = LambdaLR(lr_lambda=[lambda1, lambda2])
        >>> scheduler(optimizer)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    """
    super().__init__(lr_scheduler.LambdaLR, lr_lambda=lr_lambda, last_epoch=last_epoch)
Author: yoshida-lab, Project: XenonPy, Lines: 24, Source: lr_scheduler.py
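
The wrapper above stores the constructor arguments and defers binding to an optimizer, which is why the docstring example first builds the scheduler without an optimizer and then calls scheduler(optimizer): super().__init__ receives lr_scheduler.LambdaLR as the class to instantiate later.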

Example 13: create_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def create_scheduler(args, optimizer, datasets):
    if args.scheduler == 'step':
        # milestones arrives as a string like '[10, 20]'; ast.literal_eval would be safer than eval
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=eval(args.milestones), gamma=args.lr_decay)
    elif args.scheduler == 'poly':
        total_step = (len(datasets['train']) / args.batch + 1) * args.epochs
        scheduler = lr_scheduler.LambdaLR(optimizer, lambda x: (1-x/total_step) ** args.power)
    elif args.scheduler == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=args.lr_decay, patience=args.patience)
    elif args.scheduler == 'constant':
        scheduler = lr_scheduler.LambdaLR(optimizer, lambda x: 1)
    elif args.scheduler == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, args.T_max, args.min_lr)
    return scheduler 
Author: miraiaroha, Project: ACAN, Lines: 15, Source: scheduler.py

Example 14: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 29, Source: networks.py

Example 15: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import LambdaLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: joelmoniz, Project: DepthNets, Lines: 15, Source: networks.py


Note: The torch.optim.lr_scheduler.LambdaLR examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects and remain the copyright of their original authors; consult each project's License before redistributing or reusing them, and do not republish without permission.