

Python lr_scheduler.StepLR Method Code Examples

This article collects typical usage examples of the Python method torch.optim.lr_scheduler.StepLR. If you are wondering what lr_scheduler.StepLR does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore further usage examples from its parent module, torch.optim.lr_scheduler.


The following presents 15 code examples of lr_scheduler.StepLR, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
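
Before the collected examples, here is a minimal, self-contained sketch of the StepLR API itself. The model, learning rate, and schedule values are placeholders chosen for illustration, and scheduler.get_last_lr() assumes PyTorch 1.4 or later.

import torch
from torch import nn, optim
from torch.optim import lr_scheduler

model = nn.Linear(10, 2)  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)
# Multiply every parameter group's LR by gamma=0.5 once every 30 epochs:
# 0.1 for epochs 0-29, 0.05 for 30-59, 0.025 for 60-89, ...
scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5)

for epoch in range(90):
    optimizer.step()  # stands in for a real training epoch (forward, backward, update)
    scheduler.step()  # advance the schedule once per epoch, after optimizer.step()
    print(epoch, scheduler.get_last_lr())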

Example 1: train

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss().cuda(device)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        for batch_idx, (x, label, _) in enumerate(tqdm(train_loader)):
            x, label = x.to(device), label.to(device)
            output = model(x)
            loss = criterion(output, label)
            acc = accuracy(output, label)
            acc_record.update(acc[0].item(), x.size(0))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # step the scheduler once per epoch, after the optimizer updates
        # (the ordering required since PyTorch 1.1)
        scheduler.step()
        print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(epoch, loss_record.avg, acc_record.avg))
        test(model, eva_loader, args)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Author: k-han, Project: DTC, Lines: 25, Source: imagenet_classif.py

Example 2: make_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(params, max_steps):

    name, *args = params.split("_")

    if name == "steplr":

        step_size, gamma = args
        step_size = int(step_size)
        gamma = float(gamma)

        return partial(StepLR, step_size=step_size, gamma=gamma)

    elif name == "1cycle":

        min_lr, max_lr = args
        min_lr = float(min_lr)
        max_lr = float(max_lr)

        return partial(
            OneCycleScheduler, min_lr=min_lr, max_lr=max_lr, max_steps=max_steps) 
Author: ex4sperans, Project: freesound-classification, Lines: 22, Source: training.py
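
A hedged usage sketch for this factory, assuming make_scheduler from Example 2 is in scope. The model and optimizer are placeholders, and only the "steplr" branch is exercised, since OneCycleScheduler is local to the freesound-classification project:

from torch import nn, optim

model = nn.Linear(4, 2)  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)

# "steplr_10_0.5" parses to StepLR with step_size=10 and gamma=0.5;
# the partial defers binding the optimizer until the caller has built it.
make = make_scheduler("steplr_10_0.5", max_steps=1000)
scheduler = make(optimizer)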

Example 3: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: bj80heyue, Project: One_Shot_Face_Reenactment, Lines: 21, Source: base_net.py
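
The opt object only needs the attributes read by the chosen branch. A hedged sketch using argparse.Namespace as a stand-in for the project's real option parser, assuming get_scheduler from Example 3 is in scope:

from argparse import Namespace
from torch import nn, optim

model = nn.Linear(4, 2)  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.01)

# 'step' branch: halve the learning rate every lr_decay_iters epochs
opt = Namespace(lr_policy='step', lr_decay_iters=50, niter=100, niter_decay=100)
scheduler = get_scheduler(optimizer, opt)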

Example 4: get_optim

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_optim(lr):
    # Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
    # stabilize the models.
    fc_params = [p for n,p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
    non_fc_params = [p for n,p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
    params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
    # params = [p for n,p in detector.named_parameters() if p.requires_grad]

    if conf.adam:
        optimizer = optim.Adadelta(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
    else:
        optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)

    #scheduler = StepLR(optimizer, step_size=1, gamma=0.5)
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=2, factor=0.5,
                                  verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
    return optimizer, scheduler 
Author: KaihuaTang, Project: VCTree-Scene-Graph-Generation, Lines: 19, Source: train_rels.py

Example 5: make_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )

    return scheduler 
Author: ofsoundof, Project: 3D_Appearance_SR, Lines: 20, Source: utility.py
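
The second branch encodes MultiStepLR milestones directly in the flag string, e.g. decay_type='step_200_400'. A hedged illustration, assuming make_scheduler from Example 5 is in scope and using argparse.Namespace as a stand-in for the project's options:

from argparse import Namespace
from torch import nn, optim

model = nn.Linear(4, 2)  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)

# 'step_200_400' -> milestones [200, 400]; the LR is multiplied by gamma at each one
args = Namespace(decay_type='step_200_400', lr_decay=200, gamma=0.5)
scheduler = make_scheduler(args, optimizer)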

Example 6: make_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    # elif (not if): plain 'step' would otherwise also match the substring test below
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        print(milestones)
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    elif args.decay_type == 'restart':
        scheduler = lrs.LambdaLR(my_optimizer, lambda epoch: multistep_restart(args.period, epoch))

    return scheduler
Author: ChaofWang, Project: AWSRN, Lines: 24, Source: utility.py

Example 7: test_create_lr_scheduler_with_warmup_with_real_model

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):

    model = dummy_model_factory(with_grads=False, with_frozen_layer=False)
    init_lr = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)
    scaled_lr = 0.02
    warmup_duration = 5
    step_size = 2
    gamma = 0.97

    output_simulated_values = [None] * 50

    create_lr_scheduler_with_warmup(
        torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),
        warmup_start_value=0.0,
        warmup_end_value=scaled_lr,
        warmup_duration=warmup_duration,
        output_simulated_values=output_simulated_values,
    )

    assert output_simulated_values[0] == [0, 0.0]
    assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]
    assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]
    v = [warmup_duration + step_size, init_lr * gamma]
    assert output_simulated_values[warmup_duration + step_size] == v 
Author: pytorch, Project: ignite, Lines: 27, Source: test_param_scheduler.py
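
In training code (rather than this simulation-only test), the returned object is attached to an ignite Engine as an event handler. A hedged sketch, assuming a trainer Engine and the optimizer above are in scope; the import path of create_lr_scheduler_with_warmup varies across ignite versions (ignite.handlers in recent releases, ignite.contrib.handlers in older ones):

from ignite.engine import Events

warmup_scheduler = create_lr_scheduler_with_warmup(
    torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.97),
    warmup_start_value=0.0,
    warmup_end_value=0.02,
    warmup_duration=5,
)
trainer.add_event_handler(Events.ITERATION_STARTED, warmup_scheduler)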

Example 8: initialize

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def initialize(config):
    model = get_model(config["model"])
    # Adapt model for distributed settings if configured
    model = idist.auto_model(model)

    optimizer = optim.SGD(
        model.parameters(),
        lr=config.get("learning_rate", 0.1),
        momentum=config.get("momentum", 0.9),
        weight_decay=config.get("weight_decay", 1e-5),
        nesterov=True,
    )
    optimizer = idist.auto_optim(optimizer)
    criterion = nn.CrossEntropyLoss().to(idist.device())

    le = config["num_iters_per_epoch"]
    lr_scheduler = StepLR(optimizer, step_size=le, gamma=0.9)

    return model, optimizer, criterion, lr_scheduler


# slide 1 #################################################################### 
Author: pytorch, Project: ignite, Lines: 24, Source: teaser.py
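
Note that step_size is set to le, the number of iterations per epoch: if this scheduler is then stepped once per iteration, the learning rate effectively decays by gamma=0.9 once per epoch.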

Example 9: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.nepoch) / float(opt.nepoch_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.nepoch, eta_min=0)
    elif opt.lr_policy == 'cyclic':
        scheduler = CyclicLR(optimizer, base_lr=opt.learning_rate / 10, max_lr=opt.learning_rate,
                             step_size=opt.nepoch_decay, mode='triangular2')
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


# learning rate schedules 
Author: oxai, Project: deepsaber, Lines: 23, Source: networks.py

Example 10: configure_lr_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def configure_lr_scheduler(self, optimizer, cfg):
        if cfg.SCHEDULER == 'step':
            scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.STEPS[0], gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'multi_step':
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=cfg.STEPS, gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'exponential':
            scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'SGDR':
            scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.MAX_EPOCHS)
        else:
            raise AssertionError('scheduler cannot be recognized.')
        return scheduler 
Author: ShuangXieIrene, Project: ssds.pytorch, Lines: 14, Source: ssds_train.py

Example 11: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 29, Source: networks.py

Example 12: get_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Author: joelmoniz, Project: DepthNets, Lines: 15, Source: networks.py

Example 13: step

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def step(optimizer, last_epoch, step_size=80, gamma=0.1, **_):
  return lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) 
Author: pudae, Project: kaggle-humpback, Lines: 4, Source: scheduler_factory.py

Example 14: none

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def none(optimizer, last_epoch, **_):
  return lr_scheduler.StepLR(optimizer, step_size=10000000, last_epoch=last_epoch) 
Author: pudae, Project: kaggle-humpback, Lines: 4, Source: scheduler_factory.py
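
Examples 13 and 14 share one factory signature, so a configuration string can select the scheduler; the **_ parameter silently absorbs unused config keys, and the 'none' variant fakes a constant learning rate by pushing step_size far beyond any realistic epoch count. A hedged dispatch sketch, assuming both factories are in scope (the SCHEDULERS registry and config dict are illustrative, not from the source file):

from torch import nn, optim

SCHEDULERS = {'step': step, 'none': none}  # factories from Examples 13 and 14

model = nn.Linear(4, 2)  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)

config = {'name': 'step', 'step_size': 30, 'gamma': 0.5, 'warmup': 0}
scheduler = SCHEDULERS[config.pop('name')](optimizer, last_epoch=-1, **config)
# 'warmup' is not used by the 'step' factory; its **_ swallows it without error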

Example 15: make_scheduler

# Required import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler_function = lrs.StepLR
        kwargs = {'step_size': args.lr_decay, 'gamma': args.gamma}
    elif args.decay_type.find('step') >= 0:
        scheduler_function = lrs.MultiStepLR
        milestones = list(map(lambda x: int(x), args.decay_type.split('-')[1:]))
        kwargs = {'milestones': milestones, 'gamma': args.gamma}

    return scheduler_function(my_optimizer, **kwargs) 
Author: HolmesShuan, Project: OISR-PyTorch, Lines: 12, Source: utility.py


Note: The torch.optim.lr_scheduler.StepLR method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.