

Python lr_scheduler.StepLR Method Code Examples

This article collects typical usage examples of the Python method torch.optim.lr_scheduler.StepLR. If you are wondering how lr_scheduler.StepLR is used in practice, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of its containing module, torch.optim.lr_scheduler.


The following presents 15 code examples of the lr_scheduler.StepLR method, sorted by popularity by default.
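Before turning to the project examples, here is a minimal, self-contained sketch of the basic behaviour: StepLR multiplies each parameter group's learning rate by gamma every step_size epochs. The model, data, and hyperparameters below are illustrative assumptions, not taken from any of the projects listed.

```python
import torch
from torch import nn, optim
from torch.optim import lr_scheduler

# Toy model and optimizer, for illustration only.
model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)

# Multiply the learning rate by 0.5 every 10 epochs.
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

for epoch in range(30):
    # ... run one epoch of training here, calling optimizer.step() per batch ...
    scheduler.step()  # advance the schedule once per epoch
    # Printed lr: 0.1 through epoch 8, 0.05 from epoch 9, 0.025 from epoch 19.
    print(epoch, scheduler.get_last_lr())
```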

Example 1: train

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss().cuda(device)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    for epoch in range(args.epochs):
        scheduler.step()
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        for batch_idx, (x, label, _) in enumerate(tqdm(train_loader)):
            x, label = x.to(device), label.to(device)
            output = model(x)
            loss = criterion(output, label) 
            acc = accuracy(output, label)
            acc_record.update(acc[0].item(), x.size(0))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(epoch, loss_record.avg, acc_record.avg))
        test(model, eva_loader, args)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir)) 
Developer: k-han, Project: DTC, Lines: 25, Source: imagenet_classif.py
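Note that this example calls scheduler.step() at the top of each epoch, which follows the pre-1.1 PyTorch convention. Since PyTorch 1.1, the recommended order is to call optimizer.step() before scheduler.step(). A sketch of the current ordering, reusing the objects defined in the example above (this is not part of the original project):

```python
# Assumes model, optimizer, criterion, train_loader, args, and device
# are already defined as in the example above.
scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
for epoch in range(args.epochs):
    model.train()
    for x, label, _ in train_loader:
        x, label = x.to(device), label.to(device)
        loss = criterion(model(x), label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()   # update the parameters first
    scheduler.step()       # then advance the learning-rate schedule once per epoch
```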

Example 2: make_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(params, max_steps):

    name, *args = params.split("_")

    if name == "steplr":

        step_size, gamma = args
        step_size = int(step_size)
        gamma = float(gamma)

        return partial(StepLR, step_size=step_size, gamma=gamma)

    elif name == "1cycle":

        min_lr, max_lr = args
        min_lr = float(min_lr)
        max_lr = float(max_lr)

        return partial(
            OneCycleScheduler, min_lr=min_lr, max_lr=max_lr, max_steps=max_steps) 
Developer: ex4sperans, Project: freesound-classification, Lines: 22, Source: training.py
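Unlike most of the other examples, make_scheduler here returns a scheduler factory rather than a scheduler instance: the functools.partial object still has to be called with an optimizer. A hypothetical usage sketch under that assumption (the model, optimizer, and parameter string below are illustrative, not taken from the project):

```python
from torch import nn, optim

# Assumes make_scheduler from the example above is in scope.
model = nn.Linear(8, 1)
optimizer = optim.SGD(model.parameters(), lr=0.1)

# "steplr_30_0.1" is parsed as name="steplr", step_size=30, gamma=0.1.
factory = make_scheduler("steplr_30_0.1", max_steps=1000)
scheduler = factory(optimizer)  # equivalent to StepLR(optimizer, step_size=30, gamma=0.1)
```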

Example 3: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Developer: bj80heyue, Project: One_Shot_Face_Reenactment, Lines: 21, Source: base_net.py

Example 4: get_optim

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_optim(lr):
    # Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
    # stabilize the models.
    fc_params = [p for n,p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
    non_fc_params = [p for n,p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
    params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
    # params = [p for n,p in detector.named_parameters() if p.requires_grad]

    if conf.adam:
        optimizer = optim.Adadelta(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
    else:
        optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)

    #scheduler = StepLR(optimizer, step_size=1, gamma=0.5)
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=2, factor=0.5,
                                  verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
    return optimizer, scheduler 
Developer: KaihuaTang, Project: VCTree-Scene-Graph-Generation, Lines: 19, Source: train_rels.py

Example 5: make_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )

    return scheduler 
Developer: ofsoundof, Project: 3D_Appearance_SR, Lines: 20, Source: utility.py

Example 6: make_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        print(milestones)
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
        
    if args.decay_type == 'restart':
        scheduler = lrs.LambdaLR(my_optimizer, lambda epoch: multistep_restart(args.period, epoch))

    return scheduler 
Developer: ChaofWang, Project: AWSRN, Lines: 24, Source: utility.py

Example 7: test_create_lr_scheduler_with_warmup_with_real_model

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):

    model = dummy_model_factory(with_grads=False, with_frozen_layer=False)
    init_lr = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)
    scaled_lr = 0.02
    warmup_duration = 5
    step_size = 2
    gamma = 0.97

    output_simulated_values = [None] * 50

    create_lr_scheduler_with_warmup(
        torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),
        warmup_start_value=0.0,
        warmup_end_value=scaled_lr,
        warmup_duration=warmup_duration,
        output_simulated_values=output_simulated_values,
    )

    assert output_simulated_values[0] == [0, 0.0]
    assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]
    assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]
    v = [warmup_duration + step_size, init_lr * gamma]
    assert output_simulated_values[warmup_duration + step_size] == v 
Developer: pytorch, Project: ignite, Lines: 27, Source: test_param_scheduler.py

Example 8: initialize

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def initialize(config):
    model = get_model(config["model"])
    # Adapt model for distributed settings if configured
    model = idist.auto_model(model)

    optimizer = optim.SGD(
        model.parameters(),
        lr=config.get("learning_rate", 0.1),
        momentum=config.get("momentum", 0.9),
        weight_decay=config.get("weight_decay", 1e-5),
        nesterov=True,
    )
    optimizer = idist.auto_optim(optimizer)
    criterion = nn.CrossEntropyLoss().to(idist.device())

    le = config["num_iters_per_epoch"]
    lr_scheduler = StepLR(optimizer, step_size=le, gamma=0.9)

    return model, optimizer, criterion, lr_scheduler


# slide 1 #################################################################### 
Developer: pytorch, Project: ignite, Lines: 24, Source: teaser.py

Example 9: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.nepoch) / float(opt.nepoch_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.nepoch, eta_min=0)
    elif opt.lr_policy == 'cyclic':
        scheduler = CyclicLR(optimizer, base_lr=opt.learning_rate / 10, max_lr=opt.learning_rate,
                             step_size=opt.nepoch_decay, mode='triangular2')
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


# learning rate schedules 
Developer: oxai, Project: deepsaber, Lines: 23, Source: networks.py

Example 10: configure_lr_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def configure_lr_scheduler(self, optimizer, cfg):
        if cfg.SCHEDULER == 'step':
            scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.STEPS[0], gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'multi_step':
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=cfg.STEPS, gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'exponential':
            scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg.GAMMA)
        elif cfg.SCHEDULER == 'SGDR':
            scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.MAX_EPOCHS)
        else:
            raise AssertionError('scheduler can not be recognized.')
        return scheduler 
Developer: ShuangXieIrene, Project: ssds.pytorch, Lines: 14, Source: ssds_train.py

Example 11: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Developer: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 29, Source: networks.py

Example 12: get_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler 
Developer: joelmoniz, Project: DepthNets, Lines: 15, Source: networks.py

Example 13: step

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def step(optimizer, last_epoch, step_size=80, gamma=0.1, **_):
  return lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) 
Developer: pudae, Project: kaggle-humpback, Lines: 4, Source: scheduler_factory.py

Example 14: none

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def none(optimizer, last_epoch, **_):
  return lr_scheduler.StepLR(optimizer, step_size=10000000, last_epoch=last_epoch) 
Developer: pudae, Project: kaggle-humpback, Lines: 4, Source: scheduler_factory.py

Example 15: make_scheduler

# Required module import: from torch.optim import lr_scheduler [as alias]
# Or: from torch.optim.lr_scheduler import StepLR [as alias]
def make_scheduler(args, my_optimizer):
    if args.decay_type == 'step':
        scheduler_function = lrs.StepLR
        kwargs = {'step_size': args.lr_decay, 'gamma': args.gamma}
    elif args.decay_type.find('step') >= 0:
        scheduler_function = lrs.MultiStepLR
        milestones = list(map(lambda x: int(x), args.decay_type.split('-')[1:]))
        kwargs = {'milestones': milestones, 'gamma': args.gamma}

    return scheduler_function(my_optimizer, **kwargs) 
Developer: HolmesShuan, Project: OISR-PyTorch, Lines: 12, Source: utility.py


Note: The torch.optim.lr_scheduler.StepLR examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.