

Python optim.SGD Code Examples

This article collects typical usage examples of the Python torch.optim.SGD class. If you are wondering what optim.SGD does, how to use it, or want to see it in real projects, the curated code examples below may help. You can also explore other usage examples from the torch.optim module.


The 15 optim.SGD code examples below are sorted by popularity by default. Upvote the ones you like or find useful; your feedback helps the system recommend better Python code examples.
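As a baseline before the examples, here is a minimal, self-contained sketch of the canonical optim.SGD training step. The model and data below are placeholders, not taken from any of the projects cited later:

import torch
import torch.nn as nn
from torch import optim

model = nn.Linear(10, 2)  # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
criterion = nn.CrossEntropyLoss()

inputs = torch.randn(4, 10)          # dummy batch
targets = torch.randint(0, 2, (4,))  # dummy labels

optimizer.zero_grad()                # clear accumulated gradients
loss = criterion(model(inputs), targets)
loss.backward()                      # compute gradients
optimizer.step()                     # apply the SGD update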

Example 1: create_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def create_optimizer(args, optim_params):
    if args.optimizer == 'sgd':
        return optim.SGD(optim_params, args.lr, momentum=args.momentum,
                         weight_decay=args.weight_decay)
    elif args.optimizer == 'adagrad':
        return optim.Adagrad(optim_params, args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optimizer == 'amsgrad':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay, amsgrad=True)
    elif args.optimizer == 'adabound':
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay)
    else:
        assert args.optimizer == 'amsbound'
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma, 
                        weight_decay=args.weight_decay, amsbound=True) 
Author: miraiaroha | Project: ACAN | Lines: 25 | Source: optimizer.py
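A brief usage sketch: create_optimizer expects an argparse-style namespace. The field names below match the function body above, but the values are illustrative assumptions:

import argparse
import torch.nn as nn

args = argparse.Namespace(optimizer='sgd', lr=0.1, momentum=0.9,
                          weight_decay=5e-4, beta1=0.9, beta2=0.999,
                          final_lr=0.1, gamma=1e-3)
model = nn.Linear(10, 2)  # placeholder model
optimizer = create_optimizer(args, model.parameters())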

Example 2: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(model_name):
    # `model`, LEARNING_RATE, and MOMENTUM are module-level globals in the source project.
    learning_rate = LEARNING_RATE
    if model_name == 'alexnet':
        # Pretrained layers keep the base lr; the final classifier layer gets 10x.
        param_group = [
            {'params': model.features.parameters(), 'lr': learning_rate}]
        for i in range(6):
            param_group += [{'params': model.classifier[i].parameters(),
                             'lr': learning_rate}]
        param_group += [{'params': model.classifier[6].parameters(),
                         'lr': learning_rate * 10}]
    elif model_name == 'resnet':
        param_group = []
        for k, v in model.named_parameters():
            if 'fc' not in k:
                param_group += [{'params': v, 'lr': learning_rate}]
            else:
                param_group += [{'params': v, 'lr': learning_rate * 10}]
    optimizer = optim.SGD(param_group, momentum=MOMENTUM)
    return optimizer


# Schedule learning rate 
Author: jindongwang | Project: transferlearning | Lines: 24 | Source: finetune_office31.py
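The trailing comment hints at a learning-rate schedule that accompanies this function in the source project. A hedged sketch of such a schedule, borrowing the DANN-style decay that Example 14 below implements (the constants 10 and 0.75 come from that example; applying them here is an assumption):

import math

def schedule_lr(optimizer, base_lrs, epoch, n_epochs):
    # Decay every group's lr by the same factor, so the 10x head/backbone
    # ratio set up in get_optimizer is preserved.
    decay = math.pow(1 + 10 * epoch / n_epochs, 0.75)
    for group, base_lr in zip(optimizer.param_groups, base_lrs):
        group['lr'] = base_lr / decay

Here base_lrs would be the list of per-group base rates captured when the groups are built, e.g. [learning_rate, ..., learning_rate * 10].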

Example 3: trainIters

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def trainIters(encoder, decoder, epochs, dataset, init_epochs, learning_rate=0.01):
    plot_losses = []

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss()

    for epoch in range(init_epochs, epochs+init_epochs):
        for i, (input_tensor, target_tensor) in enumerate(dataset.gen()):
            loss = train(input_tensor, target_tensor, encoder,
                         decoder, encoder_optimizer, decoder_optimizer, criterion)
            if loss:
                plot_losses.append(loss)
                if i % 1000 == 0:
                    print("epoch {}, step: {}, loss: {}".format(
                        epoch, i, loss
                    ))
            else:
                print(input_tensor, target_tensor)
        print("save model")
        torch.save(encoder.state_dict(), "epoch_{}_step_{}_encoder_loss_{}.pkl".format(epoch, i, loss))
        torch.save(decoder.state_dict(), "epoch_{}_step_{}_decoder_loss_{}.pkl".format(epoch, i, loss)) 
Author: EvilPsyCHo | Project: TaskBot | Lines: 24 | Source: tutorial.py

Example 4: main

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs | Project: pytorch-multigpu | Lines: 38 | Source: train.py

Example 5: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def __init__(self, model, corpus, args, name, tune_pi_only):
        self.model = model
        self.corpus = corpus
        self.args = args
        self.name = name
        self.raw_goal = None
        self.vec_goals_list = None
        self.logprobs = None
        print("Do we only tune the policy: {}".format(tune_pi_only))
        self.opt = optim.SGD(
            [p for n, p in self.model.named_parameters() if 'c2z' in n or not tune_pi_only],
            lr=self.args.rl_lr,
            momentum=self.args.momentum,
            nesterov=(self.args.nesterov and self.args.momentum > 0))
        # self.opt = optim.Adam(self.model.parameters(), lr=0.01)
        # self.opt = optim.RMSprop(self.model.parameters(), lr=0.0005)
        self.all_rewards = []
        self.all_grads = []
        self.model.train() 
Author: ConvLab | Project: ConvLab | Lines: 21 | Source: agent_task.py
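The notable detail here is the name-based parameter filter: with tune_pi_only set, only parameters whose names contain 'c2z' receive gradient updates. A standalone sketch of the same pattern, using a placeholder model and a placeholder name filter:

import torch.nn as nn
from torch import optim

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))  # placeholder model
tune_head_only = True
# Keep only parameters whose names match the filter (here: layer '1', the head).
params = [p for n, p in model.named_parameters() if '1' in n or not tune_head_only]
opt = optim.SGD(params, lr=0.01, momentum=0.9, nesterov=True)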

Example 6: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(option, params):
    opt_alg = 'sgd' if not hasattr(option, 'optim') else option.optim
    if opt_alg == 'sgd':
        optimizer = optim.SGD(params,
                              lr=option.lr_rate,
                              momentum=0.9,
                              nesterov=True,
                              weight_decay=option.l2_reg_weight)
    elif opt_alg == 'adam':
        optimizer = optim.Adam(params,
                               lr=option.lr_rate,
                               betas=(0.9, 0.999),
                               weight_decay=option.l2_reg_weight)
    else:
        # Avoid an UnboundLocalError on an unrecognized setting.
        raise ValueError("Unsupported optimizer: {}".format(opt_alg))

    return optimizer
Author: ozan-oktay | Project: Attention-Gated-Networks | Lines: 18 | Source: utils.py

Example 7: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(model):
    parameters = _get_paramters(model)
    opt_lower = cfg.SOLVER.OPTIMIZER.lower()

    if opt_lower == 'sgd':
        optimizer = optim.SGD(
            parameters, lr=cfg.SOLVER.LR, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(
            parameters, lr=cfg.SOLVER.LR, alpha=0.9, eps=cfg.SOLVER.EPSILON,
            momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    else:
        raise ValueError("Expected optimizer method in [sgd, adam, adadelta, rmsprop], but received "
                         "{}".format(opt_lower))

    return optimizer 
Author: LikeLy-Journey | Project: SegmenTron | Lines: 24 | Source: optimizer.py

Example 8: make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def make_optimizer(args, my_model):
    trainable = filter(lambda x: x.requires_grad, my_model.parameters())

    if args.optimizer == 'SGD':
        optimizer_function = optim.SGD
        kwargs = {'momentum': args.momentum}
    elif args.optimizer == 'ADAM':
        optimizer_function = optim.Adam
        kwargs = {
            'betas': args.betas,
            'eps': args.epsilon
        }
    elif args.optimizer == 'RMSprop':
        optimizer_function = optim.RMSprop
        kwargs = {'eps': args.epsilon}
    else:
        # Avoid an UnboundLocalError on an unrecognized setting.
        raise ValueError('Unsupported optimizer: {}'.format(args.optimizer))

    kwargs['lr'] = args.lr
    kwargs['weight_decay'] = args.weight_decay

    return optimizer_function(trainable, **kwargs)
Author: HolmesShuan | Project: OISR-PyTorch | Lines: 22 | Source: utility.py
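A brief usage sketch of the factory above; the argparse field values are illustrative assumptions:

import argparse
import torch.nn as nn

args = argparse.Namespace(optimizer='SGD', momentum=0.9, lr=0.1, weight_decay=1e-4)
my_model = nn.Linear(10, 2)  # placeholder model
optimizer = make_optimizer(args, my_model)

Separating the optimizer class from its keyword arguments keeps the shared options (lr, weight_decay) in one place, while each branch contributes only its optimizer-specific settings.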

Example 9: train_model

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def train_model(args):
    """Load the data, train the model, test the model, export / save the model
    """
    torch.manual_seed(args.seed)

    # Open our dataset
    train_loader, test_loader = data_utils.load_data(args.test_split,
                                                     args.batch_size)

    # Create the model
    net = model.SonarDNN().double()
    optimizer = optim.SGD(net.parameters(), lr=args.lr,
                          momentum=args.momentum, nesterov=False)

    # Train / Test the model
    for epoch in range(1, args.epochs + 1):
        train(net, train_loader, optimizer, epoch)
        test(net, test_loader)

    # Export the trained model
    torch.save(net.state_dict(), args.model_name)

    if args.model_dir:
        # Save the model to GCS
        data_utils.save_model(args.model_dir, args.model_name) 
Author: GoogleCloudPlatform | Project: cloudml-samples | Lines: 27 | Source: task.py

Example 10: _make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def _make_optimizer(self):
        if self.optimizer is not None:
            return

        # Also prepare optimizer:
        optimizer_name = self.hyperparameters["optimizer"].lower()
        if optimizer_name == "sgd":
            self.optimizer = optim.SGD(
                params=self.parameters(),
                lr=self.hyperparameters["learning_rate"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "rmsprop":
            self.optimizer = optim.RMSprop(
                params=self.parameters(),
                lr=self.hyperparameters["learning_rate"],
                alpha=self.hyperparameters["learning_rate_decay"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "adam":
            self.optimizer = optim.Adam(
                params=self.parameters(), lr=self.hyperparameters["learning_rate"],
            )
        else:
            raise Exception(
                'Unknown optimizer "%s".' % (self.hyperparameters["optimizer"]))
Author: microsoft | Project: machine-learning-for-programming-samples | Lines: 27 | Source: model_torch.py

Example 11: optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def optimizer(self):
        # The learning rate here is just a placeholder; it will be overwritten
        # at training time.
        return optim.SGD(self.parameters(), lr=0.1)
Author: ehsanik | Project: dogTorch | Lines: 6 | Source: basemodel.py
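The placeholder lr relies on a standard PyTorch mechanism: every optimizer exposes its settings through param_groups, so training code can overwrite the rate later. A minimal sketch:

# Overwrite the placeholder learning rate at training time:
for group in optimizer.param_groups:
    group['lr'] = 0.01  # the value actually used for this phase of training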

Example 12: set_train_model

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def set_train_model(self, model):
        print("Initializing training model...")
        self.model = model
        self.trainModel = self.model(config=self)
        #self.trainModel = nn.DataParallel(self.trainModel, device_ids=[2,3,4])
        
        self.trainModel.to(device)
        if self.optimizer is not None:
            pass
        elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
            self.optimizer = optim.Adagrad(
                self.trainModel.parameters(),
                lr=self.alpha,
                lr_decay=self.lr_decay,
                weight_decay=self.weight_decay,
            )
        elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
            self.optimizer = optim.Adadelta(
                self.trainModel.parameters(),
                lr=self.alpha,
                weight_decay=self.weight_decay,
            )
        elif self.opt_method == "Adam" or self.opt_method == "adam":
            self.optimizer = optim.Adam(
                self.trainModel.parameters(),
                lr=self.alpha,
                weight_decay=self.weight_decay,
            )
        else:
            self.optimizer = optim.SGD(
                self.trainModel.parameters(),
                lr=self.alpha,
                weight_decay=self.weight_decay,
            )
        print("Finish initializing") 
Author: daiquocnguyen | Project: ConvKB | Lines: 37 | Source: Config.py

Example 13: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(model):
    learning_rate = args.lr
    param_group = []
    param_group += [{'params': model.base_network.parameters(),
                     'lr': learning_rate}]
    param_group += [{'params': model.classifier_layer.parameters(),
                     'lr': learning_rate * 10}]
    optimizer = optim.SGD(param_group, momentum=args.momentum)
    return optimizer


# Schedule learning rate according to DANN if you want to (though I find this equation weird, so I did not use it)
Author: jindongwang | Project: transferlearning | Lines: 14 | Source: main.py
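The DANN schedule the trailing comment refers to anneals the learning rate as training progresses; Example 14 below implements it as lr / (1 + 10p)^0.75, with p the epoch progress. A standalone sketch (the function name is mine):

import math

def dann_lr(base_lr, epoch, n_epochs):
    p = epoch / n_epochs  # training progress in [0, 1]
    return base_lr / math.pow(1 + 10 * p, 0.75)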

Example 14: train

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def train(epoch, model, source_loader, target_loader):
    # The final fully connected layer's learning rate is 10x that of the earlier layers
    LEARNING_RATE = args.lr / math.pow((1 + 10 * (epoch - 1) / args.epochs), 0.75)
    print("learning rate:", LEARNING_RATE)
    if args.diff_lr:
        optimizer = torch.optim.SGD([
            {'params': model.sharedNet.parameters()},
            {'params': model.Inception.parameters(), 'lr': LEARNING_RATE},
        ], lr=LEARNING_RATE / 10, momentum=args.momentum, weight_decay=args.l2_decay)
    else:
        optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE,
                              momentum=args.momentum, weight_decay=args.l2_decay)
    model.train()
    tgt_iter = iter(target_loader)
    for batch_idx, (source_data, source_label) in enumerate(source_loader):
        try:
            target_data, _ = next(tgt_iter)
        except StopIteration:
            # Restart the target iterator once it is exhausted
            tgt_iter = iter(target_loader)
            target_data, _ = next(tgt_iter)
        
        if args.cuda:
            source_data, source_label = source_data.cuda(), source_label.cuda()
            target_data = target_data.cuda()
        optimizer.zero_grad()

        s_output, mmd_loss = model(source_data, target_data, source_label)
        soft_loss = F.nll_loss(F.log_softmax(s_output, dim=1), source_label)
        # print((2 / (1 + math.exp(-10 * (epoch) / args.epochs)) - 1))
        if args.gamma == 1:
            gamma = 2 / (1 + math.exp(-10 * (epoch) / args.epochs)) - 1
        if args.gamma == 2:
            gamma = epoch /args.epochs
        loss = soft_loss + gamma * mmd_loss
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tlabel_Loss: {:.6f}\tmmd_Loss: {:.6f}'.format(
                epoch, batch_idx * len(source_data), len(source_loader.dataset),
                100. * batch_idx / len(source_loader), loss.item(), soft_loss.item(), mmd_loss.item()))
Author: jindongwang | Project: transferlearning | Lines: 41 | Source: MRAN.py

Example 15: main

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, 
                          momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs | Project: pytorch-multigpu | Lines: 36 | Source: train.py


Note: The torch.optim.SGD examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce this article without permission.