

Python optim.SGD Code Examples

This article collects typical usage examples of the torch.optim.SGD class in Python. If you have been wondering what optim.SGD does, how it is used, and what working invocations look like, the curated examples below should help. You can also explore further usage examples from the torch.optim module.


Fifteen code examples of optim.SGD are shown below, ordered by popularity.
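For orientation before the examples, here is a minimal sketch of the canonical optim.SGD training step (standard PyTorch API; the linear model and random batch are placeholders):

import torch
from torch import nn, optim

model = nn.Linear(10, 2)                               # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))  # placeholder batch
optimizer.zero_grad()                                  # clear accumulated gradients
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()                                        # compute gradients
optimizer.step()                                       # apply the SGD update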

Example 1: create_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def create_optimizer(args, optim_params):
    if args.optimizer == 'sgd':
        return optim.SGD(optim_params, args.lr, momentum=args.momentum,
                         weight_decay=args.weight_decay)
    elif args.optimizer == 'adagrad':
        return optim.Adagrad(optim_params, args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optimizer == 'amsgrad':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay, amsgrad=True)
    elif args.optimizer == 'adabound':
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay)
    else:
        assert args.optimizer == 'amsbound'
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma, 
                        weight_decay=args.weight_decay, amsbound=True) 
Author: miraiaroha, Project: ACAN, Lines: 25, Source: optimizer.py
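A hypothetical call to the factory above, with argparse.Namespace standing in for the parsed command-line arguments (the field values are illustrative, not from the original project):

import torch.nn as nn
from argparse import Namespace

model = nn.Linear(4, 2)  # placeholder model
args = Namespace(optimizer='sgd', lr=0.1, momentum=0.9, weight_decay=1e-4)
optimizer = create_optimizer(args, model.parameters())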

Example 2: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(model_name):
    # `model`, LEARNING_RATE and MOMENTUM are module-level globals in the source file
    learning_rate = LEARNING_RATE
    if model_name == 'alexnet':
        param_group = [
            {'params': model.features.parameters(), 'lr': learning_rate}]
        for i in range(6):
            param_group += [{'params': model.classifier[i].parameters(),
                             'lr': learning_rate}]
        # the final classifier layer is trained with a 10x learning rate
        param_group += [{'params': model.classifier[6].parameters(),
                         'lr': learning_rate * 10}]
    elif model_name == 'resnet':
        param_group = []
        for k, v in model.named_parameters():
            if 'fc' not in k:
                param_group += [{'params': v, 'lr': learning_rate}]
            else:
                param_group += [{'params': v, 'lr': learning_rate * 10}]
    else:
        # without this branch, an unknown model name would leave param_group undefined
        raise ValueError('Unsupported model: {}'.format(model_name))
    optimizer = optim.SGD(param_group, momentum=MOMENTUM)
    return optimizer


# Schedule learning rate 
Author: jindongwang, Project: transferlearning, Lines: 24, Source: finetune_office31.py
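The pattern above works because optim.SGD accepts a list of parameter-group dicts, and a key set inside a group (here 'lr') overrides the optimizer-wide default for that group. A minimal self-contained sketch of the same idea:

import torch.nn as nn
from torch import optim

backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)
optimizer = optim.SGD(
    [{'params': backbone.parameters(), 'lr': 1e-3},
     {'params': head.parameters(), 'lr': 1e-2}],  # 10x rate for the new head
    momentum=0.9)
print([g['lr'] for g in optimizer.param_groups])  # [0.001, 0.01]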

Example 3: trainIters

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def trainIters(encoder, decoder, epochs, dataset, init_epochs, learning_rate=0.01):
    plot_losses = []

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss()

    for epoch in range(init_epochs, epochs+init_epochs):
        for i, (input_tensor, target_tensor) in enumerate(dataset.gen()):
            loss = train(input_tensor, target_tensor, encoder,
                         decoder, encoder_optimizer, decoder_optimizer, criterion)
            if loss:
                plot_losses.append(loss)
                if i % 1000 == 0:
                    print("epoch {}, step: {}, loss: {}".format(
                        epoch, i, loss
                    ))
            else:
                print(input_tensor, target_tensor)
        print("save model")
        torch.save(encoder.state_dict(), "epoch_{}_step_{}_encoder_loss_{}.pkl".format(epoch, i, loss))
        torch.save(decoder.state_dict(), "epoch_{}_step_{}_decoder_loss_{}.pkl".format(epoch, i, loss)) 
Author: EvilPsyCHo, Project: TaskBot, Lines: 24, Source: tutorial.py
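nn.NLLLoss expects log-probabilities, so the decoder trained here presumably ends in a LogSoftmax layer; that combination is numerically equivalent to nn.CrossEntropyLoss on raw logits, as this quick check shows:

import torch
from torch import nn

logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), target)
ce = nn.CrossEntropyLoss()(logits, target)
print(torch.allclose(nll, ce))  # True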

Example 4: main

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs, Project: pytorch-multigpu, Lines: 38, Source: train.py

Example 5: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def __init__(self, model, corpus, args, name, tune_pi_only):
        self.model = model
        self.corpus = corpus
        self.args = args
        self.name = name
        self.raw_goal = None
        self.vec_goals_list = None
        self.logprobs = None
        print("Do we only tune the policy: {}".format(tune_pi_only))
        self.opt = optim.SGD(
            [p for n, p in self.model.named_parameters() if 'c2z' in n or not tune_pi_only],
            lr=self.args.rl_lr,
            momentum=self.args.momentum,
            nesterov=(self.args.nesterov and self.args.momentum > 0))
        # self.opt = optim.Adam(self.model.parameters(), lr=0.01)
        # self.opt = optim.RMSprop(self.model.parameters(), lr=0.0005)
        self.all_rewards = []
        self.all_grads = []
        self.model.train() 
Author: ConvLab, Project: ConvLab, Lines: 21, Source: agent_task.py
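The guard nesterov=(args.nesterov and args.momentum > 0) matters because torch.optim.SGD rejects Nesterov momentum without a positive momentum value:

import torch.nn as nn
from torch import optim

try:
    optim.SGD(nn.Linear(2, 2).parameters(), lr=0.1, momentum=0.0, nesterov=True)
except ValueError as e:
    print(e)  # "Nesterov momentum requires a momentum and zero dampening"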

Example 6: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(option, params):
    opt_alg = 'sgd' if not hasattr(option, 'optim') else option.optim
    if opt_alg == 'sgd':
        optimizer = optim.SGD(params,
                              lr=option.lr_rate,
                              momentum=0.9,
                              nesterov=True,
                              weight_decay=option.l2_reg_weight)

    elif opt_alg == 'adam':
        optimizer = optim.Adam(params,
                               lr=option.lr_rate,
                               betas=(0.9, 0.999),
                               weight_decay=option.l2_reg_weight)
    else:
        # without this branch an unknown optimizer name would leave
        # `optimizer` undefined and fail at the return statement
        raise ValueError('Unsupported optimizer: {}'.format(opt_alg))

    return optimizer 
Author: ozan-oktay, Project: Attention-Gated-Networks, Lines: 18, Source: utils.py

Example 7: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(model):
    parameters = _get_paramters(model)
    opt_lower = cfg.SOLVER.OPTIMIZER.lower()

    if opt_lower == 'sgd':
        optimizer = optim.SGD(
            parameters, lr=cfg.SOLVER.LR, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(
            parameters, lr=cfg.SOLVER.LR, alpha=0.9, eps=cfg.SOLVER.EPSILON,
            momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    else:
        raise ValueError("Expected optimizer method in [sgd, adam, adadelta, rmsprop], but received "
                         "{}".format(opt_lower))

    return optimizer 
Author: LikeLy-Journey, Project: SegmenTron, Lines: 24, Source: optimizer.py

Example 8: make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def make_optimizer(args, my_model):
    trainable = filter(lambda x: x.requires_grad, my_model.parameters())

    if args.optimizer == 'SGD':
        optimizer_function = optim.SGD
        kwargs = {'momentum': args.momentum}
    elif args.optimizer == 'ADAM':
        optimizer_function = optim.Adam
        kwargs = {
            'betas': args.betas,
            'eps': args.epsilon
        }
    elif args.optimizer == 'RMSprop':
        optimizer_function = optim.RMSprop
        kwargs = {'eps': args.epsilon}
    else:
        # guard against unknown optimizer names, which would otherwise
        # leave optimizer_function and kwargs undefined
        raise ValueError('Unsupported optimizer: {}'.format(args.optimizer))

    kwargs['lr'] = args.lr
    kwargs['weight_decay'] = args.weight_decay
    
    return optimizer_function(trainable, **kwargs) 
Author: HolmesShuan, Project: OISR-PyTorch, Lines: 22, Source: utility.py
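A hypothetical call, again with argparse.Namespace standing in for the parsed arguments (values are illustrative only):

import torch.nn as nn
from argparse import Namespace

model = nn.Linear(4, 2)  # placeholder model
args = Namespace(optimizer='SGD', momentum=0.9, lr=0.1, weight_decay=1e-4)
optimizer = make_optimizer(args, model)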

Example 9: train_model

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def train_model(args):
    """Load the data, train the model, test the model, export / save the model
    """
    torch.manual_seed(args.seed)

    # Open our dataset
    train_loader, test_loader = data_utils.load_data(args.test_split,
                                                     args.batch_size)

    # Create the model
    net = model.SonarDNN().double()
    optimizer = optim.SGD(net.parameters(), lr=args.lr,
                          momentum=args.momentum, nesterov=False)

    # Train / Test the model
    for epoch in range(1, args.epochs + 1):
        train(net, train_loader, optimizer, epoch)
        test(net, test_loader)

    # Export the trained model
    torch.save(net.state_dict(), args.model_name)

    if args.model_dir:
        # Save the model to GCS
        data_utils.save_model(args.model_dir, args.model_name) 
Author: GoogleCloudPlatform, Project: cloudml-samples, Lines: 27, Source: task.py

Example 10: _make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def _make_optimizer(self):
        if self.optimizer is not None:
            return

        # Also prepare optimizer:
        optimizer_name = self.hyperparameters["optimizer"].lower()
        if optimizer_name == "sgd":
            self.optimizer = optim.SGD(
                params=self.parameters(),
                lr=self.hyperparameters["learning_rate"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "rmsprop":
            self.optimizer = optim.RMSprop(
                params=self.parameters(),
                lr=self.hyperparameters["learning_rate"],
                alpha=self.params["learning_rate_decay"],
                momentum=self.params["momentum"],
            )
        elif optimizer_name == "adam":
            self.optimizer = optim.Adam(
                params=self.parameters(), lr=self.hyperparameters["learning_rate"],
            )
        else:
            raise Exception('Unknown optimizer "%s".' % (self.params["optimizer"])) 
Author: microsoft, Project: machine-learning-for-programming-samples, Lines: 27, Source: model_torch.py

Example 11: optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def optimizer(self):
        # The learning rate here is just a placeholder; it will be
        # overwritten at training time.
        return optim.SGD(self.parameters(), lr=0.1) 
Author: ehsanik, Project: dogTorch, Lines: 6, Source: basemodel.py
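Overwriting the placeholder rate later goes through the optimizer's param_groups, which is also how PyTorch's LR schedulers mutate the rate internally. A minimal sketch:

import torch.nn as nn
from torch import optim

optimizer = optim.SGD(nn.Linear(2, 2).parameters(), lr=0.1)
for group in optimizer.param_groups:
    group['lr'] = 0.01  # replace the placeholder rate at training time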

Example 12: set_train_model

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def set_train_model(self, model):
        print("Initializing training model...")
        self.model = model
        self.trainModel = self.model(config=self)
        #self.trainModel = nn.DataParallel(self.trainModel, device_ids=[2,3,4])
        
        self.trainModel.to(device)
        if self.optimizer is not None:
            pass
        elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
            self.optimizer = optim.Adagrad(
                self.trainModel.parameters(),
                lr=self.alpha,
                lr_decay=self.lr_decay,
                weight_decay=self.weight_decay,
            )
        elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
            self.optimizer = optim.Adadelta(
                self.trainModel.parameters(),
                lr=self.alpha,
                weight_decay=self.weight_decay,
            )
        elif self.opt_method == "Adam" or self.opt_method == "adam":
            self.optimizer = optim.Adam(
                self.trainModel.parameters(),
                lr=self.alpha,
                weight_decay=self.weight_decay,
            )
        else:
            self.optimizer = optim.SGD(
                self.trainModel.parameters(),
                lr=self.alpha,
                weight_decay=self.weight_decay,
            )
        print("Finish initializing") 
Author: daiquocnguyen, Project: ConvKB, Lines: 37, Source: Config.py
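The chain of case-sensitive string comparisons above can be collapsed into a lookup keyed on the lower-cased method name; a simplified sketch of that alternative (the Adagrad-specific lr_decay argument is omitted here, and SGD remains the fallback, matching the code above):

from torch import optim

OPTIMIZERS = {'adagrad': optim.Adagrad, 'adadelta': optim.Adadelta,
              'adam': optim.Adam}

def build_optimizer(opt_method, params, lr, weight_decay):
    cls = OPTIMIZERS.get(opt_method.lower(), optim.SGD)  # SGD as fallback
    return cls(params, lr=lr, weight_decay=weight_decay)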

Example 13: get_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def get_optimizer(model):
    learning_rate = args.lr
    param_group = []
    param_group += [{'params': model.base_network.parameters(),
                     'lr': learning_rate}]
    param_group += [{'params': model.classifier_layer.parameters(),
                     'lr': learning_rate * 10}]
    optimizer = optim.SGD(param_group, momentum=args.momentum)
    return optimizer


# Schedule the learning rate according to DANN if you want to (though I find that equation weird, so I did not use it) 
Author: jindongwang, Project: transferlearning, Lines: 14, Source: main.py
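The DANN schedule the comment refers to anneals the learning rate as lr_p = lr_0 / (1 + alpha * p) ** beta, with alpha = 10, beta = 0.75 and p the training progress in [0, 1]; Example 14 below implements exactly this per epoch. As a standalone helper:

import math

def dann_lr(lr0, epoch, num_epochs, alpha=10.0, beta=0.75):
    p = epoch / num_epochs                    # training progress in [0, 1]
    return lr0 / math.pow(1 + alpha * p, beta)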

Example 14: train

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def train(epoch, model, source_loader, target_loader):
    # The final fully connected layer uses a learning rate 10x that of the earlier layers
    LEARNING_RATE = args.lr / math.pow((1 + 10 * (epoch - 1) / args.epochs), 0.75)
    print("learning rate:", LEARNING_RATE)
    if args.diff_lr:
        optimizer = torch.optim.SGD([
        {'params': model.sharedNet.parameters()},
        {'params': model.Inception.parameters(), 'lr': LEARNING_RATE},
        ], lr=LEARNING_RATE / 10, momentum=args.momentum, weight_decay=args.l2_decay)
    else:
        optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=args.momentum, weight_decay=args.l2_decay)
    model.train()
    tgt_iter = iter(target_loader)
    for batch_idx, (source_data, source_label) in enumerate(source_loader):
        try:
            target_data, _ = next(tgt_iter)
        except StopIteration:
            # the target loader is exhausted; restart it
            tgt_iter = iter(target_loader)
            target_data, _ = next(tgt_iter)
        
        if args.cuda:
            source_data, source_label = source_data.cuda(), source_label.cuda()
            target_data = target_data.cuda()
        optimizer.zero_grad()

        s_output, mmd_loss = model(source_data, target_data, source_label)
        soft_loss = F.nll_loss(F.log_softmax(s_output, dim=1), source_label)
        # gamma weights the MMD term and ramps up over training;
        # assumes args.gamma is either 1 (sigmoid ramp) or 2 (linear ramp)
        if args.gamma == 1:
            gamma = 2 / (1 + math.exp(-10 * epoch / args.epochs)) - 1
        elif args.gamma == 2:
            gamma = epoch / args.epochs
        loss = soft_loss + gamma * mmd_loss
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tlabel_Loss: {:.6f}\tmmd_Loss: {:.6f}'.format(
                epoch, batch_idx * len(source_data), len(source_loader.dataset),
                100. * batch_idx / len(source_loader), loss.item(), soft_loss.item(), mmd_loss.item())) 
Author: jindongwang, Project: transferlearning, Lines: 41, Source: MRAN.py
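The gamma=1 option ramps the MMD weight along 2 / (1 + exp(-10 p)) - 1, which rises smoothly from 0 toward 1 as training progresses, so the transfer loss is phased in gradually; gamma=2 is a plain linear ramp. As a standalone helper:

import math

def mmd_weight(epoch, num_epochs):
    p = epoch / num_epochs
    return 2.0 / (1.0 + math.exp(-10.0 * p)) - 1.0  # 0 -> ~1 over training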

Example 15: main

# Required import: from torch import optim [as alias]
# Or: from torch.optim import SGD [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, 
                          momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs, Project: pytorch-multigpu, Lines: 36, Source: train.py


Note: The torch.optim.SGD examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects by their respective contributors, and copyright in the source code remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this compilation without permission.