

Python optim.Adamax Method Code Examples

This article collects typical usage examples of the torch.optim.Adamax method in Python. If you are wondering how optim.Adamax is called in practice, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples for the containing module, torch.optim.


The sections below present 15 code examples of the optim.Adamax method, sorted by popularity by default.
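Before the project-specific examples, here is a minimal, self-contained sketch of the basic usage pattern: construct optim.Adamax over a model's parameters, then run the usual zero_grad / backward / step loop. The model, data, and hyperparameter values below are placeholders chosen for illustration and are not taken from any of the projects listed later.

import torch
import torch.nn as nn
from torch import optim

model = nn.Linear(10, 1)                       # any nn.Module works the same way
# PyTorch defaults for Adamax: lr=0.002, betas=(0.9, 0.999), eps=1e-8, weight_decay=0
optimizer = optim.Adamax(model.parameters(), lr=2e-3, weight_decay=0)

x, y = torch.randn(32, 10), torch.randn(32, 1)
loss = nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()   # clear gradients accumulated by the previous step
loss.backward()         # compute gradients of the loss w.r.t. the parameters
optimizer.step()        # apply one Adamax update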

Example 1: init_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Developer ID: ailabstw, Project: justcopy-backend, Lines: 26, Source file: model.py

Example 2: test_optimizer_init

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def test_optimizer_init(self):
        trainer = Trainer(**base_config, optimizer="sgd")
        trainer.fit(model, [dataloaders[0]])
        self.assertIsInstance(trainer.optimizer, optim.SGD)

        trainer = Trainer(**base_config, optimizer="adam")
        trainer.fit(model, [dataloaders[0]])
        self.assertIsInstance(trainer.optimizer, optim.Adam)

        trainer = Trainer(**base_config, optimizer="adamax")
        trainer.fit(model, [dataloaders[0]])
        self.assertIsInstance(trainer.optimizer, optim.Adamax)

        with self.assertRaisesRegex(ValueError, "Unrecognized optimizer"):
            trainer = Trainer(**base_config, optimizer="foo")
            trainer.fit(model, [dataloaders[0]]) 
Developer ID: snorkel-team, Project: snorkel, Lines: 18, Source file: test_trainer.py

Example 3: setup_model

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def setup_model(self, vocab_embedding):
        self.train_loss = AverageMeter()
        self.network = SDNet(self.opt, vocab_embedding)
        if self.use_cuda:
            self.log('Putting model into GPU')
            self.network.cuda()

        parameters = [p for p in self.network.parameters() if p.requires_grad]
        self.optimizer = optim.Adamax(parameters)
        if 'ADAM2' in self.opt:
            print('ADAM2')
            self.optimizer = optim.Adam(parameters, lr = 0.0001)

        self.updates = 0
        self.epoch_start = 0
        self.loss_func = F.cross_entropy 
Developer ID: microsoft, Project: SDNet, Lines: 18, Source file: SDNetTrainer.py

Example 4: _init_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _init_optimizer(self):
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.config['use_bert'] and self.config.get('finetune_bert', None):
            parameters += [p for p in self.config['bert_model'].parameters() if p.requires_grad]
        if self.config['optimizer'] == 'sgd':
            self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
                                       momentum=self.config['momentum'],
                                       weight_decay=self.config['weight_decay'])
        elif self.config['optimizer'] == 'adam':
            self.optimizer = optim.Adam(parameters, lr=self.config['learning_rate'])
        elif self.config['optimizer'] == 'adamax':
            self.optimizer = optim.Adamax(parameters, lr=self.config['learning_rate'])
        else:
            raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=0.5, \
                    patience=2, verbose=True) 
Developer ID: hugochan, Project: RL-based-Graph2Seq-for-NQG, Lines: 18, Source file: model.py

Example 5: create_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def create_optimizer(parameters, opt):
    lr = opt.learning_rate
    # default learning rates:
    # sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, adamax - 0.002, asgd - 0.01, rmsprop - 0.01, rprop - 0.01
    optim_method = opt.optim_method.casefold()
    if optim_method == 'sgd':
        optimizer = optim.SGD(parameters, lr=lr if lr else 0.5, weight_decay=opt.weight_decay)
    elif optim_method == 'adagrad':
        optimizer = optim.Adagrad(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'adadelta':
        optimizer = optim.Adadelta(parameters, lr=lr if lr else 1, weight_decay=opt.weight_decay)
    elif optim_method == 'adam':
        optimizer = optim.Adam(parameters, lr=lr if lr else 0.001, weight_decay=opt.weight_decay)
    elif optim_method == 'adamax':
        optimizer = optim.Adamax(parameters, lr=lr if lr else 0.002, weight_decay=opt.weight_decay)
    elif optim_method == 'asgd':
        optimizer = optim.ASGD(parameters, lr=lr if lr else 0.01, t0=5000, weight_decay=opt.weight_decay)
    elif optim_method == 'rmsprop':
        optimizer = optim.RMSprop(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'rprop':
        optimizer = optim.Rprop(parameters, lr=lr if lr else 0.01)
    else:
        raise RuntimeError("Invalid optim method: " + opt.optim_method)
    return optimizer 
Developer ID: RemiLeblond, Project: SeaRNN-open, Lines: 26, Source file: optimization.py
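As a quick illustration of how the factory above might be called, the sketch below builds a hypothetical argparse-style options object; the attribute values are placeholders and are not taken from the SeaRNN-open project. Because learning_rate is left unset (falsy), the adamax branch falls back to its default of 0.002.

import torch
from argparse import Namespace

# Hypothetical options object; create_optimizer reads only these three attributes.
opt = Namespace(learning_rate=None, optim_method='Adamax', weight_decay=1e-4)

model = torch.nn.Linear(4, 2)                            # placeholder model
optimizer = create_optimizer(model.parameters(), opt)    # -> optim.Adamax(..., lr=0.002)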

Example 6: make_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
    return optimizer 
Developer ID: siat-nlp, Project: MAMS-for-ABSA, Lines: 22, Source file: make_optimizer.py

Example 7: init_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,lr=2e-3,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Developer ID: lixinsu, Project: RCZoo, Lines: 26, Source file: model.py

Example 8: __init__

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def __init__(self, *, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
        """Implements Adamax algorithm (a variant of Adam based on infinity norm).

        It has been proposed in `Adam: A Method for Stochastic Optimization`__.

        Arguments:
            lr (float, optional): learning rate (default: 2e-3)
            betas (Tuple[float, float], optional): coefficients used for computing
                running averages of gradient and its square
            eps (float, optional): term added to the denominator to improve
                numerical stability (default: 1e-8)
            weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

        __ https://arxiv.org/abs/1412.6980
        """

        super().__init__(optim.Adamax, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) 
Developer ID: yoshida-lab, Project: XenonPy, Lines: 19, Source file: optimizer.py
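The docstring above summarizes Adamax as the infinity-norm variant of Adam. As a rough sketch of what that means, one parameter update follows the rule below, written in plain NumPy for illustration; this mirrors the algorithm as described in the Adam paper rather than quoting PyTorch's internal implementation, and the scalar inputs in the usage line are placeholders.

import numpy as np

def adamax_step(param, grad, m, u, t, lr=2e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adamax update: Adam's first moment, with the second moment replaced by an infinity norm."""
    m = beta1 * m + (1 - beta1) * grad         # exponential moving average of the gradient
    u = np.maximum(beta2 * u, np.abs(grad))    # exponentially weighted infinity norm
    step_size = lr / (1 - beta1 ** t)          # bias correction for the first moment
    param = param - step_size * m / (u + eps)  # eps guards against division by zero
    return param, m, u

# One step on a scalar parameter, starting from zero moment estimates at t=1.
p, m, u = adamax_step(param=1.0, grad=0.5, m=0.0, u=0.0, t=1)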

Example 9: init_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, lr=self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adadelta':
            self.optimizer = optim.Adadelta(parameters, lr=self.args.learning_rate,
                                            rho=self.args.rho, eps=self.args.eps,
                                            weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Developer ID: HKUST-KnowComp, Project: MnemonicReader, Lines: 30, Source file: model.py

Example 10: init_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        logger.info("init_optimizer")
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
            for p in self.selector.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad] 
        parameters = parameters + [p for p in self.selector.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Developer ID: thunlp, Project: OpenQA, Lines: 30, Source file: model.py

Example 11: get_optimiser

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def get_optimiser(name, net_params, optim_params):
    lr = optim_params['learning_rate']
    momentum = optim_params['momentum']
    weight_decay = optim_params['weight_decay']
    if(name == "SGD"):
        return optim.SGD(net_params, lr, 
            momentum = momentum, weight_decay = weight_decay)
    elif(name == "Adam"):
        return optim.Adam(net_params, lr, weight_decay = 1e-5)
    elif(name == "SparseAdam"):
        return optim.SparseAdam(net_params, lr)
    elif(name == "Adadelta"):
        return optim.Adadelta(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adagrad"):
        return optim.Adagrad(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adamax"):
        return optim.Adamax(net_params, lr, weight_decay = weight_decay)
    elif(name == "ASGD"):
        return optim.ASGD(net_params, lr, weight_decay = weight_decay)
    elif(name == "LBFGS"):
        return optim.LBFGS(net_params, lr)
    elif(name == "RMSprop"):
        return optim.RMSprop(net_params, lr, momentum = momentum,
            weight_decay = weight_decay)
    elif(name == "Rprop"):
        return optim.Rprop(net_params, lr)
    else:
        raise ValueError("unsupported optimizer {0:}".format(name)) 
Developer ID: HiLab-git, Project: PyMIC, Lines: 30, Source file: get_optimizer.py

Example 12: _init_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _init_optimizer(self):
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.config['optimizer'] == 'sgd':
            self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
                                       momentum=self.config['momentum'],
                                       weight_decay=self.config['weight_decay'])
        elif self.config['optimizer'] == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.config['weight_decay'])
        else:
            raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer']) 
Developer ID: stanfordnlp, Project: coqa-baselines, Lines: 13, Source file: model.py

Example 13: _set_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _set_optimizer(self) -> None:
        parameters = filter(lambda p: p.requires_grad, self.parameters())

        optimizer_config = self.train_config.optimizer_config
        optimizer_name = self.train_config.optimizer
        optimizer: optim.Optimizer  # type: ignore

        if optimizer_name == "sgd":
            optimizer = optim.SGD(  # type: ignore
                parameters,
                lr=self.train_config.lr,
                weight_decay=self.train_config.l2,
                **optimizer_config.sgd_config._asdict(),
            )
        elif optimizer_name == "adam":
            optimizer = optim.Adam(
                parameters,
                lr=self.train_config.lr,
                weight_decay=self.train_config.l2,
                **optimizer_config.adam_config._asdict(),
            )
        elif optimizer_name == "adamax":
            optimizer = optim.Adamax(  # type: ignore
                parameters,
                lr=self.train_config.lr,
                weight_decay=self.train_config.l2,
                **optimizer_config.adamax_config._asdict(),
            )
        else:
            raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")

        self.optimizer = optimizer 
Developer ID: snorkel-team, Project: snorkel, Lines: 34, Source file: label_model.py

Example 14: _set_optimizer

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _set_optimizer(self, model: nn.Module) -> None:
        optimizer_config = self.config.optimizer_config
        optimizer_name = self.config.optimizer

        parameters = filter(lambda p: p.requires_grad, model.parameters())

        optimizer: optim.Optimizer  # type: ignore

        if optimizer_name == "sgd":
            optimizer = optim.SGD(  # type: ignore
                parameters,
                lr=self.config.lr,
                weight_decay=self.config.l2,
                **optimizer_config.sgd_config._asdict(),
            )
        elif optimizer_name == "adam":
            optimizer = optim.Adam(
                parameters,
                lr=self.config.lr,
                weight_decay=self.config.l2,
                **optimizer_config.adam_config._asdict(),
            )
        elif optimizer_name == "adamax":
            optimizer = optim.Adamax(  # type: ignore
                parameters,
                lr=self.config.lr,
                weight_decay=self.config.l2,
                **optimizer_config.adamax_config._asdict(),
            )
        else:
            raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")

        logging.info(f"Using optimizer {optimizer}")

        self.optimizer = optimizer 
Developer ID: snorkel-team, Project: snorkel, Lines: 37, Source file: trainer.py

Example 15: test_optimizer_init

# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def test_optimizer_init(self):
        L = np.array([[0, -1, 0], [0, 1, 0]])
        label_model = LabelModel()

        label_model.fit(L, optimizer="sgd", n_epochs=1)
        self.assertIsInstance(label_model.optimizer, optim.SGD)

        label_model.fit(L, optimizer="adam", n_epochs=1)
        self.assertIsInstance(label_model.optimizer, optim.Adam)

        label_model.fit(L, optimizer="adamax", n_epochs=1)
        self.assertIsInstance(label_model.optimizer, optim.Adamax)

        with self.assertRaisesRegex(ValueError, "Unrecognized optimizer"):
            label_model.fit(L, optimizer="bad_optimizer", n_epochs=1) 
Developer ID: snorkel-team, Project: snorkel, Lines: 17, Source file: test_label_model.py


Note: The torch.optim.Adamax examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, and copyright in the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.