

Python optim.Adamax method code examples

This article collects typical usage examples of the torch.optim.Adamax method in Python. If you are wondering exactly how to use optim.Adamax, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the torch.optim module to which the method belongs.


A total of 15 code examples of the optim.Adamax method are shown below, sorted by popularity by default.
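Before the project-specific examples, here is a minimal, self-contained sketch of how optim.Adamax is typically wired into a training loop; the toy model, random data, and hyperparameter values are illustrative assumptions and are not taken from any of the projects below.

import torch
from torch import nn, optim

# Toy model and random data, used only to illustrate the optimizer API.
model = nn.Linear(10, 1)
optimizer = optim.Adamax(model.parameters(), lr=2e-3, weight_decay=0.0)
loss_fn = nn.MSELoss()

inputs = torch.randn(32, 10)
targets = torch.randn(32, 1)

for step in range(100):
    optimizer.zero_grad()                      # clear gradients from the previous step
    loss = loss_fn(model(inputs), targets)     # forward pass and loss
    loss.backward()                            # backpropagate
    optimizer.step()                           # Adamax parameter update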

Example 1: init_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Author: ailabstw, Project: justcopy-backend, Lines of code: 26, Source: model.py

Example 2: test_optimizer_init

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def test_optimizer_init(self):
        trainer = Trainer(**base_config, optimizer="sgd")
        trainer.fit(model, [dataloaders[0]])
        self.assertIsInstance(trainer.optimizer, optim.SGD)

        trainer = Trainer(**base_config, optimizer="adam")
        trainer.fit(model, [dataloaders[0]])
        self.assertIsInstance(trainer.optimizer, optim.Adam)

        trainer = Trainer(**base_config, optimizer="adamax")
        trainer.fit(model, [dataloaders[0]])
        self.assertIsInstance(trainer.optimizer, optim.Adamax)

        with self.assertRaisesRegex(ValueError, "Unrecognized optimizer"):
            trainer = Trainer(**base_config, optimizer="foo")
            trainer.fit(model, [dataloaders[0]]) 
Author: snorkel-team, Project: snorkel, Lines of code: 18, Source: test_trainer.py

Example 3: setup_model

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def setup_model(self, vocab_embedding):
        self.train_loss = AverageMeter()
        self.network = SDNet(self.opt, vocab_embedding)
        if self.use_cuda:
            self.log('Putting model into GPU')
            self.network.cuda()

        parameters = [p for p in self.network.parameters() if p.requires_grad]
        self.optimizer = optim.Adamax(parameters)
        if 'ADAM2' in self.opt:
            print('ADAM2')
            self.optimizer = optim.Adam(parameters, lr = 0.0001)

        self.updates = 0
        self.epoch_start = 0
        self.loss_func = F.cross_entropy 
Author: microsoft, Project: SDNet, Lines of code: 18, Source: SDNetTrainer.py

Example 4: _init_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _init_optimizer(self):
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.config['use_bert'] and self.config.get('finetune_bert', None):
            parameters += [p for p in self.config['bert_model'].parameters() if p.requires_grad]
        if self.config['optimizer'] == 'sgd':
            self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
                                       momentum=self.config['momentum'],
                                       weight_decay=self.config['weight_decay'])
        elif self.config['optimizer'] == 'adam':
            self.optimizer = optim.Adam(parameters, lr=self.config['learning_rate'])
        elif self.config['optimizer'] == 'adamax':
            self.optimizer = optim.Adamax(parameters, lr=self.config['learning_rate'])
        else:
            raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=0.5,
                                           patience=2, verbose=True)
Author: hugochan, Project: RL-based-Graph2Seq-for-NQG, Lines of code: 18, Source: model.py

Example 5: create_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def create_optimizer(parameters, opt):
    lr = opt.learning_rate
    # default learning rates:
    # sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, adamax - 0.002, asgd - 0.01, rmsprop - 0.01, rprop - 0.01
    optim_method = opt.optim_method.casefold()
    if optim_method == 'sgd':
        optimizer = optim.SGD(parameters, lr=lr if lr else 0.5, weight_decay=opt.weight_decay)
    elif optim_method == 'adagrad':
        optimizer = optim.Adagrad(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'adadelta':
        optimizer = optim.Adadelta(parameters, lr=lr if lr else 1, weight_decay=opt.weight_decay)
    elif optim_method == 'adam':
        optimizer = optim.Adam(parameters, lr=lr if lr else 0.001, weight_decay=opt.weight_decay)
    elif optim_method == 'adamax':
        optimizer = optim.Adamax(parameters, lr=lr if lr else 0.002, weight_decay=opt.weight_decay)
    elif optim_method == 'asgd':
        optimizer = optim.ASGD(parameters, lr=lr if lr else 0.01, t0=5000, weight_decay=opt.weight_decay)
    elif optim_method == 'rmsprop':
        optimizer = optim.RMSprop(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'rprop':
        optimizer = optim.Rprop(parameters, lr=lr if lr else 0.01)
    else:
        raise RuntimeError("Invalid optim method: " + opt.optim_method)
    return optimizer 
Author: RemiLeblond, Project: SeaRNN-open, Lines of code: 26, Source: optimization.py
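A hypothetical invocation of create_optimizer above, assuming opt is any object exposing the three attributes the function reads (learning_rate, optim_method, weight_decay); the SimpleNamespace and the values are illustrative, not how SeaRNN-open actually builds its options.

from types import SimpleNamespace
from torch import nn

model = nn.Linear(4, 2)
# learning_rate=None falls back to the per-optimizer default (0.002 for adamax).
opt = SimpleNamespace(learning_rate=None, optim_method='Adamax', weight_decay=1e-5)
optimizer = create_optimizer(model.parameters(), opt)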

Example 6: make_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
    return optimizer 
Author: siat-nlp, Project: MAMS-for-ABSA, Lines of code: 22, Source: make_optimizer.py
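A hypothetical config dictionary that satisfies the nested lookup in make_optimizer above; key names beyond the ones the function reads (mode, type, learning_rate, weight_decay, optimizer, momentum) are placeholders, and the function's optimizer table also references adabound.AdaBound, so the adabound package must be importable.

from torch import nn

model = nn.Linear(8, 3)
config = {
    'mode': 'term',                        # resolves to the 'aspect_term_model' section
    'aspect_term_model': {
        'type': 'some_model',              # hypothetical sub-model key
        'some_model': {
            'learning_rate': 2e-3,
            'weight_decay': 1e-5,
            'optimizer': 'adamax',         # selects optim.Adamax from the table
        },
    },
}
optimizer = make_optimizer(config, model)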

Example 7: init_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters, lr=2e-3,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Author: lixinsu, Project: RCZoo, Lines of code: 26, Source: model.py

Example 8: __init__

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def __init__(self, *, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
        """Implements Adamax algorithm (a variant of Adam based on infinity norm).

        It has been proposed in `Adam: A Method for Stochastic Optimization`__.

        Arguments:
            lr (float, optional): learning rate (default: 2e-3)
            betas (Tuple[float, float], optional): coefficients used for computing
                running averages of gradient and its square
            eps (float, optional): term added to the denominator to improve
                numerical stability (default: 1e-8)
            weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

        __ https://arxiv.org/abs/1412.6980
        """

        super().__init__(optim.Adamax, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) 
Author: yoshida-lab, Project: XenonPy, Lines of code: 19, Source: optimizer.py

Example 9: init_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, lr=self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adadelta':
            self.optimizer = optim.Adadelta(parameters, lr=self.args.learning_rate,
                                            rho=self.args.rho, eps=self.args.eps,
                                            weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Author: HKUST-KnowComp, Project: MnemonicReader, Lines of code: 30, Source: model.py

Example 10: init_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: network parameters
        """
        logger.info("init_optimizer")
        if self.args.fix_embeddings:
            for p in self.network.embedding.parameters():
                p.requires_grad = False
            for p in self.selector.embedding.parameters():
                p.requires_grad = False
        parameters = [p for p in self.network.parameters() if p.requires_grad] 
        parameters = parameters + [p for p in self.selector.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' %
                               self.args.optimizer)

    # --------------------------------------------------------------------------
    # Learning
    # -------------------------------------------------------------------------- 
Author: thunlp, Project: OpenQA, Lines of code: 30, Source: model.py

Example 11: get_optimiser

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def get_optimiser(name, net_params, optim_params):
    lr = optim_params['learning_rate']
    momentum = optim_params['momentum']
    weight_decay = optim_params['weight_decay']
    if(name == "SGD"):
        return optim.SGD(net_params, lr, 
            momentum = momentum, weight_decay = weight_decay)
    elif(name == "Adam"):
        return optim.Adam(net_params, lr, weight_decay = 1e-5)
    elif(name == "SparseAdam"):
        return optim.SparseAdam(net_params, lr)
    elif(name == "Adadelta"):
        return optim.Adadelta(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adagrad"):
        return optim.Adagrad(net_params, lr, weight_decay = weight_decay)
    elif(name == "Adamax"):
        return optim.Adamax(net_params, lr, weight_decay = weight_decay)
    elif(name == "ASGD"):
        return optim.ASGD(net_params, lr, weight_decay = weight_decay)
    elif(name == "LBFGS"):
        return optim.LBFGS(net_params, lr)
    elif(name == "RMSprop"):
        return optim.RMSprop(net_params, lr, momentum = momentum,
            weight_decay = weight_decay)
    elif(name == "Rprop"):
        return optim.Rprop(net_params, lr)
    else:
        raise ValueError("unsupported optimizer {0:}".format(name)) 
Author: HiLab-git, Project: PyMIC, Lines of code: 30, Source: get_optimizer.py
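A hypothetical call to get_optimiser above, assuming optim_params is a plain dict carrying the three keys the function reads unconditionally; the network and the values are illustrative.

from torch import nn

net = nn.Conv2d(1, 8, kernel_size=3)
optim_params = {'learning_rate': 1e-3, 'momentum': 0.9, 'weight_decay': 1e-4}
optimizer = get_optimiser("Adamax", net.parameters(), optim_params)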

Example 12: _init_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _init_optimizer(self):
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.config['optimizer'] == 'sgd':
            self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
                                       momentum=self.config['momentum'],
                                       weight_decay=self.config['weight_decay'])
        elif self.config['optimizer'] == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.config['weight_decay'])
        else:
            raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer']) 
Author: stanfordnlp, Project: coqa-baselines, Lines of code: 13, Source: model.py

Example 13: _set_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _set_optimizer(self) -> None:
        parameters = filter(lambda p: p.requires_grad, self.parameters())

        optimizer_config = self.train_config.optimizer_config
        optimizer_name = self.train_config.optimizer
        optimizer: optim.Optimizer  # type: ignore

        if optimizer_name == "sgd":
            optimizer = optim.SGD(  # type: ignore
                parameters,
                lr=self.train_config.lr,
                weight_decay=self.train_config.l2,
                **optimizer_config.sgd_config._asdict(),
            )
        elif optimizer_name == "adam":
            optimizer = optim.Adam(
                parameters,
                lr=self.train_config.lr,
                weight_decay=self.train_config.l2,
                **optimizer_config.adam_config._asdict(),
            )
        elif optimizer_name == "adamax":
            optimizer = optim.Adamax(  # type: ignore
                parameters,
                lr=self.train_config.lr,
                weight_decay=self.train_config.l2,
                **optimizer_config.adamax_config._asdict(),
            )
        else:
            raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")

        self.optimizer = optimizer 
Author: snorkel-team, Project: snorkel, Lines of code: 34, Source: label_model.py

Example 14: _set_optimizer

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _set_optimizer(self, model: nn.Module) -> None:
        optimizer_config = self.config.optimizer_config
        optimizer_name = self.config.optimizer

        parameters = filter(lambda p: p.requires_grad, model.parameters())

        optimizer: optim.Optimizer  # type: ignore

        if optimizer_name == "sgd":
            optimizer = optim.SGD(  # type: ignore
                parameters,
                lr=self.config.lr,
                weight_decay=self.config.l2,
                **optimizer_config.sgd_config._asdict(),
            )
        elif optimizer_name == "adam":
            optimizer = optim.Adam(
                parameters,
                lr=self.config.lr,
                weight_decay=self.config.l2,
                **optimizer_config.adam_config._asdict(),
            )
        elif optimizer_name == "adamax":
            optimizer = optim.Adamax(  # type: ignore
                parameters,
                lr=self.config.lr,
                weight_decay=self.config.l2,
                **optimizer_config.adamax_config._asdict(),
            )
        else:
            raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")

        logging.info(f"Using optimizer {optimizer}")

        self.optimizer = optimizer 
Author: snorkel-team, Project: snorkel, Lines of code: 37, Source: trainer.py

Example 15: test_optimizer_init

# Required module: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def test_optimizer_init(self):
        L = np.array([[0, -1, 0], [0, 1, 0]])
        label_model = LabelModel()

        label_model.fit(L, optimizer="sgd", n_epochs=1)
        self.assertIsInstance(label_model.optimizer, optim.SGD)

        label_model.fit(L, optimizer="adam", n_epochs=1)
        self.assertIsInstance(label_model.optimizer, optim.Adam)

        label_model.fit(L, optimizer="adamax", n_epochs=1)
        self.assertIsInstance(label_model.optimizer, optim.Adamax)

        with self.assertRaisesRegex(ValueError, "Unrecognized optimizer"):
            label_model.fit(L, optimizer="bad_optimizer", n_epochs=1) 
Author: snorkel-team, Project: snorkel, Lines of code: 17, Source: test_label_model.py


Note: The torch.optim.Adamax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.