This article collects typical usage examples of the Python method torch.optim.Adamax. If you are wondering what optim.Adamax does, how to call it, or how it is used in real projects, the curated examples below should help. You can also explore further usage examples from the containing module, torch.optim.
The following 15 code examples of optim.Adamax are listed below, ordered by popularity by default.
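Before the project examples, here is a minimal, self-contained sketch of how torch.optim.Adamax is typically wired into a training loop. The model, data, and loop length are placeholders chosen purely for illustration; 2e-3, (0.9, 0.999), 1e-8, and 0 are the optimizer's documented defaults.

import torch
from torch import nn, optim

# Toy model and data, purely for illustration.
model = nn.Linear(10, 1)
inputs = torch.randn(32, 10)
targets = torch.randn(32, 1)

# Adamax keeps Adam's defaults except for the learning rate (2e-3).
optimizer = optim.Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999),
                         eps=1e-8, weight_decay=0)
loss_fn = nn.MSELoss()

for step in range(100):
    optimizer.zero_grad()                      # clear gradients from the previous step
    loss = loss_fn(model(inputs), targets)
    loss.backward()                            # compute gradients
    optimizer.step()                           # apply the Adamax update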
Example 1: init_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
    """Initialize an optimizer for the free parameters of the network.

    Args:
        state_dict: network parameters
    """
    if self.args.fix_embeddings:
        for p in self.network.embedding.parameters():
            p.requires_grad = False
    parameters = [p for p in self.network.parameters() if p.requires_grad]
    if self.args.optimizer == 'sgd':
        self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                   momentum=self.args.momentum,
                                   weight_decay=self.args.weight_decay)
    elif self.args.optimizer == 'adamax':
        # Adamax falls back to its default learning rate (2e-3) here.
        self.optimizer = optim.Adamax(parameters,
                                      weight_decay=self.args.weight_decay)
    else:
        raise RuntimeError('Unsupported optimizer: %s' %
                           self.args.optimizer)
# --------------------------------------------------------------------------
# Learning
# --------------------------------------------------------------------------
Example 2: test_optimizer_init
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def test_optimizer_init(self):
    trainer = Trainer(**base_config, optimizer="sgd")
    trainer.fit(model, [dataloaders[0]])
    self.assertIsInstance(trainer.optimizer, optim.SGD)

    trainer = Trainer(**base_config, optimizer="adam")
    trainer.fit(model, [dataloaders[0]])
    self.assertIsInstance(trainer.optimizer, optim.Adam)

    trainer = Trainer(**base_config, optimizer="adamax")
    trainer.fit(model, [dataloaders[0]])
    self.assertIsInstance(trainer.optimizer, optim.Adamax)

    # An unknown optimizer name should be rejected.
    with self.assertRaisesRegex(ValueError, "Unrecognized optimizer"):
        trainer = Trainer(**base_config, optimizer="foo")
        trainer.fit(model, [dataloaders[0]])
Example 3: setup_model
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def setup_model(self, vocab_embedding):
    self.train_loss = AverageMeter()
    self.network = SDNet(self.opt, vocab_embedding)
    if self.use_cuda:
        self.log('Putting model into GPU')
        self.network.cuda()

    parameters = [p for p in self.network.parameters() if p.requires_grad]
    # Adamax with its default learning rate; optionally switch to Adam.
    self.optimizer = optim.Adamax(parameters)
    if 'ADAM2' in self.opt:
        print('ADAM2')
        self.optimizer = optim.Adam(parameters, lr=0.0001)

    self.updates = 0
    self.epoch_start = 0
    self.loss_func = F.cross_entropy
Example 4: _init_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _init_optimizer(self):
    parameters = [p for p in self.network.parameters() if p.requires_grad]
    if self.config['use_bert'] and self.config.get('finetune_bert', None):
        parameters += [p for p in self.config['bert_model'].parameters() if p.requires_grad]
    if self.config['optimizer'] == 'sgd':
        self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
                                   momentum=self.config['momentum'],
                                   weight_decay=self.config['weight_decay'])
    elif self.config['optimizer'] == 'adam':
        self.optimizer = optim.Adam(parameters, lr=self.config['learning_rate'])
    elif self.config['optimizer'] == 'adamax':
        self.optimizer = optim.Adamax(parameters, lr=self.config['learning_rate'])
    else:
        raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
    # Halve the learning rate when the monitored metric stops improving.
    self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=0.5,
                                       patience=2, verbose=True)
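Example 4 pairs Adamax with ReduceLROnPlateau in mode='max', so the scheduler expects to be fed a metric that should increase (for example a validation F1 or accuracy). Below is a minimal, self-contained sketch of how such a scheduler is driven; the toy model and the constant validation score are placeholders, not part of the original project.

import torch
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = nn.Linear(4, 1)                                   # toy model for illustration
optimizer = optim.Adamax(model.parameters(), lr=2e-3)
scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=2)

for epoch in range(10):
    # ... training steps omitted ...
    val_score = 0.5                                       # placeholder validation metric
    # In mode='max' the scheduler waits for the metric to stop increasing,
    # then multiplies the learning rate by `factor` after `patience` epochs.
    scheduler.step(val_score)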
Example 5: create_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def create_optimizer(parameters, opt):
    lr = opt.learning_rate
    # Default learning rates when none is given:
    # sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, adamax - 0.002,
    # asgd - 0.01, rmsprop - 0.01, rprop - 0.01
    optim_method = opt.optim_method.casefold()
    if optim_method == 'sgd':
        optimizer = optim.SGD(parameters, lr=lr if lr else 0.5, weight_decay=opt.weight_decay)
    elif optim_method == 'adagrad':
        optimizer = optim.Adagrad(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'adadelta':
        optimizer = optim.Adadelta(parameters, lr=lr if lr else 1, weight_decay=opt.weight_decay)
    elif optim_method == 'adam':
        optimizer = optim.Adam(parameters, lr=lr if lr else 0.001, weight_decay=opt.weight_decay)
    elif optim_method == 'adamax':
        optimizer = optim.Adamax(parameters, lr=lr if lr else 0.002, weight_decay=opt.weight_decay)
    elif optim_method == 'asgd':
        optimizer = optim.ASGD(parameters, lr=lr if lr else 0.01, t0=5000, weight_decay=opt.weight_decay)
    elif optim_method == 'rmsprop':
        optimizer = optim.RMSprop(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'rprop':
        optimizer = optim.Rprop(parameters, lr=lr if lr else 0.01)
    else:
        raise RuntimeError("Invalid optim method: " + opt.optim_method)
    return optimizer
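A quick way to exercise create_optimizer is to pass it an argparse.Namespace carrying the three attributes it reads. The attribute values below are hypothetical and only for illustration; they are not taken from the original project.

from argparse import Namespace
from torch import nn

model = nn.Linear(8, 2)                                               # toy model
opt = Namespace(optim_method='Adamax', learning_rate=None, weight_decay=1e-4)

# learning_rate is None, so the adamax branch falls back to 0.002;
# casefold() lowercases 'Adamax' before the name comparison.
optimizer = create_optimizer(model.parameters(), opt)
print(type(optimizer).__name__)   # -> Adamax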
Example 6: make_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    # Map optimizer names from the config onto their torch.optim classes.
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound,
    }
    # Note: momentum is only accepted by some optimizers (e.g. SGD, RMSprop),
    # so it should only appear in the config for those.
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr,
                                             weight_decay=weight_decay,
                                             momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr,
                                             weight_decay=weight_decay)
    return optimizer
Example 7: init_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
    """Initialize an optimizer for the free parameters of the network.

    Args:
        state_dict: network parameters
    """
    if self.args.fix_embeddings:
        for p in self.network.embedding.parameters():
            p.requires_grad = False
    parameters = [p for p in self.network.parameters() if p.requires_grad]
    if self.args.optimizer == 'sgd':
        self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                   momentum=self.args.momentum,
                                   weight_decay=self.args.weight_decay)
    elif self.args.optimizer == 'adamax':
        # Explicitly pins Adamax to its default learning rate of 2e-3.
        self.optimizer = optim.Adamax(parameters, lr=2e-3,
                                      weight_decay=self.args.weight_decay)
    else:
        raise RuntimeError('Unsupported optimizer: %s' %
                           self.args.optimizer)
# --------------------------------------------------------------------------
# Learning
# --------------------------------------------------------------------------
Example 8: __init__
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def __init__(self, *, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
    """Implements the Adamax algorithm (a variant of Adam based on the infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

    Arguments:
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of the gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    __ https://arxiv.org/abs/1412.6980
    """
    super().__init__(optim.Adamax, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
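As a reference for what the wrapped optimizer computes, here is a rough sketch of the Adamax update from the Adam paper (arXiv:1412.6980): Adam's second-moment estimate is replaced by an exponentially weighted infinity norm of the gradients. This is a simplified, single-tensor illustration, not the torch.optim.Adamax implementation (which also handles parameter groups and weight decay).

import torch

def adamax_step(param, grad, m, u, t, lr=2e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    """One simplified Adamax update for a single tensor (illustrative only)."""
    m = beta1 * m + (1 - beta1) * grad               # first-moment estimate
    u = torch.maximum(beta2 * u, grad.abs() + eps)   # infinity-norm estimate
    # Only the first moment needs bias correction; the max-based u does not.
    param = param - (lr / (1 - beta1 ** t)) * m / u
    return param, m, u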
Example 9: init_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
    """Initialize an optimizer for the free parameters of the network.

    Args:
        state_dict: network parameters
    """
    if self.args.fix_embeddings:
        for p in self.network.embedding.parameters():
            p.requires_grad = False
    parameters = [p for p in self.network.parameters() if p.requires_grad]
    if self.args.optimizer == 'sgd':
        self.optimizer = optim.SGD(parameters, lr=self.args.learning_rate,
                                   momentum=self.args.momentum,
                                   weight_decay=self.args.weight_decay)
    elif self.args.optimizer == 'adamax':
        self.optimizer = optim.Adamax(parameters,
                                      weight_decay=self.args.weight_decay)
    elif self.args.optimizer == 'adadelta':
        self.optimizer = optim.Adadelta(parameters, lr=self.args.learning_rate,
                                        rho=self.args.rho, eps=self.args.eps,
                                        weight_decay=self.args.weight_decay)
    else:
        raise RuntimeError('Unsupported optimizer: %s' %
                           self.args.optimizer)
# --------------------------------------------------------------------------
# Learning
# --------------------------------------------------------------------------
Example 10: init_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def init_optimizer(self, state_dict=None):
    """Initialize an optimizer for the free parameters of the network.

    Args:
        state_dict: network parameters
    """
    logger.info("init_optimizer")
    if self.args.fix_embeddings:
        for p in self.network.embedding.parameters():
            p.requires_grad = False
        for p in self.selector.embedding.parameters():
            p.requires_grad = False
    # Optimize the trainable parameters of both the network and the selector.
    parameters = [p for p in self.network.parameters() if p.requires_grad]
    parameters = parameters + [p for p in self.selector.parameters() if p.requires_grad]
    if self.args.optimizer == 'sgd':
        self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                   momentum=self.args.momentum,
                                   weight_decay=self.args.weight_decay)
    elif self.args.optimizer == 'adamax':
        self.optimizer = optim.Adamax(parameters,
                                      weight_decay=self.args.weight_decay)
    else:
        raise RuntimeError('Unsupported optimizer: %s' %
                           self.args.optimizer)
# --------------------------------------------------------------------------
# Learning
# --------------------------------------------------------------------------
Example 11: get_optimiser
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def get_optimiser(name, net_params, optim_params):
    lr = optim_params['learning_rate']
    momentum = optim_params['momentum']
    weight_decay = optim_params['weight_decay']
    if name == "SGD":
        return optim.SGD(net_params, lr,
                         momentum=momentum, weight_decay=weight_decay)
    elif name == "Adam":
        return optim.Adam(net_params, lr, weight_decay=1e-5)
    elif name == "SparseAdam":
        return optim.SparseAdam(net_params, lr)
    elif name == "Adadelta":
        return optim.Adadelta(net_params, lr, weight_decay=weight_decay)
    elif name == "Adagrad":
        return optim.Adagrad(net_params, lr, weight_decay=weight_decay)
    elif name == "Adamax":
        return optim.Adamax(net_params, lr, weight_decay=weight_decay)
    elif name == "ASGD":
        return optim.ASGD(net_params, lr, weight_decay=weight_decay)
    elif name == "LBFGS":
        return optim.LBFGS(net_params, lr)
    elif name == "RMSprop":
        return optim.RMSprop(net_params, lr, momentum=momentum,
                             weight_decay=weight_decay)
    elif name == "Rprop":
        return optim.Rprop(net_params, lr)
    else:
        raise ValueError("unsupported optimizer {0:}".format(name))
Example 12: _init_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _init_optimizer(self):
    parameters = [p for p in self.network.parameters() if p.requires_grad]
    if self.config['optimizer'] == 'sgd':
        self.optimizer = optim.SGD(parameters, self.config['learning_rate'],
                                   momentum=self.config['momentum'],
                                   weight_decay=self.config['weight_decay'])
    elif self.config['optimizer'] == 'adamax':
        self.optimizer = optim.Adamax(parameters,
                                      weight_decay=self.config['weight_decay'])
    else:
        raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
Example 13: _set_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _set_optimizer(self) -> None:
    parameters = filter(lambda p: p.requires_grad, self.parameters())
    optimizer_config = self.train_config.optimizer_config
    optimizer_name = self.train_config.optimizer

    optimizer: optim.Optimizer  # type: ignore
    if optimizer_name == "sgd":
        optimizer = optim.SGD(  # type: ignore
            parameters,
            lr=self.train_config.lr,
            weight_decay=self.train_config.l2,
            **optimizer_config.sgd_config._asdict(),
        )
    elif optimizer_name == "adam":
        optimizer = optim.Adam(
            parameters,
            lr=self.train_config.lr,
            weight_decay=self.train_config.l2,
            **optimizer_config.adam_config._asdict(),
        )
    elif optimizer_name == "adamax":
        optimizer = optim.Adamax(  # type: ignore
            parameters,
            lr=self.train_config.lr,
            weight_decay=self.train_config.l2,
            **optimizer_config.adamax_config._asdict(),
        )
    else:
        raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")

    self.optimizer = optimizer
Example 14: _set_optimizer
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def _set_optimizer(self, model: nn.Module) -> None:
    optimizer_config = self.config.optimizer_config
    optimizer_name = self.config.optimizer
    parameters = filter(lambda p: p.requires_grad, model.parameters())

    optimizer: optim.Optimizer  # type: ignore
    if optimizer_name == "sgd":
        optimizer = optim.SGD(  # type: ignore
            parameters,
            lr=self.config.lr,
            weight_decay=self.config.l2,
            **optimizer_config.sgd_config._asdict(),
        )
    elif optimizer_name == "adam":
        optimizer = optim.Adam(
            parameters,
            lr=self.config.lr,
            weight_decay=self.config.l2,
            **optimizer_config.adam_config._asdict(),
        )
    elif optimizer_name == "adamax":
        optimizer = optim.Adamax(  # type: ignore
            parameters,
            lr=self.config.lr,
            weight_decay=self.config.l2,
            **optimizer_config.adamax_config._asdict(),
        )
    else:
        raise ValueError(f"Unrecognized optimizer option '{optimizer_name}'")

    logging.info(f"Using optimizer {optimizer}")
    self.optimizer = optimizer
Example 15: test_optimizer_init
# Required import: from torch import optim [as alias]
# Or: from torch.optim import Adamax [as alias]
def test_optimizer_init(self):
    L = np.array([[0, -1, 0], [0, 1, 0]])
    label_model = LabelModel()

    label_model.fit(L, optimizer="sgd", n_epochs=1)
    self.assertIsInstance(label_model.optimizer, optim.SGD)

    label_model.fit(L, optimizer="adam", n_epochs=1)
    self.assertIsInstance(label_model.optimizer, optim.Adam)

    label_model.fit(L, optimizer="adamax", n_epochs=1)
    self.assertIsInstance(label_model.optimizer, optim.Adamax)

    # An unknown optimizer name should raise a ValueError.
    with self.assertRaisesRegex(ValueError, "Unrecognized optimizer"):
        label_model.fit(L, optimizer="bad_optimizer", n_epochs=1)