This article collects typical code examples of the torch.optim.ASGD attribute in Python. If you are wondering what optim.ASGD is, how to use it, or want to see it in real code, the curated examples below may help. You can also explore the enclosing module, torch.optim, for further usage examples.
A total of 10 code examples of optim.ASGD are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
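Before the examples, here is a minimal sketch of constructing torch.optim.ASGD directly and taking one optimization step; the tiny nn.Linear model and dummy loss are purely illustrative.

import torch
from torch import nn, optim

# Illustrative model; any iterable of parameters with requires_grad=True works.
model = nn.Linear(10, 2)

# ASGD's documented defaults: lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0.
optimizer = optim.ASGD(model.parameters(), lr=0.01, t0=1e6, weight_decay=0.0)

x = torch.randn(4, 10)
loss = model(x).sum()     # dummy loss for the sketch
loss.backward()           # compute gradients
optimizer.step()          # ASGD update (parameter averaging starts after t0 steps)
optimizer.zero_grad()     # clear gradients for the next iteration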
Example 1: create_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
def create_optimizer(parameters, opt):
    lr = opt.learning_rate
    # default learning rates:
    # sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, adamax - 0.002, asgd - 0.01, rmsprop - 0.01, rprop - 0.01
    optim_method = opt.optim_method.casefold()
    if optim_method == 'sgd':
        optimizer = optim.SGD(parameters, lr=lr if lr else 0.5, weight_decay=opt.weight_decay)
    elif optim_method == 'adagrad':
        optimizer = optim.Adagrad(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'adadelta':
        optimizer = optim.Adadelta(parameters, lr=lr if lr else 1, weight_decay=opt.weight_decay)
    elif optim_method == 'adam':
        optimizer = optim.Adam(parameters, lr=lr if lr else 0.001, weight_decay=opt.weight_decay)
    elif optim_method == 'adamax':
        optimizer = optim.Adamax(parameters, lr=lr if lr else 0.002, weight_decay=opt.weight_decay)
    elif optim_method == 'asgd':
        optimizer = optim.ASGD(parameters, lr=lr if lr else 0.01, t0=5000, weight_decay=opt.weight_decay)
    elif optim_method == 'rmsprop':
        optimizer = optim.RMSprop(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'rprop':
        optimizer = optim.Rprop(parameters, lr=lr if lr else 0.01)
    else:
        raise RuntimeError("Invalid optim method: " + opt.optim_method)
    return optimizer
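A possible invocation of create_optimizer is sketched below. The opt object and its values are assumptions chosen to match the attributes the function reads (optim_method, learning_rate, weight_decay); passing learning_rate=0.0 triggers the per-method default (0.01 for ASGD).

from argparse import Namespace
from torch import nn

model = nn.Linear(8, 1)
# Hypothetical options namespace mirroring what create_optimizer expects.
opt = Namespace(optim_method='ASGD', learning_rate=0.0, weight_decay=1e-5)
optimizer = create_optimizer(model.parameters(), opt)  # optim.ASGD with lr=0.01, t0=5000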
Example 2: make_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
# (This snippet also requires the adabound package: import adabound)
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
    return optimizer
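A hypothetical configuration illustrating the nested lookup make_optimizer performs; the key names below mirror the function body rather than any particular project, and the sketch assumes the adabound package is importable because the dispatch dict references adabound.AdaBound unconditionally.

from torch import nn

# Hypothetical nested config: 'mode' selects the section, 'type' selects the sub-config.
config = {
    'mode': 'extraction',
    'aspect_extraction_model': {
        'type': 'lstm',
        'lstm': {'optimizer': 'asgd', 'learning_rate': 0.01, 'weight_decay': 0.0},
    },
}
model = nn.Linear(8, 1)
optimizer = make_optimizer(config, model)  # optim.ASGD(model.parameters(), lr=0.01, weight_decay=0.0)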
Example 3: __init__
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
def __init__(self, *, lr=0.002, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.

    Arguments:
        lr (float, optional): learning rate (default: 2e-3)
        lambd (float, optional): decay term (default: 1e-4)
        alpha (float, optional): power for eta update (default: 0.75)
        t0 (float, optional): point at which to start averaging (default: 1e6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    .. _Acceleration of stochastic approximation by averaging:
        http://dl.acm.org/citation.cfm?id=131098
    """
    super().__init__(optim.ASGD, lr=lr, lambd=lambd, alpha=alpha, t0=t0, weight_decay=weight_decay)
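Since this wrapper only forwards its keyword arguments to optim.ASGD (through a base class not shown in the snippet), a direct construction with the same hyper-parameters would look roughly like this:

from torch import nn, optim

model = nn.Linear(8, 1)
# Same values as the wrapper's signature defaults: lr=0.002, lambd=1e-4, alpha=0.75, t0=1e6.
optimizer = optim.ASGD(model.parameters(), lr=0.002, lambd=0.0001,
                       alpha=0.75, t0=1000000.0, weight_decay=0)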
Example 4: get_optimiser
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
def get_optimiser(name, net_params, optim_params):
    lr = optim_params['learning_rate']
    momentum = optim_params['momentum']
    weight_decay = optim_params['weight_decay']
    if name == "SGD":
        return optim.SGD(net_params, lr,
                         momentum=momentum, weight_decay=weight_decay)
    elif name == "Adam":
        return optim.Adam(net_params, lr, weight_decay=1e-5)
    elif name == "SparseAdam":
        return optim.SparseAdam(net_params, lr)
    elif name == "Adadelta":
        return optim.Adadelta(net_params, lr, weight_decay=weight_decay)
    elif name == "Adagrad":
        return optim.Adagrad(net_params, lr, weight_decay=weight_decay)
    elif name == "Adamax":
        return optim.Adamax(net_params, lr, weight_decay=weight_decay)
    elif name == "ASGD":
        return optim.ASGD(net_params, lr, weight_decay=weight_decay)
    elif name == "LBFGS":
        return optim.LBFGS(net_params, lr)
    elif name == "RMSprop":
        return optim.RMSprop(net_params, lr, momentum=momentum,
                             weight_decay=weight_decay)
    elif name == "Rprop":
        return optim.Rprop(net_params, lr)
    else:
        raise ValueError("unsupported optimizer {0:}".format(name))
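A sketch of calling get_optimiser through its ASGD branch; the optim_params dictionary is hypothetical but uses exactly the keys the function reads (learning_rate, momentum, weight_decay).

from torch import nn

model = nn.Linear(8, 1)
optim_params = {'learning_rate': 0.01, 'momentum': 0.9, 'weight_decay': 1e-4}
optimizer = get_optimiser("ASGD", model.parameters(), optim_params)  # optim.ASGD(..., lr=0.01, weight_decay=1e-4)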
Example 5: get_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
# (This snippet also uses the standard library re module: import re)
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
    - "sgd,lr=0.01"
    - "adagrad,lr=0.1,lr_decay=0.05"
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}
    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)
    return optim_fn, optim_params
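Because this variant returns the optimizer class and the parsed keyword arguments separately, the caller instantiates it, for example:

from torch import nn

model = nn.Linear(8, 1)
optim_fn, optim_params = get_optimizer("asgd,lr=0.01,t0=5000")
optimizer = optim_fn(model.parameters(), **optim_params)  # optim.ASGD(params, lr=0.01, t0=5000.0)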
Example 6: _optim
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
def _optim(self):
    self.params = list(self.encoder.parameters()) + list(self.decoder.parameters())
    if self.config.opt == 'adam':
        self.optimizer = optim.Adam(self.params, lr=self.config.lr)
    elif self.config.opt == 'adadelta':
        self.optimizer = optim.Adadelta(self.params, lr=self.config.lr)
    elif self.config.opt == 'asgd':
        self.optimizer = optim.ASGD(self.params, lr=self.config.lr)
    else:
        self.optimizer = optim.SGD(self.params, lr=self.config.lr)
Example 7: _set_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
# (This snippet also relies on Emmental-specific names: EmmentalModel, Meta, BertAdam, and a module-level logger)
def _set_optimizer(self, model: EmmentalModel) -> None:
    """Set optimizer for learning process.

    Args:
        model: The model to set up the optimizer.
    """
    optimizer_config = Meta.config["learner_config"]["optimizer_config"]
    opt = optimizer_config["optimizer"]

    # If Meta.config["learner_config"]["optimizer_config"]["parameters"] is None,
    # create a parameter group with all parameters in the model, else load user
    # specified parameter groups.
    if optimizer_config["parameters"] is None:
        parameters = filter(lambda p: p.requires_grad, model.parameters())
    else:
        parameters = optimizer_config["parameters"](model)

    optim_dict = {
        # PyTorch optimizer
        "asgd": optim.ASGD,  # type: ignore
        "adadelta": optim.Adadelta,  # type: ignore
        "adagrad": optim.Adagrad,  # type: ignore
        "adam": optim.Adam,  # type: ignore
        "adamw": optim.AdamW,  # type: ignore
        "adamax": optim.Adamax,  # type: ignore
        "lbfgs": optim.LBFGS,  # type: ignore
        "rms_prop": optim.RMSprop,  # type: ignore
        "r_prop": optim.Rprop,  # type: ignore
        "sgd": optim.SGD,  # type: ignore
        "sparse_adam": optim.SparseAdam,  # type: ignore
        # Customize optimizer
        "bert_adam": BertAdam,
    }

    if opt in ["lbfgs", "r_prop", "sparse_adam"]:
        optimizer = optim_dict[opt](
            parameters,
            lr=optimizer_config["lr"],
            **optimizer_config[f"{opt}_config"],
        )
    elif opt in optim_dict.keys():
        optimizer = optim_dict[opt](
            parameters,
            lr=optimizer_config["lr"],
            weight_decay=optimizer_config["l2"],
            **optimizer_config[f"{opt}_config"],
        )
    elif isinstance(opt, optim.Optimizer):  # type: ignore
        optimizer = opt(parameters)
    else:
        raise ValueError(f"Unrecognized optimizer option '{opt}'")

    self.optimizer = optimizer

    if Meta.config["meta_config"]["verbose"]:
        logger.info(f"Using optimizer {self.optimizer}")
Example 8: get_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
# (This snippet also uses the standard library re and inspect modules)
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
    - "sgd,lr=0.01"
    - "adagrad,lr=0.1,lr_decay=0.05"
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}
    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)
    # check that we give good parameters to the optimizer
    # (note: inspect.getargspec was removed in Python 3.11; inspect.getfullargspec
    # is the modern replacement, as used in Example 10)
    expected_args = inspect.getargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))
    return optim_fn, optim_params
Example 9: get_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
# (This snippet also uses the standard library re and inspect modules, plus a project-specific AdamInverseSqrtWithWarmup class)
def get_optimizer(parameters, s):
    """
    Parse optimizer parameters.
    Input should be of the form:
    - "sgd,lr=0.01"
    - "adagrad,lr=0.1,lr_decay=0.05"
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}
    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
        optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
        optim_params.pop('beta1', None)
        optim_params.pop('beta2', None)
    elif method == 'adam_inverse_sqrt':
        optim_fn = AdamInverseSqrtWithWarmup
        optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
        optim_params.pop('beta1', None)
        optim_params.pop('beta2', None)
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)
    # check that we give good parameters to the optimizer
    # (note: inspect.getargspec was removed in Python 3.11; inspect.getfullargspec
    # is the modern replacement, as used in Example 10)
    expected_args = inspect.getargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))
    return optim_fn(parameters, **optim_params)
Example 10: get_optimizer
# Required imports: from torch import optim [as alias]
# Or: from torch.optim import ASGD [as alias]
# (This snippet also uses the standard library re and inspect modules)
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
    - "sgd,lr=0.01"
    - "adagrad,lr=0.1,lr_decay=0.05"
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}
    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)
    # check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))
    return optim_fn, optim_params