This page collects typical usage examples of the onmt.Optim method in Python. If you are wondering how onmt.Optim is used in practice, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the containing module, onmt.
Five code examples of onmt.Optim are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: build_optim
# Required module: import onmt [as alias]
# Or: from onmt import Optim [as alias]
def build_optim(model, checkpoint):
    # `opt` is the module-level namespace of parsed command-line options
    if opt.train_from:
        print('Loading optimizer from checkpoint.')
        optim = checkpoint['optim']
        optim.optimizer.load_state_dict(
            checkpoint['optim'].optimizer.state_dict())
    else:
        optim = onmt.Optim(opt.optim,  # SGD by default
                           opt.learning_rate,
                           opt.max_grad_norm,
                           lr_decay=opt.learning_rate_decay,
                           start_decay_at=opt.start_decay_at,
                           beta1=opt.adam_beta1,
                           beta2=opt.adam_beta2,
                           adagrad_accum=opt.adagrad_accumulator_init,
                           decay_method=opt.decay_method,
                           warmup_steps=opt.warmup_steps,
                           model_size=opt.rnn_size)
    optim.set_parameters(model.parameters())
    return optim
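To make the option names above concrete, here is a minimal, self-contained sketch of constructing onmt.Optim directly. The tiny nn.Linear model and the argparse.Namespace values are illustrative stand-ins for the real training options, not defaults taken from onmt; only the constructor signature itself comes from the examples on this page.

import argparse
import torch.nn as nn
import onmt

# Illustrative stand-ins: a tiny model and an argparse-style `opt` namespace
# carrying the option names the examples above read. All values are assumptions.
model = nn.Linear(8, 8)
opt = argparse.Namespace(
    optim='sgd', learning_rate=1.0, max_grad_norm=5.0,
    learning_rate_decay=0.5, start_decay_at=8,
    adam_beta1=0.9, adam_beta2=0.999, adagrad_accumulator_init=0.0,
    decay_method=None, warmup_steps=4000, rnn_size=500)

optim = onmt.Optim(opt.optim, opt.learning_rate, opt.max_grad_norm,
                   lr_decay=opt.learning_rate_decay,
                   start_decay_at=opt.start_decay_at,
                   beta1=opt.adam_beta1,
                   beta2=opt.adam_beta2,
                   adagrad_accum=opt.adagrad_accumulator_init,
                   decay_method=opt.decay_method,
                   warmup_steps=opt.warmup_steps,
                   model_size=opt.rnn_size)
optim.set_parameters(model.parameters())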
Example 2: build_optim
# Required module: import onmt [as alias]
# Or: from onmt import Optim [as alias]
def build_optim(model, checkpoint):
    if opt.train_from:
        print('Loading optimizer from checkpoint.')
        optim = checkpoint['optim']
        optim.optimizer.load_state_dict(
            checkpoint['optim'].optimizer.state_dict())
    else:
        optim = onmt.Optim(
            opt.optim, opt.learning_rate, opt.max_grad_norm,
            lr_decay=opt.learning_rate_decay,
            start_decay_at=opt.start_decay_at,
            beta1=opt.adam_beta1,
            beta2=opt.adam_beta2,
            adagrad_accum=opt.adagrad_accumulator_init,
            decay_method=opt.decay_method,
            warmup_steps=opt.warmup_steps,
            model_size=opt.rnn_size)
    optim.set_parameters(model.parameters())
    return optim
Example 3: build_optim
# Required module: import onmt [as alias]
# Or: from onmt import Optim [as alias]
def build_optim(model, checkpoint):
    if opt.train_from:
        print('Loading optimizer from checkpoint.')
        optim = checkpoint['optim']
        optim.optimizer.load_state_dict(
            checkpoint['optim'].optimizer.state_dict())
    else:
        print('Making optimizer for training.')
        optim = onmt.Optim(
            opt.optim, opt.learning_rate, opt.max_grad_norm,
            lr_decay=opt.learning_rate_decay,
            start_decay_at=opt.start_decay_at,
            beta1=opt.adam_beta1,
            beta2=opt.adam_beta2,
            adagrad_accum=opt.adagrad_accumulator_init,
            decay_method=opt.decay_method,
            warmup_steps=opt.warmup_steps,
            model_size=opt.rnn_size)
    optim.set_parameters(model.named_parameters())
    return optim
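Note that this variant passes model.named_parameters() instead of model.parameters(), so it expects a version of Optim.set_parameters that iterates over (name, parameter) pairs. If you need to support both conventions, a small hypothetical adapter could look like the following; the helper name and flag are illustrative, not part of onmt.

def set_optim_parameters(optim, model, expects_named):
    # Attach model parameters to an onmt.Optim instance, covering both the
    # parameters() and named_parameters() calling conventions.
    if expects_named:
        optim.set_parameters(model.named_parameters())
    else:
        optim.set_parameters(model.parameters())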
Example 4: create_optimizer
# Required module: import onmt [as alias]
# Or: from onmt import Optim [as alias]
# (This snippet also uses the standard-library copy module and a project-local
#  helper module `mhf`, both imported elsewhere in the original project.)
def create_optimizer(model_or_iterable, options=None):
    if options is None:
        options = copy.deepcopy(onmt.standard_options.stdOptions)
    if not isinstance(options, dict):
        options = mhf.convertToDictionary(options)
    options = handle_options(options)
    options = mhf.convertToNamedTuple(options)
    optim = onmt.Optim(
        options.optim, options.learning_rate, options.max_grad_norm,
        lr_decay=options.learning_rate_decay,
        start_decay_at=options.start_decay_at,
        opt=options)
    try:
        optim.set_parameters(model_or_iterable.parameters())
    except AttributeError:
        optim.set_parameters(model_or_iterable)
    return optim
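Because set_parameters is first attempted on .parameters() and falls back to treating the argument as a plain iterable, create_optimizer accepts either a module or an iterable of parameters. A short usage sketch, assuming onmt.standard_options.stdOptions and the mhf helpers are available exactly as in the snippet above:

import torch.nn as nn

model = nn.Linear(16, 4)

# Either call should work, thanks to the try/except fallback above.
optim_from_model = create_optimizer(model)
optim_from_params = create_optimizer(list(model.parameters()))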
Example 5: build_optim
# Required module: import onmt [as alias]
# Or: from onmt import Optim [as alias]
def build_optim(model, checkpoint):
    if opt.train_from:
        print('Loading optimizer from checkpoint.')
        optim = checkpoint['optim']
        optim.optimizer.load_state_dict(
            checkpoint['optim'].optimizer.state_dict())
    else:
        # what members of opt does Optim need?
        optim = onmt.Optim(
            opt.optim, opt.learning_rate, opt.max_grad_norm,
            lr_decay=opt.learning_rate_decay,
            start_decay_at=opt.start_decay_at,
            beta1=opt.adam_beta1,
            beta2=opt.adam_beta2,
            adagrad_accum=opt.adagrad_accumulator_init,
            opt=opt
        )
    optim.set_parameters(model.parameters())
    return optim
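For completeness, here is a sketch of how the wrapper returned by build_optim is typically used inside a training step: in common versions of onmt.Optim, step() clips gradients according to max_grad_norm and then steps the underlying torch optimizer. The batch iterator and loss helper below are illustrative assumptions, not part of onmt.

# Hypothetical inner training loop using the optim returned by build_optim.
for batch in train_iter:                  # train_iter: your batch iterator (assumed)
    model.zero_grad()
    loss = compute_loss(model, batch)     # compute_loss: illustrative helper (assumed)
    loss.backward()
    optim.step()                          # clips gradients, then updates parameters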