This page collects typical usage examples of onmt.modules in Python. If you are wondering what onmt.modules does and how to call it, the curated code examples below may help. You can also explore further usage examples from the enclosing onmt package.
Five code examples of onmt.modules are shown below, sorted by popularity by default.
Example 1: make_loss_compute
# Required import: import onmt [as alias]
# Or: from onmt import modules [as alias]
def make_loss_compute(model, tgt_vocab, opt, train=True):
    """
    This returns a user-defined LossCompute object, which is used to
    compute the loss during training/validation. You can implement your
    own *LossCompute class by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, opt.copy_attn_force,
            opt.copy_loss_by_seqlength)
    else:
        compute = onmt.Loss.NMTLossCompute(
            model.generator, tgt_vocab,
            label_smoothing=opt.label_smoothing if train else 0.0)
    if use_gpu(opt):
        compute.cuda()
    return compute
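As a usage sketch (not part of the original example): this factory is typically called once for training and once for validation, so that label smoothing is disabled when validating. The `opt` fields below match those read above, but `model` and `tgt_vocab` are hypothetical placeholders for objects built by OpenNMT-py's model constructor and preprocessing, and the `use_gpu` import reflects where that helper lived in older OpenNMT-py releases.

import argparse

import onmt
from onmt.Utils import use_gpu  # location in older OpenNMT-py releases

# Hypothetical options namespace; only the fields read by
# make_loss_compute and use_gpu are set. An empty gpuid list
# keeps everything on the CPU.
opt = argparse.Namespace(copy_attn=False, label_smoothing=0.1, gpuid=[])

# `model` and `tgt_vocab` are assumed to come from onmt.ModelConstructor
# and the preprocessed data fields, respectively.
train_loss = make_loss_compute(model, tgt_vocab, opt, train=True)
valid_loss = make_loss_compute(model, tgt_vocab, opt, train=False)  # smoothing off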
Example 2: make_loss_compute
# Required import: import onmt [as alias]
# Or: from onmt import modules [as alias]
def make_loss_compute(model, tgt_vocab, opt):
    """
    This returns a user-defined LossCompute object, which is used to
    compute the loss during training/validation. You can implement your
    own *LossCompute class by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, opt.copy_attn_force)
    else:
        compute = onmt.Loss.NMTLossCompute(
            model.generator, tgt_vocab,
            label_smoothing=opt.label_smoothing)
    if use_gpu(opt):
        compute.cuda()
    return compute
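The docstrings mention implementing your own *LossCompute by subclassing LossComputeBase. A rough sketch of what such a subclass looked like under the old OpenNMT-py 0.x API is below; the hook names (`_make_shard_state`, `_compute_loss`) and helpers (`_bottle`, `_stats`, `padding_idx`) follow that era's onmt.Loss module and may differ in other releases, so treat this as an assumption-laden illustration rather than a drop-in class.

import torch.nn as nn
import onmt

class MyNLLLossCompute(onmt.Loss.LossComputeBase):
    """Minimal custom loss: plain NLL over the generator's scores."""

    def __init__(self, generator, tgt_vocab):
        super(MyNLLLossCompute, self).__init__(generator, tgt_vocab)
        # padding_idx is set by LossComputeBase from the target vocab.
        self.criterion = nn.NLLLoss(ignore_index=self.padding_idx,
                                    size_average=False)

    def _make_shard_state(self, batch, output, range_, attns=None):
        # Select the gold targets aligned with this shard of the output.
        return {"output": output,
                "target": batch.tgt[range_[0] + 1: range_[1]]}

    def _compute_loss(self, batch, output, target):
        # Flatten (len x batch x dim) to (len*batch x dim), score with
        # the generator, and compare against the flattened gold indices.
        scores = self.generator(self._bottle(output))
        gtruth = target.view(-1)
        loss = self.criterion(scores, gtruth)
        stats = self._stats(loss.data.clone(), scores.data, gtruth.data)
        return loss, stats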
Example 3: make_loss_compute
# Required import: import onmt [as alias]
# Or: from onmt import modules [as alias]
def make_loss_compute(model, tgt_vocab, opt, train=True):
    """
    This returns a user-defined LossCompute object, which is used to
    compute the loss during training/validation. You can implement your
    own *LossCompute class by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, opt.copy_attn_force,
            opt.copy_loss_by_seqlength)
    else:
        compute = onmt.Loss.NMTLossCompute(
            model.generator, tgt_vocab,
            label_smoothing=opt.label_smoothing if train else 0.0,
            train_baseline=opt.train_baseline > 0,
        )
    if use_gpu(opt):
        compute.cuda()
    return compute
Example 4: make_loss_compute
# Required import: import onmt [as alias]
# Or: from onmt import modules [as alias]
def make_loss_compute(model, tgt_vocab, dataset, opt):
    """
    This returns a user-defined LossCompute object, which is used to
    compute the loss during training/validation. You can implement your
    own *LossCompute class by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, dataset, opt.copy_attn_force)
    else:
        compute = onmt.Loss.NMTLossCompute(model.generator, tgt_vocab,
                                           opt.label_smoothing)
    if use_gpu(opt):
        compute.cuda()
    return compute
Example 5: make_loss_compute
# Required import: import onmt [as alias]
# Or: from onmt import modules [as alias]
def make_loss_compute(model, tgt_vocab, dataset, copy_attn=False,
                      copy_attn_force=None, use_distillation_loss=False,
                      teacher_model=None):
    """
    This returns a user-defined LossCompute object, which is used to
    compute the loss during training/validation. You can implement your
    own *LossCompute* class by subclassing LossComputeBase.
    """
    if use_distillation_loss and teacher_model is None:
        raise ValueError('To compute the distillation loss you must '
                         'pass the teacher model')
    if teacher_model is not None:
        teacher_model_generator = teacher_model.generator
    else:
        teacher_model_generator = None
    if copy_attn:
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, dataset, copy_attn_force)
    else:
        compute = onmt.Loss.NMTLossCompute(
            model.generator, tgt_vocab, use_distillation_loss,
            teacher_model_generator)
    if USE_CUDA:
        compute = compute.cuda()
    return compute
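A hedged sketch of how the distillation variant might be wired up; `student_model`, `teacher_model`, `tgt_vocab`, and `dataset` are placeholders assumed to be built by the surrounding training script, not names from the original example:

# Distillation: the student is trained against the teacher's
# generator output in addition to the gold targets.
compute = make_loss_compute(student_model, tgt_vocab, dataset,
                            copy_attn=False,
                            use_distillation_loss=True,
                            teacher_model=teacher_model)

# Passing use_distillation_loss=True without a teacher_model raises
# ValueError, as guarded at the top of the function.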