本文整理汇总了Python中onmt.modules方法的典型用法代码示例。如果您正苦于以下问题:Python onmt.modules方法的具体用法?Python onmt.modules怎么用?Python onmt.modules使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类onmt的用法示例。
在下文中一共展示了onmt.modules方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_loss_compute
# Required module: import onmt
# Alternatively: from onmt import modules
def make_loss_compute(model, tgt_vocab, opt, train=True):
    """Build the LossCompute object used during training or validation.

    Selects a copy-attention loss when ``opt.copy_attn`` is enabled,
    otherwise a standard NMT loss. Label smoothing is applied only in
    training mode. You can implement your own *LossCompute class by
    subclassing LossComputeBase.
    """
    if opt.copy_attn:
        loss = onmt.modules.CopyGeneratorLossCompute(
            model.generator,
            tgt_vocab,
            opt.copy_attn_force,
            opt.copy_loss_by_seqlength,
        )
    else:
        # Validation uses no label smoothing.
        smoothing = opt.label_smoothing if train else 0.0
        loss = onmt.Loss.NMTLossCompute(
            model.generator, tgt_vocab, label_smoothing=smoothing)
    if use_gpu(opt):
        loss.cuda()
    return loss
示例2: make_loss_compute
# Required module: import onmt
# Alternatively: from onmt import modules
def make_loss_compute(model, tgt_vocab, opt):
    """Build the LossCompute object used during training or validation.

    Returns a copy-attention loss when ``opt.copy_attn`` is enabled,
    otherwise a standard NMT loss with the configured label smoothing.
    You can implement your own *LossCompute class by subclassing
    LossComputeBase.
    """
    if opt.copy_attn:
        loss = onmt.modules.CopyGeneratorLossCompute(
            model.generator,
            tgt_vocab,
            opt.copy_attn_force,
        )
    else:
        loss = onmt.Loss.NMTLossCompute(
            model.generator,
            tgt_vocab,
            label_smoothing=opt.label_smoothing,
        )
    if use_gpu(opt):
        loss.cuda()
    return loss
示例3: make_loss_compute
# Required module: import onmt
# Alternatively: from onmt import modules
def make_loss_compute(model, tgt_vocab, opt, train=True):
    """Build the LossCompute object used during training or validation.

    Uses a copy-attention loss when ``opt.copy_attn`` is enabled; otherwise
    a standard NMT loss with label smoothing (training only) and an optional
    trained baseline (``opt.train_baseline > 0``). You can implement your
    own *LossCompute class by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        loss = onmt.modules.CopyGeneratorLossCompute(
            model.generator,
            tgt_vocab,
            opt.copy_attn_force,
            opt.copy_loss_by_seqlength,
        )
    else:
        # No label smoothing at validation time.
        smoothing = opt.label_smoothing if train else 0.0
        loss = onmt.Loss.NMTLossCompute(
            model.generator,
            tgt_vocab,
            label_smoothing=smoothing,
            train_baseline=opt.train_baseline > 0,
        )
    if use_gpu(opt):
        loss.cuda()
    return loss
示例4: make_loss_compute
# Required module: import onmt
# Alternatively: from onmt import modules
def make_loss_compute(model, tgt_vocab, dataset, opt):
    """Build the LossCompute object used during training or validation.

    Selects a copy-attention loss (which also needs the dataset) when
    ``opt.copy_attn`` is enabled, otherwise a standard NMT loss. You can
    implement your own *LossCompute class by subclassing LossComputeBase.
    """
    if opt.copy_attn:
        loss = onmt.modules.CopyGeneratorLossCompute(
            model.generator,
            tgt_vocab,
            dataset,
            opt.copy_attn_force,
        )
    else:
        loss = onmt.Loss.NMTLossCompute(
            model.generator,
            tgt_vocab,
            opt.label_smoothing,
        )
    if use_gpu(opt):
        loss.cuda()
    return loss
示例5: make_loss_compute
# Required module: import onmt
# Alternatively: from onmt import modules
def make_loss_compute(model, tgt_vocab, dataset, copy_attn=False,
                      copy_attn_force=None, use_distillation_loss=False,
                      teacher_model=None):
    """Build the LossCompute object used during training or validation.

    Selects a copy-attention loss when ``copy_attn`` is set, otherwise a
    standard NMT loss that may additionally compute a distillation loss
    against ``teacher_model``'s generator. You can implement your own
    *LossCompute* class, by subclassing LossComputeBase.

    Raises:
        ValueError: if distillation loss is requested without a teacher model.
    """
    # Fix: compare truthiness rather than identity (`is True`), so any
    # truthy flag value (e.g. 1) also triggers the guard.
    if use_distillation_loss and teacher_model is None:
        raise ValueError('To compute distillation loss you have to pass the teacher model generator')
    teacher_model_generator = (
        teacher_model.generator if teacher_model is not None else None)
    if copy_attn:
        compute = onmt.modules.CopyGeneratorLossCompute(
            model.generator, tgt_vocab, dataset, copy_attn_force)
    else:
        compute = onmt.Loss.NMTLossCompute(
            model.generator, tgt_vocab, use_distillation_loss,
            teacher_model_generator)
    if USE_CUDA:
        compute = compute.cuda()
    return compute