This page collects typical usage examples of the Python method onmt.ModelConstructor. If you are unsure what onmt.ModelConstructor does or how to use it, the curated code examples below should help; you can also explore other members of the onmt module.
Seven code examples of onmt.ModelConstructor are shown below, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
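Most of the examples below follow the same pattern: load a checkpoint, rebuild the vocabulary fields, and hand both to onmt.ModelConstructor.make_base_model. A minimal sketch of that pattern, assuming an older OpenNMT-py release where make_base_model and onmt.io.load_fields_from_vocab have the signatures used in the examples ('model.pt' is a placeholder path):

import torch
import onmt
import onmt.io
import onmt.ModelConstructor

# Load a trained checkpoint onto the CPU, wherever it was originally saved.
checkpoint = torch.load('model.pt', map_location=lambda storage, loc: storage)

# Rebuild the torchtext fields (vocabularies) stored alongside the weights.
fields = onmt.io.load_fields_from_vocab(checkpoint['vocab'])

# Reconstruct the model from the training options saved in the checkpoint.
model = onmt.ModelConstructor.make_base_model(
    checkpoint['opt'], fields, False, checkpoint)  # False = run on CPU
model.eval()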
Example 1: __init__
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def __init__(self, opt, dummy_opt={}):
    # Add in default model arguments, possibly added since training.
    self.opt = opt
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)
    self.fields = onmt.IO.ONMTDataset.load_fields(checkpoint['vocab'])

    model_opt = checkpoint['opt']
    for arg in dummy_opt:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt[arg]

    self._type = model_opt.encoder_type
    self.copy_attn = model_opt.copy_attn

    self.model = onmt.ModelConstructor.make_base_model(
        model_opt, self.fields, use_gpu(opt), checkpoint)
    self.model.eval()
    self.model.generator.eval()

    # for debugging
    self.beam_accum = None
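This constructor is typically found on a translator wrapper class in older OpenNMT-py releases. Two details worth noting: the dummy_opt loop backfills options that were added to the code after the checkpoint was written, so old checkpoints can still be rebuilt with newer defaults, and model.eval() / generator.eval() switch off dropout for inference. This example also uses the older onmt.IO.ONMTDataset.load_fields API, whereas Example 2 below uses the newer onmt.io.load_fields_from_vocab.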
Example 2: main
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def main():
    dummy_parser = argparse.ArgumentParser(description='train.py')
    onmt.opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    # Add in default model arguments, possibly added since training.
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)
    model_opt = checkpoint['opt']
    src_dict = checkpoint['vocab'][1][1]
    tgt_dict = checkpoint['vocab'][0][1]

    fields = onmt.io.load_fields_from_vocab(checkpoint['vocab'])
    model_opt = checkpoint['opt']
    for arg in dummy_opt.__dict__:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt.__dict__[arg]

    model = onmt.ModelConstructor.make_base_model(
        model_opt, fields, use_gpu(opt), checkpoint)
    encoder = model.encoder
    decoder = model.decoder

    encoder_embeddings = encoder.embeddings.word_lut.weight.data.tolist()
    decoder_embeddings = decoder.embeddings.word_lut.weight.data.tolist()

    print("Writing source embeddings")
    write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
                     encoder_embeddings)

    print("Writing target embeddings")
    write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
                     decoder_embeddings)

    print('... done.')
    print('Converting model...')
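Both the module-level parser and the write_embeddings helper used above are defined elsewhere in the same script and are not part of this snippet. A minimal sketch of what write_embeddings might look like, assuming src_dict / tgt_dict are torchtext Vocab objects with an itos list and that the output format is one token per line followed by its vector values (everything beyond the names in the example is illustrative):

def write_embeddings(filename, vocab, embeddings):
    # Hypothetical sketch: writes "token v1 v2 ... vN" per line, word2vec-style text format.
    with open(filename, 'w', encoding='utf-8') as f:
        for i, token in enumerate(vocab.itos[:len(embeddings)]):
            vector = " ".join("%.6f" % v for v in embeddings[i])
            f.write("%s %s\n" % (token, vector))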
Example 3: build_model
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def build_model(model_opt, opt, fields, checkpoint):
    print('Building model...')
    model = onmt.ModelConstructor.make_base_model(model_opt, fields,
                                                  use_gpu(opt), checkpoint)
    if len(opt.gpuid) > 1:
        print('Multi gpu training: ', opt.gpuid)
        model = nn.DataParallel(model, device_ids=opt.gpuid, dim=1)
    print(model)
    return model
Example 4: build_model
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def build_model(model_opt, opt, fields, checkpoint):
    print('Building model...')
    model = onmt.ModelConstructor.make_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    if len(opt.gpuid) > 1:
        print('Multi gpu training: ', opt.gpuid)
        model = nn.DataParallel(model, device_ids=opt.gpuid, dim=1)
    print(model)
    return model
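In both build_model variants, nn.DataParallel splits the input along dim=1. OpenNMT-py batches of this era are time-major, shaped (sequence length, batch size), so dim=1 is the batch dimension; splitting on the default dim=0 would scatter time steps of the same sentences across GPUs instead of scattering whole examples.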
Example 5: __init__
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def __init__(self, model_loc, gpu=-1, beam_size=5, k=5):
    # Simulate all command-line args
    parser = argparse.ArgumentParser(
        description='translate.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    translate_opts(parser)
    self.opt = parser.parse_known_args()[0]
    self.opt.model = model_loc
    self.opt.beam_size = beam_size
    self.opt.batch_size = 1
    self.opt.n_best = k

    dummy_parser = argparse.ArgumentParser(description='train.py')
    model_opts(dummy_parser)
    self.dummy_opt = dummy_parser.parse_known_args([])[0]

    # Load the model.
    self.fields, self.model, self.model_opt = \
        onmt.ModelConstructor.load_test_model(
            self.opt, self.dummy_opt.__dict__)

    # Make GPU decoding possible
    self.opt.gpu = gpu
    self.opt.cuda = self.opt.gpu > -1
    if self.opt.cuda:
        torch.cuda.set_device(self.opt.gpu)

    # Translator
    self.scorer = onmt.translate.GNMTGlobalScorer(
        self.opt.alpha,
        self.opt.beta)
    self.translator = onmt.translate.Translator(
        self.model, self.fields,
        beam_size=self.opt.beam_size,
        n_best=self.opt.n_best,
        global_scorer=self.scorer,
        max_length=self.opt.max_sent_length,
        copy_attn=self.model_opt.copy_attn,
        cuda=self.opt.cuda,
        beam_trace=self.opt.dump_beam != "")
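This example uses onmt.ModelConstructor.load_test_model, which wraps the checkpoint loading, field reconstruction and make_base_model call shown in the other examples into a single helper and returns (fields, model, model_opt). The GNMTGlobalScorer arguments correspond to the length penalty (alpha) and coverage penalty (beta) from the GNMT paper; with both at 0, beam search falls back to plain log-probability scoring. beam_trace is enabled only when dump_beam names a file, in which case the full beam history is recorded for debugging.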
Example 6: main
(This example is nearly identical to Example 2; the only difference is that the model options are registered via opts.model_opts rather than onmt.opts.model_opts.)
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def main():
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    # Add in default model arguments, possibly added since training.
    checkpoint = torch.load(opt.model,
                            map_location=lambda storage, loc: storage)
    model_opt = checkpoint['opt']
    src_dict = checkpoint['vocab'][1][1]
    tgt_dict = checkpoint['vocab'][0][1]

    fields = onmt.io.load_fields_from_vocab(checkpoint['vocab'])
    model_opt = checkpoint['opt']
    for arg in dummy_opt.__dict__:
        if arg not in model_opt:
            model_opt.__dict__[arg] = dummy_opt.__dict__[arg]

    model = onmt.ModelConstructor.make_base_model(
        model_opt, fields, use_gpu(opt), checkpoint)
    encoder = model.encoder
    decoder = model.decoder

    encoder_embeddings = encoder.embeddings.word_lut.weight.data.tolist()
    decoder_embeddings = decoder.embeddings.word_lut.weight.data.tolist()

    print("Writing source embeddings")
    write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
                     encoder_embeddings)

    print("Writing target embeddings")
    write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
                     decoder_embeddings)

    print('... done.')
    print('Converting model...')
Example 7: create_model
# Required module: import onmt [as alias]
# Or: from onmt import ModelConstructor [as alias]
def create_model(fields, options=None):
    # Fall back to the project's standard options if none are given.
    if options is None:
        options = copy.deepcopy(onmt.standard_options.stdOptions)
    if not isinstance(options, dict):
        options = mhf.convertToDictionary(options)
    options = handle_options(options)
    options = mhf.convertToNamedTuple(options)

    # Build a fresh model (no checkpoint) from the resolved options.
    model = onmt.ModelConstructor.make_base_model(options, fields, USE_CUDA,
                                                  checkpoint=None)
    if len(options.gpuid) > 1:
        model = nn.DataParallel(model, device_ids=options.gpuid, dim=1)
    return model
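Unlike the other examples, create_model builds an untrained model (checkpoint=None) and leans on project-specific helpers: mhf, handle_options, onmt.standard_options and USE_CUDA are defined elsewhere in that project, not in OpenNMT-py itself. A hypothetical call, assuming the vocabulary fields come from a .vocab.pt file produced by OpenNMT-py preprocessing ('demo.vocab.pt' is a placeholder path):

# Hypothetical usage; 'demo.vocab.pt' is a placeholder path.
fields = onmt.io.load_fields_from_vocab(torch.load('demo.vocab.pt'))
model = create_model(fields)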