本文整理汇总了Python中onmt.modules.Embeddings方法的典型用法代码示例。如果您正苦于以下问题:Python modules.Embeddings方法的具体用法?Python modules.Embeddings怎么用?Python modules.Embeddings使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类onmt.modules
的用法示例。
在下文中一共展示了modules.Embeddings方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_encoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_encoder(opt, embeddings):
    """Dispatch to the encoder implementation selected by ``opt.encoder_type``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    if opt.encoder_type == "transformer":
        return TransformerEncoder(opt.enc_layers, opt.rnn_size,
                                  opt.heads, opt.transformer_ff,
                                  opt.dropout, embeddings)
    if opt.encoder_type == "cnn":
        return CNNEncoder(opt.enc_layers, opt.rnn_size,
                          opt.cnn_kernel_width, opt.dropout, embeddings)
    if opt.encoder_type == "mean":
        return MeanEncoder(opt.enc_layers, embeddings)
    # Fallback covers both "rnn" and "brnn" encoder types.
    return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
                      opt.rnn_size, opt.dropout, embeddings,
                      opt.bridge)
示例2: build_embeddings
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_embeddings(opt, word_field, feat_fields, for_encoder=True):
    """Build an Embeddings module from the word field and feature fields.

    Args:
        opt: the option in current environment.
        word_field: field holding the word-level vocabulary.
        feat_fields: list of fields holding the feature vocabularies.
        for_encoder (bool): build Embeddings for encoder or decoder?
    """
    if for_encoder:
        dim = opt.src_word_vec_size
    else:
        dim = opt.tgt_word_vec_size

    pad_idx = word_field.vocab.stoi[word_field.pad_token]
    vocab_size = len(word_field.vocab)
    feat_pad_idxs = [f.vocab.stoi[f.pad_token] for f in feat_fields]
    feat_vocab_sizes = [len(f.vocab) for f in feat_fields]

    return Embeddings(
        word_vec_size=dim,
        position_encoding=opt.position_encoding,
        feat_merge=opt.feat_merge,
        feat_vec_exponent=opt.feat_vec_exponent,
        feat_vec_size=opt.feat_vec_size,
        dropout=opt.dropout,
        word_padding_idx=pad_idx,
        feat_padding_idx=feat_pad_idxs,
        word_vocab_size=vocab_size,
        feat_vocab_sizes=feat_vocab_sizes,
        # Sparse gradients are only valid with the sparseadam optimizer.
        sparse=opt.optim == "sparseadam",
    )
示例3: build_encoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_encoder(opt, embeddings):
    """Dispatch to the encoder implementation selected by ``opt.encoder_type``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    if opt.encoder_type == "transformer":
        return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,
                                  opt.heads, opt.transformer_ff,
                                  opt.dropout, embeddings)
    if opt.encoder_type == "cnn":
        return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,
                          opt.cnn_kernel_width, opt.dropout, embeddings)
    if opt.encoder_type == "mean":
        return MeanEncoder(opt.enc_layers, embeddings)
    # Any other encoder type falls back to the (bi)directional RNN encoder.
    return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
                      opt.enc_rnn_size, opt.dropout, embeddings,
                      opt.bridge)
示例4: build_embeddings
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_embeddings(opt, text_field, for_encoder=True):
    """Build an Embeddings module from a multi-field (word + feats) text field.

    Args:
        opt: the option in current environment.
        text_field (TextMultiField): word and feats field.
        for_encoder (bool): build Embeddings for encoder or decoder?
    """
    # First sub-field is the word field; the rest are feature fields.
    pad_idxs = [sub.vocab.stoi[sub.pad_token] for _, sub in text_field]
    vocab_sizes = [len(sub.vocab) for _, sub in text_field]
    word_pad_idx, feat_pad_idxs = pad_idxs[0], pad_idxs[1:]
    word_vocab_size, feat_vocab_sizes = vocab_sizes[0], vocab_sizes[1:]

    if for_encoder:
        emb_dim = opt.src_word_vec_size
        fix_word_vecs = opt.fix_word_vecs_enc
        pos_enc_learned = opt.position_encoding_learned_enc
    else:
        emb_dim = opt.tgt_word_vec_size
        fix_word_vecs = opt.fix_word_vecs_dec
        pos_enc_learned = opt.position_encoding_learned_dec

    # GPT representations apply on this side only when the configured
    # location ('both'/'src'/'tgt') matches the side being built.
    loc = opt.GPT_representation_loc
    uses_gpt = (loc == 'both'
                or (loc == 'src' and for_encoder)
                or (loc == 'tgt' and not for_encoder))
    gpt_mode = opt.GPT_representation_mode if uses_gpt else 'none'

    return Embeddings(
        word_vec_size=emb_dim,
        position_encoding=opt.position_encoding,
        position_encoding_learned=pos_enc_learned,
        position_encoding_ctxsize=opt.position_encoding_ctxsize,
        feat_merge=opt.feat_merge,
        feat_vec_exponent=opt.feat_vec_exponent,
        feat_vec_size=opt.feat_vec_size,
        dropout=opt.dropout,
        word_padding_idx=word_pad_idx,
        feat_padding_idx=feat_pad_idxs,
        word_vocab_size=word_vocab_size,
        feat_vocab_sizes=feat_vocab_sizes,
        sparse=opt.optim == "sparseadam",
        fix_word_vecs=fix_word_vecs,
        GPT_representation_mode=gpt_mode,
        GPT_representation_tgt=not for_encoder,
    )
示例5: build_encoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_encoder(opt, embeddings):
    """Dispatch to the encoder class registered in ``str2enc``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    if opt.model_type == "text":
        enc_type = opt.encoder_type
    else:
        # Non-text models (e.g. audio/image) select the encoder by model type.
        enc_type = opt.model_type
    return str2enc[enc_type].from_opt(opt, embeddings)
示例6: build_decoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_decoder(opt, embeddings):
    """Dispatch to the decoder class registered in ``str2dec``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    if opt.decoder_type == "rnn" and opt.input_feed:
        # Input feeding upgrades a plain "rnn" decoder to the "ifrnn" variant.
        dec_type = "ifrnn"
    else:
        dec_type = opt.decoder_type
    return str2dec[dec_type].from_opt(opt, embeddings)
示例7: build_encoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_encoder(opt, embeddings):
    """Dispatch to the encoder class registered in ``str2enc``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    # "text" and "vec" models use opt.encoder_type; other model types
    # (e.g. audio/image) select the encoder by model type directly.
    if opt.model_type in ("text", "vec"):
        enc_type = opt.encoder_type
    else:
        enc_type = opt.model_type
    return str2enc[enc_type].from_opt(opt, embeddings)
示例8: build_encoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_encoder(opt, embeddings):
    """Dispatch to the encoder class registered in ``str2enc``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    # "text", "vec" and "keyphrase" models use opt.encoder_type; any other
    # model type selects the encoder by model type directly.
    if opt.model_type in ("text", "vec", "keyphrase"):
        enc_type = opt.encoder_type
    else:
        enc_type = opt.model_type
    return str2enc[enc_type].from_opt(opt, embeddings)
示例9: build_embeddings
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
    """Build an Embeddings instance.

    Args:
        opt: the option in current environment.
        word_dict (Vocab): words dictionary.
        feature_dicts ([Vocab], optional): a list of feature dictionary.
        for_encoder (bool): build Embeddings for encoder or decoder?
    """
    dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size

    word_pad_idx = word_dict.stoi[inputters.PAD_WORD]
    feat_pad_idxs = [fd.stoi[inputters.PAD_WORD] for fd in feature_dicts]
    feat_vocab_sizes = [len(fd) for fd in feature_dicts]

    return Embeddings(
        word_vec_size=dim,
        position_encoding=opt.position_encoding,
        feat_merge=opt.feat_merge,
        feat_vec_exponent=opt.feat_vec_exponent,
        feat_vec_size=opt.feat_vec_size,
        dropout=opt.dropout,
        word_padding_idx=word_pad_idx,
        feat_padding_idx=feat_pad_idxs,
        word_vocab_size=len(word_dict),
        feat_vocab_sizes=feat_vocab_sizes,
        # Sparse gradients are only valid with the sparseadam optimizer.
        sparse=opt.optim == "sparseadam",
    )
示例10: build_decoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_decoder(opt, embeddings):
    """Dispatch to the decoder implementation selected by ``opt.decoder_type``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    if opt.decoder_type == "transformer":
        return TransformerDecoder(
            opt.dec_layers, opt.rnn_size, opt.heads, opt.transformer_ff,
            opt.global_attention, opt.copy_attn, opt.self_attn_type,
            opt.dropout, embeddings)
    if opt.decoder_type == "cnn":
        return CNNDecoder(
            opt.dec_layers, opt.rnn_size, opt.global_attention,
            opt.copy_attn, opt.cnn_kernel_width, opt.dropout, embeddings)
    # RNN decoders: input feeding selects the input-feed variant.
    rnn_cls = InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder
    return rnn_cls(
        opt.rnn_type, opt.brnn, opt.dec_layers, opt.rnn_size,
        opt.global_attention, opt.global_attention_function,
        opt.coverage_attn, opt.context_gate, opt.copy_attn,
        opt.dropout, embeddings, opt.reuse_copy_attn)
示例11: build_decoder
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_decoder(opt, embeddings):
    """Dispatch to the decoder implementation selected by ``opt.decoder_type``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    if opt.decoder_type == "transformer":
        return TransformerDecoder(
            opt.dec_layers, opt.dec_rnn_size, opt.heads, opt.transformer_ff,
            opt.global_attention, opt.copy_attn, opt.self_attn_type,
            opt.dropout, embeddings)
    if opt.decoder_type == "cnn":
        return CNNDecoder(
            opt.dec_layers, opt.dec_rnn_size, opt.global_attention,
            opt.copy_attn, opt.cnn_kernel_width, opt.dropout, embeddings)
    # RNN decoders: input feeding selects the input-feed variant.
    rnn_cls = InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder
    return rnn_cls(
        opt.rnn_type, opt.brnn, opt.dec_layers, opt.dec_rnn_size,
        opt.global_attention, opt.global_attention_function,
        opt.coverage_attn, opt.context_gate, opt.copy_attn,
        opt.dropout, embeddings, opt.reuse_copy_attn)
示例12: build_embeddings
# Required import: from onmt import modules
# Alternatively: from onmt.modules import Embeddings
def build_embeddings(opt, text_field, for_encoder=True):
    """Build an embeddings module (word or vector features) for one side.

    Args:
        opt: the option in current environment.
        text_field (TextMultiField): word and feats field.
        for_encoder (bool): build Embeddings for encoder or decoder?
    """
    emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
    # opt.dropout may be a per-layer list; embeddings use the first entry.
    dropout = opt.dropout[0] if type(opt.dropout) is list else opt.dropout

    # Vector-feature sources bypass token embeddings entirely.
    if opt.model_type == "vec" and for_encoder:
        return VecEmbedding(
            opt.feat_vec_size,
            emb_dim,
            position_encoding=opt.position_encoding,
            dropout=dropout,
        )

    # First sub-field is the word field; the rest are feature fields.
    pad_idxs = [sub.vocab.stoi[sub.pad_token] for _, sub in text_field]
    vocab_sizes = [len(sub.vocab) for _, sub in text_field]
    word_pad_idx, feat_pad_idxs = pad_idxs[0], pad_idxs[1:]
    word_vocab_size, feat_vocab_sizes = vocab_sizes[0], vocab_sizes[1:]

    if for_encoder:
        fix_word_vecs = opt.fix_word_vecs_enc
    else:
        fix_word_vecs = opt.fix_word_vecs_dec

    return Embeddings(
        word_vec_size=emb_dim,
        position_encoding=opt.position_encoding,
        feat_merge=opt.feat_merge,
        feat_vec_exponent=opt.feat_vec_exponent,
        feat_vec_size=opt.feat_vec_size,
        dropout=dropout,
        word_padding_idx=word_pad_idx,
        feat_padding_idx=feat_pad_idxs,
        word_vocab_size=word_vocab_size,
        feat_vocab_sizes=feat_vocab_sizes,
        sparse=opt.optim == "sparseadam",
        fix_word_vecs=fix_word_vecs,
    )