This article collects typical usage examples of the Python method allennlp.modules.Seq2SeqEncoder.from_params. If you are unsure what Seq2SeqEncoder.from_params does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the class it belongs to, allennlp.modules.Seq2SeqEncoder.
Below are 15 code examples of Seq2SeqEncoder.from_params, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
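Before the examples, here is a minimal, self-contained sketch of the call pattern they all share, assuming a standard AllenNLP installation where the "lstm" encoder type is registered; the hyperparameter values below are illustrative only.
from allennlp.common import Params
from allennlp.modules import Seq2SeqEncoder

# Build an encoder from a configuration blob; "type" selects the registered encoder class.
params = Params({
    "type": "lstm",
    "input_size": 5,       # dimensionality of the incoming token embeddings
    "hidden_size": 7,      # LSTM hidden state size
    "num_layers": 3,
    "bidirectional": True,
})
encoder = Seq2SeqEncoder.from_params(params)
print(encoder.get_input_dim())   # 5
print(encoder.get_output_dim())  # 14 -- bidirectional, so 2 * hidden_size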
Example 1: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    dropout = params.pop("dropout", None)
    include_start_end_transitions = params.pop("include_start_end_transitions", True)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               dropout=dropout,
               include_start_end_transitions=include_start_end_transitions,
               initializer=initializer,
               regularizer=regularizer)
Example 2: test_from_params_builders_encoder_correctly
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def test_from_params_builders_encoder_correctly(self):
    # We're just making sure parameters get passed through correctly here, and that the basic
    # API works.
    params = Params(
        {
            "type": "lstm",
            "bidirectional": True,
            "num_layers": 3,
            "input_size": 5,
            "hidden_size": 7,
            "stateful": True,
        }
    )
    encoder = Seq2SeqEncoder.from_params(params)
    assert encoder.__class__.__name__ == "LstmSeq2SeqEncoder"
    assert encoder._module.__class__.__name__ == "LSTM"
    assert encoder._module.num_layers == 3
    assert encoder._module.input_size == 5
    assert encoder._module.hidden_size == 7
    assert encoder._module.bidirectional is True
    assert encoder._module.batch_first is True
    assert encoder.stateful is True
Example 3: test_from_params_builders_encoder_correctly
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def test_from_params_builders_encoder_correctly(self):
    # We're just making sure parameters get passed through correctly here, and that the basic
    # API works.
    params = Params({
        u"type": u"lstm",
        u"bidirectional": True,
        u"num_layers": 3,
        u"input_size": 5,
        u"hidden_size": 7
    })
    encoder = Seq2SeqEncoder.from_params(params)
    # pylint: disable=protected-access
    assert encoder.__class__.__name__ == u'PytorchSeq2SeqWrapper'
    assert encoder._module.__class__.__name__ == u'LSTM'
    assert encoder._module.num_layers == 3
    assert encoder._module.input_size == 5
    assert encoder._module.hidden_size == 7
    assert encoder._module.bidirectional is True
    assert encoder._module.batch_first is True
Example 4: test_from_params_requires_batch_first
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def test_from_params_requires_batch_first(self):
    params = Params({"type": "lstm", "batch_first": False})
    with pytest.raises(ConfigurationError):
        Seq2SeqEncoder.from_params(params)
Example 5: get_metrics
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def get_metrics(self, reset=False):
    return dict((metric_name, metric.get_metric(reset))
                for metric_name, metric in list(self.metrics.items()))

# The FeedForward vs Maxout logic here requires a custom from_params.
Example 6: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab, params):  # type: ignore
    # pylint: disable=arguments-differ
    embedder_params = params.pop(u"text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    embedding_dropout = params.pop(u"embedding_dropout")
    pre_encode_feedforward = FeedForward.from_params(params.pop(u"pre_encode_feedforward"))
    encoder = Seq2SeqEncoder.from_params(params.pop(u"encoder"))
    integrator = Seq2SeqEncoder.from_params(params.pop(u"integrator"))
    integrator_dropout = params.pop(u"integrator_dropout")
    output_layer_params = params.pop(u"output_layer")
    if u"activations" in output_layer_params:
        output_layer = FeedForward.from_params(output_layer_params)
    else:
        output_layer = Maxout.from_params(output_layer_params)
    elmo = params.pop(u"elmo", None)
    if elmo is not None:
        elmo = Elmo.from_params(elmo)
    use_input_elmo = params.pop_bool(u"use_input_elmo", False)
    use_integrator_output_elmo = params.pop_bool(u"use_integrator_output_elmo", False)
    initializer = InitializerApplicator.from_params(params.pop(u'initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop(u'regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               embedding_dropout=embedding_dropout,
               pre_encode_feedforward=pre_encode_feedforward,
               encoder=encoder,
               integrator=integrator,
               integrator_dropout=integrator_dropout,
               output_layer=output_layer,
               elmo=elmo,
               use_input_elmo=use_input_elmo,
               use_integrator_output_elmo=use_integrator_output_elmo,
               initializer=initializer,
               regularizer=regularizer)
Example 7: test_from_params_requires_batch_first
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def test_from_params_requires_batch_first(self):
    params = Params({
        u"type": u"lstm",
        u"batch_first": False,
    })
    with pytest.raises(ConfigurationError):
        # pylint: disable=unused-variable
        encoder = Seq2SeqEncoder.from_params(params)
Example 8: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ProLocalModel':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    seq2seq_encoder_params = params.pop("seq2seq_encoder")
    seq2seq_encoder = Seq2SeqEncoder.from_params(seq2seq_encoder_params)
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               seq2seq_encoder=seq2seq_encoder,
               initializer=initializer)
Example 9: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ProGlobalAblation':
    token_embedder_params = params.pop("text_field_embedder")
    pos_embedder_params = params.pop("pos_field_embedder")
    sent_pos_embedder_params = params.pop("sent_pos_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, token_embedder_params)
    pos_field_embedder = TextFieldEmbedder.from_params(vocab, pos_embedder_params)
    sent_pos_field_embedder = TextFieldEmbedder.from_params(vocab, sent_pos_embedder_params)
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    span_end_encoder_before = Seq2SeqEncoder.from_params(params.pop("span_end_encoder_bef"))
    span_end_encoder_after = Seq2SeqEncoder.from_params(params.pop("span_end_encoder_aft"))
    dropout = params.pop('dropout', 0.2)
    init_params = params.pop('initializer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None
                   else InitializerApplicator())
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               pos_field_embedder=pos_field_embedder,
               sent_pos_field_embedder=sent_pos_field_embedder,
               modeling_layer=modeling_layer,
               span_end_encoder_before=span_end_encoder_before,
               span_end_encoder_after=span_end_encoder_after,
               dropout=dropout,
               initializer=initializer)
Example 10: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ProGlobal':
    token_embedder_params = params.pop("text_field_embedder")
    pos_embedder_params = params.pop("pos_field_embedder")
    sent_pos_embedder_params = params.pop("sent_pos_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, token_embedder_params)
    pos_field_embedder = TextFieldEmbedder.from_params(vocab, pos_embedder_params)
    sent_pos_field_embedder = TextFieldEmbedder.from_params(vocab, sent_pos_embedder_params)
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    span_end_encoder_before = Seq2SeqEncoder.from_params(params.pop("span_end_encoder_bef"))
    span_start_encoder_after = Seq2SeqEncoder.from_params(params.pop("span_start_encoder_aft"))
    span_end_encoder_after = Seq2SeqEncoder.from_params(params.pop("span_end_encoder_aft"))
    dropout = params.pop('dropout', 0.2)
    init_params = params.pop('initializer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None
                   else InitializerApplicator())
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               pos_field_embedder=pos_field_embedder,
               sent_pos_field_embedder=sent_pos_field_embedder,
               modeling_layer=modeling_layer,
               span_start_encoder_after=span_start_encoder_after,
               span_end_encoder_before=span_end_encoder_before,
               span_end_encoder_after=span_end_encoder_after,
               dropout=dropout,
               initializer=initializer)
Example 11: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ScaffoldBilstmAttentionClassifier':
    with_elmo = params.pop_bool("with_elmo", False)
    if with_elmo:
        embedder_params = params.pop("elmo_text_field_embedder")
    else:
        embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(embedder_params, vocab=vocab)
    # citation_text_encoder = Seq2VecEncoder.from_params(params.pop("citation_text_encoder"))
    citation_text_encoder = Seq2SeqEncoder.from_params(params.pop("citation_text_encoder"))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    classifier_feedforward_2 = FeedForward.from_params(params.pop("classifier_feedforward_2"))
    classifier_feedforward_3 = FeedForward.from_params(params.pop("classifier_feedforward_3"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Note: the next three values are popped here but not passed to the constructor in this snippet.
    use_lexicon = params.pop_bool("use_lexicon_features", False)
    use_sparse_lexicon_features = params.pop_bool("use_sparse_lexicon_features", False)
    data_format = params.pop('data_format')
    report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics", False)
    predict_mode = params.pop_bool("predict_mode", False)
    print(f"pred mode: {predict_mode}")
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               citation_text_encoder=citation_text_encoder,
               classifier_feedforward=classifier_feedforward,
               classifier_feedforward_2=classifier_feedforward_2,
               classifier_feedforward_3=classifier_feedforward_3,
               initializer=initializer,
               regularizer=regularizer,
               report_auxiliary_metrics=report_auxiliary_metrics,
               predict_mode=predict_mode)
Example 12: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ESIM':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    projection_feedforward = FeedForward.from_params(params.pop('projection_feedforward'))
    inference_encoder = Seq2SeqEncoder.from_params(params.pop("inference_encoder"))
    output_feedforward = FeedForward.from_params(params.pop('output_feedforward'))
    output_logit = FeedForward.from_params(params.pop('output_logit'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    dropout = params.pop("dropout", 0)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               similarity_function=similarity_function,
               projection_feedforward=projection_feedforward,
               inference_encoder=inference_encoder,
               output_feedforward=output_feedforward,
               output_logit=output_logit,
               initializer=initializer,
               dropout=dropout,
               regularizer=regularizer)
Example 13: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    premise_encoder_params = params.pop("premise_encoder", None)
    if premise_encoder_params is not None:
        premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
    else:
        premise_encoder = None
    hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
    if hypothesis_encoder_params is not None:
        hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
    else:
        hypothesis_encoder = None
    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    preload_path = params.pop('preload_path', None)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               aggregate_feedforward=aggregate_feedforward,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=initializer,
               regularizer=regularizer,
               preload_path=preload_path)
Example 14: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'LstmSwag':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               initializer=initializer,
               regularizer=regularizer)
Example 15: from_params
# Required import: from allennlp.modules import Seq2SeqEncoder [as alias]
# Or: from allennlp.modules.Seq2SeqEncoder import from_params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'StackedNNAggregateCustom':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = BasicTextFieldEmbedder.from_params(vocab, embedder_params)
    embeddings_dropout_value = params.pop("embeddings_dropout", 0.0)
    share_encoders = params.pop("share_encoders", False)
    # premise encoder
    premise_encoder_params = params.pop("premise_encoder", None)
    premise_enc_aggregate = params.pop("premise_encoder_aggregate", "max")
    if premise_encoder_params is not None:
        premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
    else:
        premise_encoder = None
    # hypothesis encoder
    if share_encoders:
        hypothesis_enc_aggregate = premise_enc_aggregate
        hypothesis_encoder = premise_encoder
    else:
        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        hypothesis_enc_aggregate = params.pop("hypothesis_encoder_aggregate", "max")
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None
    aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
    init_params = params.pop('initializer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None
                   else InitializerApplicator())
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               aggregate_feedforward=aggregate_feedforward,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=initializer,
               aggregate_hypothesis=hypothesis_enc_aggregate,
               aggregate_premise=premise_enc_aggregate,
               embeddings_dropout_value=embeddings_dropout_value,
               share_encoders=share_encoders)