

Python modules.FeedForward Code Examples

This article collects typical usage examples of allennlp.modules.FeedForward in Python. If you are wondering what FeedForward is for, how to construct it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from its containing package, allennlp.modules.


The following presents 15 code examples of modules.FeedForward, sorted by popularity by default.
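Before diving into the examples, here is a minimal sketch (with made-up dimensions, not taken from any of the projects below) of constructing and calling a FeedForward directly; it uses the same constructor arguments that appear in Example 1:

import torch
from allennlp.modules import FeedForward
from allennlp.nn import Activation

# Hypothetical two-layer MLP: 10 -> 20 -> 5, with a ReLU after each layer.
feedforward = FeedForward(
    input_dim=10,
    num_layers=2,
    hidden_dims=[20, 5],
    activations=[Activation.by_name("relu")(), Activation.by_name("relu")()],
)
output = feedforward(torch.randn(2, 10))
print(output.shape)  # torch.Size([2, 5])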

Example 1: test_feedforward_encoder_exactly_match_feedforward_each_item

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def test_feedforward_encoder_exactly_match_feedforward_each_item(self):
        feedforward = FeedForward(
            input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
        )
        encoder = FeedForwardEncoder(feedforward)
        tensor = torch.randn([2, 3, 10])
        output = encoder(tensor)
        target = feedforward(tensor)
        numpy.testing.assert_array_almost_equal(
            target.detach().cpu().numpy(), output.detach().cpu().numpy()
        )

        # mask should work
        mask = torch.tensor([[True, True, True], [True, False, False]])
        output = encoder(tensor, mask)
        target = feedforward(tensor) * mask.unsqueeze(dim=-1).float()
        numpy.testing.assert_array_almost_equal(
            target.detach().cpu().numpy(), output.detach().cpu().numpy()
        ) 
Developer: allenai, Project: allennlp, Lines: 21, Source: feedforward_encoder_test.py

Example 2: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self,
                 encoder: Seq2SeqEncoder,
                 projection_feedforward: FeedForward,
                 inference_encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 similarity_function: SimilarityFunction = None,
                 dropout: float = 0.5) -> None:
        super().__init__()

        self._encoder = encoder
        self._matrix_attention = LegacyMatrixAttention(similarity_function)
        self._projection_feedforward = projection_feedforward
        self._inference_encoder = inference_encoder
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = InputVariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None
        self._output_feedforward = output_feedforward 
Developer: StonyBrookNLP, Project: multee, Lines: 22, Source: esim_comparator.py

Example 3: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self,
                 vocab,
                 encoder: FeedForward,
                 mean_projection: FeedForward,
                 log_variance_projection: FeedForward,
                 decoder: FeedForward,
                 kld_clamp: Optional[float] = None,
                 z_dropout: float = 0.2) -> None:
        super(LogisticNormal, self).__init__(vocab)
        self.encoder = encoder
        self.mean_projection = mean_projection
        self.log_variance_projection = log_variance_projection
        self._kld_clamp = kld_clamp
        self._decoder = torch.nn.Linear(decoder.get_input_dim(), decoder.get_output_dim(),
                                        bias=False)
        self._z_dropout = torch.nn.Dropout(z_dropout)

        self.latent_dim = mean_projection.get_output_dim() 
Developer: allenai, Project: vampire, Lines: 20, Source: logistic_normal.py
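As a rough illustration (hypothetical sizes, not code from the vampire project), the FeedForward modules passed to this constructor only need compatible dimensions: the encoder output feeds both projections, and the projections' output size is the latent dimension that the model reads back via mean_projection.get_output_dim():

from allennlp.modules import FeedForward
from allennlp.nn import Activation

vocab_size, hidden_dim, latent_dim = 2000, 512, 64  # hypothetical sizes
encoder = FeedForward(input_dim=vocab_size, num_layers=1,
                      hidden_dims=hidden_dim, activations=Activation.by_name("relu")())
mean_projection = FeedForward(input_dim=hidden_dim, num_layers=1,
                              hidden_dims=latent_dim, activations=Activation.by_name("linear")())
log_variance_projection = FeedForward(input_dim=hidden_dim, num_layers=1,
                                      hidden_dims=latent_dim, activations=Activation.by_name("linear")())
decoder = FeedForward(input_dim=latent_dim, num_layers=1,
                      hidden_dims=vocab_size, activations=Activation.by_name("linear")())
assert mean_projection.get_output_dim() == latent_dim  # becomes self.latent_dim above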

Example 4: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 input_dim: int,
                 num_classes: int,
                 label_namespace: str = "labels",
                 feedforward: Optional[FeedForward] = None,
                 dropout: Optional[float] = None,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self.label_namespace = label_namespace
        self.input_dim = input_dim
        self.num_classes = num_classes 
        self._verbose_metrics = verbose_metrics
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        self._feedforward = feedforward

        if self._feedforward is not None: 
            self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
        else:
            self.projection_layer = Linear(self.input_dim, self.num_classes)

        self.metrics = {
                "accuracy": CategoricalAccuracy(),
                "accuracy3": CategoricalAccuracy(top_k=3),
                "accuracy5": CategoricalAccuracy(top_k=5)
        }
        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: ConvLab, Project: ConvLab, Lines: 35, Source: model.py

Example 5: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 text_encoder: Seq2SeqEncoder,
                 classifier_feedforward: FeedForward,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 ) -> None:
        super(TextClassifier, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.text_encoder = text_encoder
        self.classifier_feedforward = classifier_feedforward
        self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)

        self.label_accuracy = CategoricalAccuracy()
        self.label_f1_metrics = {}

        self.verbose_metrics = verbose_metrics

        for i in range(self.num_classes):
            self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
        self.loss = torch.nn.CrossEntropyLoss()

        self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)

        initializer(self) 
Developer: allenai, Project: scibert, Lines: 30, Source: text_classifier.py

Example 6: test_get_dimension_is_correct

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def test_get_dimension_is_correct(self):
        feedforward = FeedForward(
            input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
        )
        encoder = FeedForwardEncoder(feedforward)
        assert encoder.get_input_dim() == feedforward.get_input_dim()
        assert encoder.get_output_dim() == feedforward.get_output_dim() 
Developer: allenai, Project: allennlp, Lines: 9, Source: feedforward_encoder_test.py

Example 7: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        mixture_feedforward: FeedForward = None,
        dropout: float = 0.0,
        num_layers: int = 1,
    ) -> None:
        super().__init__(
            encoder_output_dim=encoder_output_dim,
            action_embedding_dim=action_embedding_dim,
            input_attention=input_attention,
            activation=activation,
            add_action_bias=add_action_bias,
            dropout=dropout,
            num_layers=num_layers,
        )
        self._mixture_feedforward = mixture_feedforward

        if mixture_feedforward is not None:
            check_dimensions_match(
                encoder_output_dim,
                mixture_feedforward.get_input_dim(),
                "hidden state embedding dim",
                "mixture feedforward input dim",
            )
            check_dimensions_match(
                mixture_feedforward.get_output_dim(),
                1,
                "mixture feedforward output dim",
                "dimension for scalar value",
            ) 
Developer: allenai, Project: allennlp-semparse, Lines: 37, Source: linking_transition_function.py
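For reference, a minimal sketch (hypothetical encoder size, not from allennlp-semparse) of a mixture_feedforward that would pass both check_dimensions_match calls above: its input dimension matches the hidden state embedding dim, and it produces a single scalar per state:

from allennlp.modules import FeedForward
from allennlp.nn import Activation

encoder_output_dim = 200  # hypothetical hidden state embedding dim
mixture_feedforward = FeedForward(
    input_dim=encoder_output_dim,                 # matches the hidden state embedding dim
    num_layers=1,
    hidden_dims=1,                                # scalar mixture weight per state
    activations=Activation.by_name("sigmoid")(),
)
assert mixture_feedforward.get_input_dim() == encoder_output_dim
assert mixture_feedforward.get_output_dim() == 1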

Example 8: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        mixture_feedforward: FeedForward = None,
        dropout: float = 0.0,
    ) -> None:
        super().__init__(
            encoder_output_dim=encoder_output_dim,
            action_embedding_dim=action_embedding_dim,
            input_attention=input_attention,
            activation=activation,
            add_action_bias=add_action_bias,
            dropout=dropout,
        )
        self._linked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
        self._mixture_feedforward = mixture_feedforward

        if mixture_feedforward is not None:
            check_dimensions_match(
                encoder_output_dim,
                mixture_feedforward.get_input_dim(),
                "hidden state embedding dim",
                "mixture feedforward input dim",
            )
            check_dimensions_match(
                mixture_feedforward.get_output_dim(),
                1,
                "mixture feedforward output dim",
                "dimension for scalar value",
            ) 
Developer: allenai, Project: allennlp-semparse, Lines: 36, Source: linking_coverage_transition_function.py

Example 9: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 projection_feedforward: FeedForward,
                 inference_encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 output_logit: FeedForward,
                 final_feedforward: FeedForward,
                 coverage_loss: CoverageLoss,
                 similarity_function: SimilarityFunction = DotProductSimilarity(),
                 dropout: float = 0.5,
                 contextualize_pair_comparators: bool = False,
                 pair_context_encoder: Seq2SeqEncoder = None,
                 pair_feedforward: FeedForward = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        # Need to send it verbatim because otherwise FromParams doesn't work appropriately.
        super().__init__(vocab=vocab,
                         text_field_embedder=text_field_embedder,
                         encoder=encoder,
                         similarity_function=similarity_function,
                         projection_feedforward=projection_feedforward,
                         inference_encoder=inference_encoder,
                         output_feedforward=output_feedforward,
                         output_logit=output_logit,
                         final_feedforward=final_feedforward,
                         contextualize_pair_comparators=contextualize_pair_comparators,
                         coverage_loss=coverage_loss,
                         pair_context_encoder=pair_context_encoder,
                         pair_feedforward=pair_feedforward,
                         dropout=dropout,
                         initializer=initializer,
                         regularizer=regularizer)
        self._answer_loss = torch.nn.CrossEntropyLoss()

        self._accuracy = CategoricalAccuracy() 
Developer: StonyBrookNLP, Project: multee, Lines: 38, Source: single_correct_mcq_multee_esim.py

Example 10: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 projection_feedforward: FeedForward,
                 inference_encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 output_logit: FeedForward,
                 final_feedforward: FeedForward,
                 coverage_loss: CoverageLoss,
                 similarity_function: SimilarityFunction = DotProductSimilarity(),
                 dropout: float = 0.5,
                 contextualize_pair_comparators: bool = False,
                 pair_context_encoder: Seq2SeqEncoder = None,
                 pair_feedforward: FeedForward = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab=vocab,
                         text_field_embedder=text_field_embedder,
                         encoder=encoder,
                         similarity_function=similarity_function,
                         projection_feedforward=projection_feedforward,
                         inference_encoder=inference_encoder,
                         output_feedforward=output_feedforward,
                         output_logit=output_logit,
                         final_feedforward=final_feedforward,
                         coverage_loss=coverage_loss,
                         contextualize_pair_comparators=contextualize_pair_comparators,
                         pair_context_encoder=pair_context_encoder,
                         pair_feedforward=pair_feedforward,
                         dropout=dropout,
                         initializer=initializer,
                         regularizer=regularizer)
        self._ignore_index = -1
        self._answer_loss = torch.nn.CrossEntropyLoss(ignore_index=self._ignore_index)
        self._coverage_loss = coverage_loss

        self._accuracy = CategoricalAccuracy()
        self._entailment_f1 = F1Measure(self._label2idx["entailment"]) 
Developer: StonyBrookNLP, Project: multee, Lines: 40, Source: multiple_correct_mcq_multee_esim.py

Example 11: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, architecture: FeedForward) -> None:
        super(MLP, self).__init__(architecture)
        self._architecture = architecture 
Developer: allenai, Project: vampire, Lines: 5, Source: encoder.py

Example 12: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        seq2vec_encoder: Seq2VecEncoder,
        seq2seq_encoder: Seq2SeqEncoder = None,
        feedforward: Optional[FeedForward] = None,
        dropout: float = None,
        num_labels: int = None,
        label_namespace: str = "labels",
        namespace: str = "tokens",
        initializer: InitializerApplicator = InitializerApplicator(),
        **kwargs,
    ) -> None:

        super().__init__(vocab, **kwargs)
        self._text_field_embedder = text_field_embedder

        if seq2seq_encoder:
            self._seq2seq_encoder = seq2seq_encoder
        else:
            self._seq2seq_encoder = None

        self._seq2vec_encoder = seq2vec_encoder
        self._feedforward = feedforward
        if feedforward is not None:
            self._classifier_input_dim = self._feedforward.get_output_dim()
        else:
            self._classifier_input_dim = self._seq2vec_encoder.get_output_dim()

        if dropout:
            self._dropout = torch.nn.Dropout(dropout)
        else:
            self._dropout = None
        self._label_namespace = label_namespace
        self._namespace = namespace

        if num_labels:
            self._num_labels = num_labels
        else:
            self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
        self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        initializer(self) 
Developer: allenai, Project: allennlp, Lines: 47, Source: basic_classifier.py
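The dimension contract implied above is that the optional feedforward consumes the seq2vec encoder output, and its own output dimension becomes the classifier input dimension. A small sketch (hypothetical sizes and encoder choice, not AllenNLP's own test code):

from allennlp.modules import FeedForward
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import Activation

seq2vec_encoder = BagOfEmbeddingsEncoder(embedding_dim=300)   # hypothetical encoder
feedforward = FeedForward(
    input_dim=seq2vec_encoder.get_output_dim(),  # must match the encoder output
    num_layers=1,
    hidden_dims=128,
    activations=Activation.by_name("relu")(),
)
classifier_input_dim = feedforward.get_output_dim()  # 128, fed to the final Linear layer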

Example 13: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 encoder_keys: List[str],
                 mask_key: str,
                 pair2vec_config_file: str,
                 pair2vec_model_file: str,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 similarity_function: SimilarityFunction,
                 projection_feedforward: FeedForward,
                 inference_encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 output_logit: FeedForward,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 dropout: float = 0.5,
                 pair2vec_dropout: float = 0.0,
                 bidirectional_pair2vec: bool = True,
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self._vocab = vocab
        self.pair2vec = util.get_pair2vec(pair2vec_config_file, pair2vec_model_file)
        self._encoder_keys = encoder_keys
        self._mask_key = mask_key
        self._text_field_embedder = text_field_embedder
        self._projection_feedforward = projection_feedforward
        self._encoder = encoder
        from allennlp.modules.matrix_attention import DotProductMatrixAttention

        self._matrix_attention = DotProductMatrixAttention()


        self._inference_encoder = inference_encoder
        self._pair2vec_dropout = torch.nn.Dropout(pair2vec_dropout)
        self._bidirectional_pair2vec = bidirectional_pair2vec

        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
            self.rnn_input_dropout = VariationalDropout(dropout)
        else:
            self.dropout = None
            self.rnn_input_dropout = None

        self._output_feedforward = output_feedforward
        self._output_logit = output_logit

        self._num_labels = vocab.get_vocab_size(namespace="labels")


        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: mandarjoshi90, Project: pair2vec, Lines: 53, Source: esim_pair2vec.py

Example 14: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 similarity_function: SimilarityFunction,
                 projection_feedforward: FeedForward,
                 inference_encoder: Seq2SeqEncoder,
                 output_feedforward: FeedForward,
                 output_logit: FeedForward,
                 final_feedforward: FeedForward,
                 coverage_loss: CoverageLoss = None,
                 contextualize_pair_comparators: bool = False,
                 pair_context_encoder: Seq2SeqEncoder = None,
                 pair_feedforward: FeedForward = None,
                 optimize_coverage_for: List = ["entailment", "neutral"],
                 dropout: float = 0.5,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self._label2idx = self.vocab.get_token_to_index_vocabulary('labels')

        self._text_field_embedder = text_field_embedder

        self._entailment_comparator_layer_1 = EsimComparatorLayer1(encoder, dropout)
        self._entailment_comparator_layer_2 = EsimComparatorLayer2(similarity_function)

        self._td_entailment_comparator_layer_1 = TimeDistributed(self._entailment_comparator_layer_1)
        self._td_entailment_comparator_layer_2 = TimeDistributed(self._entailment_comparator_layer_2)

        self._entailment_comparator_layer_3plus_local = EsimComparatorLayer3Plus(projection_feedforward, inference_encoder,
                                                                                 output_feedforward, dropout)
        self._td_entailment_comparator_layer_3plus_local = EachOutputTimeDistributed(self._entailment_comparator_layer_3plus_local)

        self._entailment_comparator_layer_3plus_global = copy.deepcopy(self._entailment_comparator_layer_3plus_local)

        self._contextualize_pair_comparators = contextualize_pair_comparators

        if not self._contextualize_pair_comparators:
            self._output_logit = output_logit
            self._td_output_logit = TimeDistributed(self._output_logit)

        self._final_feedforward = final_feedforward
        self._td_final_feedforward = TimeDistributed(final_feedforward)

        linear = torch.nn.Linear(2*self._entailment_comparator_layer_3plus_local.get_output_dim(),
                                 self._final_feedforward.get_input_dim())
        self._local_global_projection = torch.nn.Sequential(linear, torch.nn.ReLU())

        if self._contextualize_pair_comparators:
            self._pair_context_encoder = pair_context_encoder
            self._td_pair_feedforward = TimeDistributed(pair_feedforward)

        self._coverage_loss = coverage_loss

        # Do not apply initializer. If you do, make sure it doesn't reinitialize transferred parameters. 
Developer: StonyBrookNLP, Project: multee, Lines: 56, Source: multee_esim.py

Example 15: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import FeedForward [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 sentence_encoder: Seq2VecEncoder,
                 classifier_feedforward: FeedForward,
                 label_weight: Dict[str, float] = None,
                 use_label_distribution: bool = False,
                 image_classification_ratio: float = 0.0,
                 decay_every_i_step=100000,
                 decay_ratio=0.8,
                 instance_count=100000,
                 max_epoch=10,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None
                 ) -> None:
        super(BasicClassifier, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.sentence_encoder = sentence_encoder
        self.classifier_feedforward = classifier_feedforward

        if text_field_embedder.get_output_dim() != sentence_encoder.get_input_dim():
            raise ConfigurationError("The output dimension of the text_field_embedder must match the "
                                     "input dimension of the title_encoder. Found {} and {}, "
                                     "respectively.".format(text_field_embedder.get_output_dim(),
                                                            sentence_encoder.get_input_dim()))
        self.metrics = {
                "accuracy": CategoricalAccuracy(),
                "cnn_loss": Average()
        }
        # NOTE: both branches are currently identical; presumably the else branch
        # was intended to use a label-distribution-aware loss.
        if not use_label_distribution:
            self.loss = torch.nn.CrossEntropyLoss()
        else:
            self.loss = torch.nn.CrossEntropyLoss()
        self.image_classification_ratio = image_classification_ratio
        self.decay_every_i_step = decay_every_i_step
        self.decay_ratio = decay_ratio
        self.training_step = 0
        self.current_ratio = image_classification_ratio
        self.total_steps = max_epoch*instance_count//64
        self.step_every_epoch = instance_count // 64

        print("每个epoch的step数量", self.step_every_epoch)

        initializer(self) 
Developer: ShannonAI, Project: glyce, Lines: 47, Source: model.py


Note: The allennlp.modules.FeedForward examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright belongs to those authors. Please consult each project's License before distributing or using the code; do not reproduce this article without permission.