

Python linear.Linear Code Examples

This article collects typical usage examples of the Linear class from Python's torch.nn.modules.linear module. If you are wondering what exactly linear.Linear does, how to use it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from the torch.nn.modules.linear module.


Below are 15 code examples of linear.Linear, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
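As a warm-up, here is a minimal sketch (with arbitrary dimensions) of what Linear itself does: it applies the affine map y = xWᵀ + b to the last dimension of its input.

import torch
from torch.nn.modules.linear import Linear

projection = Linear(in_features=128, out_features=10)  # W: (10, 128), b: (10,)
x = torch.randn(32, 128)   # a batch of 32 feature vectors
y = projection(x)          # shape: (32, 10)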

Example 1: get_layer_from_submodule

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def get_layer_from_submodule(self, submodule: torch.nn.Module,
                                 layers: dict, name_prefix: str = ''):
        if len(submodule._modules) > 0:
            for name, subsubmodule in submodule._modules.items():
                new_prefix = name if name_prefix == '' else name_prefix + '-' + name
                self.get_layer_from_submodule(subsubmodule, layers, new_prefix)
            return layers
        else:
            layer_name = name_prefix
            layer_type = layer_name
            if not isinstance(submodule, (Conv2d, Linear, LSTM)):
                print(f"Skipping {layer_type}")
                return layers
            if isinstance(submodule, Conv2d) and self.include_conv:
                self._add_conv_layer(submodule)
            layers[layer_name] = submodule
            print(f'added layer {layer_name}')
            return layers
Developer: delve-team, Project: delve, Lines: 24, Source: torchcallback.py

Example 2: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SimpleTagger, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(u"labels")
        self.encoder = encoder
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               u"text field embedding dim", u"encoder input dim")
        self.metrics = {
                u"accuracy": CategoricalAccuracy(),
                u"accuracy3": CategoricalAccuracy(top_k=3)
        }

        initializer(self)

Developer: plasticityai, Project: magnitude, Lines: 25, Source: simple_tagger.py
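Many of the examples here wrap the projection in AllenNLP's TimeDistributed, which reshapes a (batch, time, dim) input to (batch * time, dim), applies the wrapped module, and reshapes back. As a point of reference (a minimal sketch in plain torch, not AllenNLP code), torch's Linear already operates on the last dimension of higher-rank inputs, so a per-timestep tag projection looks like this:

import torch
from torch.nn import Linear

proj = Linear(256, 7)               # encoder hidden dim -> 7 tag classes
encoded = torch.randn(4, 20, 256)   # (batch, seq_len, hidden)
tag_logits = proj(encoded)          # (4, 20, 7): one tag distribution per token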

Example 3: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, dim: int,
                 normalize: bool = True,
                 use_coverage: bool = False,
                 init_coverage_layer: bool = True,
                 use_attn_bias=False):
        super(BahdanauAttention, self).__init__(normalize)

        self._dim = dim
        self._use_coverage = use_coverage

        self._decoder_hidden_projection_layer = Linear(dim, dim, bias=False)
        self._encoder_outputs_projection_layer = Linear(dim, dim, bias=False)
        self._v = Linear(dim, 1, bias=False)
        self._use_attn_bias = use_attn_bias
        if use_attn_bias:
            self._bias = Parameter(torch.zeros(1), requires_grad=True)
        if use_coverage or init_coverage_layer:
            self._coverage_projection_layer = Linear(1, dim, bias=False) 
Developer: IlyaGusev, Project: summarus, Lines: 20, Source: bahdanau_attention.py
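The three bias-free projections in this example are the standard ingredients of additive (Bahdanau) attention. A minimal sketch of how they typically combine, with made-up shapes and the bias/coverage terms omitted:

import torch
from torch.nn.modules.linear import Linear

dim = 64
dec_proj = Linear(dim, dim, bias=False)   # plays the role of _decoder_hidden_projection_layer
enc_proj = Linear(dim, dim, bias=False)   # plays the role of _encoder_outputs_projection_layer
v = Linear(dim, 1, bias=False)            # plays the role of _v

decoder_hidden = torch.randn(2, dim)        # (batch, dim)
encoder_outputs = torch.randn(2, 10, dim)   # (batch, seq_len, dim)

# e_i = v^T tanh(W_d h + W_e s_i), broadcast over the sequence dimension
hidden_part = dec_proj(decoder_hidden).unsqueeze(1)               # (batch, 1, dim)
scores = v(torch.tanh(hidden_part + enc_proj(encoder_outputs)))   # (batch, seq_len, 1)
attention = torch.softmax(scores.squeeze(-1), dim=-1)             # (batch, seq_len)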

Example 4: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, vocab: Vocabulary,
                 input_dim: int,
                 num_classes: int,
                 label_namespace: str = "labels",
                 feedforward: Optional[FeedForward] = None,
                 dropout: Optional[float] = None,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self.label_namespace = label_namespace
        self.input_dim = input_dim
        self.num_classes = num_classes 
        self._verbose_metrics = verbose_metrics
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        self._feedforward = feedforward

        if self._feedforward is not None: 
            self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
        else:
            self.projection_layer = Linear(self.input_dim, self.num_classes)

        self.metrics = {
                "accuracy": CategoricalAccuracy(),
                "accuracy3": CategoricalAccuracy(top_k=3),
                "accuracy5": CategoricalAccuracy(top_k=5)
        }
        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: ConvLab, Project: ConvLab, Lines: 35, Source: model.py

Example 5: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 include_start_end_transitions: bool = True,
                 dropout: Optional[float] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.label_namespace = 'labels'
        self.num_tags = self.vocab.get_vocab_size(self.label_namespace)

        # encode text
        self.text_field_embedder = text_field_embedder
        self.encoder = encoder
        self.dropout = torch.nn.Dropout(dropout) if dropout else None

        # crf
        output_dim = self.encoder.get_output_dim()
        self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
        self.crf = ConditionalRandomField(self.num_tags, constraints=None, include_start_end_transitions=include_start_end_transitions)

        self.metrics = {
            "accuracy": CategoricalAccuracy(),
            "accuracy3": CategoricalAccuracy(top_k=3)
        }
        for index, label in self.vocab.get_index_to_token_vocabulary(self.label_namespace).items():
            self.metrics['F1_' + label] = F1Measure(positive_label=index)

        initializer(self) 
Developer: allenai, Project: scibert, Lines: 32, Source: pico_crf_tagger.py

Example 6: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 label_namespace: str = "labels",
                 constraint_type: str = None,
                 include_start_end_transitions: bool = True,
                 dropout: float = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_tags = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_tags))

        if constraint_type is not None:
            labels = self.vocab.get_index_to_token_vocabulary(label_namespace)
            constraints = allowed_transitions(constraint_type, labels)
        else:
            constraints = None

        self.crf = ConditionalRandomField(
                self.num_tags, constraints,
                include_start_end_transitions=include_start_end_transitions
        )

        self.span_metric = SpanBasedF1Measure(vocab,
                                              tag_namespace=label_namespace,
                                              label_encoding=constraint_type or "BIO")

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")
        initializer(self) 
Developer: arthurmensch, Project: didyprog, Lines: 42, Source: crf_tagger.py

Example 7: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self,
                 token_embedders: Dict[str, TokenEmbedder],
                 output_dim: Optional[int] = None,
                 sum_embeddings: List[str] = None,
                 embedder_to_indexer_map: Dict[str, List[str]] = None,
                 allow_unmatched_keys: bool = False,
                 dropout: float = 0.0) -> None:
        super(UdifyTextFieldEmbedder, self).__init__()
        self._output_dim = output_dim
        self._token_embedders = token_embedders
        self._embedder_to_indexer_map = embedder_to_indexer_map
        for key, embedder in token_embedders.items():
            name = 'token_embedder_%s' % key
            self.add_module(name, embedder)
        self._allow_unmatched_keys = allow_unmatched_keys
        self._dropout = torch.nn.Dropout(p=dropout) if dropout > 0 else lambda x: x
        self._sum_embeddings = sum_embeddings if sum_embeddings is not None else []

        hidden_dim = 0
        for embedder in self._token_embedders.values():
            hidden_dim += embedder.get_output_dim()

        if len(self._sum_embeddings) > 1:
            for key in self._sum_embeddings[1:]:
                hidden_dim -= self._token_embedders[key].get_output_dim()

        if self._output_dim is None:
            self._projection_layer = None
            self._output_dim = hidden_dim
        else:
            self._projection_layer = Linear(hidden_dim, self._output_dim) 
Developer: Hyperparticle, Project: udify, Lines: 33, Source: text_field_embedder.py
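The hidden_dim bookkeeping above is easiest to follow with concrete (purely illustrative) numbers: with three token embedders of output dims 100, 50, and 50, concatenation gives hidden_dim = 200; if sum_embeddings names the two 50-dim embedders, they are summed into a single 50-dim slot rather than concatenated, so hidden_dim drops to 150, and the optional projection then maps 150 to output_dim.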

Example 8: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self,
                 encoder_output_dim: int,
                 action_embedding_dim: int,
                 input_attention: Attention,
                 dropout: float = 0.0,
                 use_coverage: bool = False) -> None:
        super(NlvrDecoderStep, self).__init__()
        self._input_attention = input_attention

        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        output_dim = encoder_output_dim
        input_dim = output_dim
        # Our decoder input will be the concatenation of the decoder hidden state and the previous
        # action embedding, and we'll project that down to the decoder's `input_dim`, which we
        # arbitrarily set to be the same as `output_dim`.
        self._input_projection_layer = Linear(output_dim + action_embedding_dim, input_dim)
        # Before making a prediction, we'll compute an attention over the input given our updated
        # hidden state. Then we concatenate those with the decoder state and project to
        # `action_embedding_dim` to make a prediction.
        self._output_projection_layer = Linear(output_dim + encoder_output_dim, action_embedding_dim)
        if use_coverage:
            # This is a multiplicative factor that is used to add the embeddings of yet to be
            # produced actions to the predicted embedding and bias it.
            self._checklist_embedding_multiplier = Parameter(torch.FloatTensor([1.0]))
        # TODO(pradeep): Do not hardcode decoder cell type.
        self._decoder_cell = LSTMCell(input_dim, output_dim)
        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x

Developer: plasticityai, Project: magnitude, Lines: 35, Source: nlvr_decoder_step.py

Example 9: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 calculate_span_f1: bool = None,
                 label_encoding: Optional[str] = None,
                 label_namespace: str = "labels",
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SimpleTagger, self).__init__(vocab, regularizer)

        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        self._verbose_metrics = verbose_metrics
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")

        # We keep calculate_span_f1 as a constructor argument for API consistency with
        # the CrfTagger, even though it is redundant in this class
        # (label_encoding serves the same purpose).
        if calculate_span_f1 and not label_encoding:
            raise ConfigurationError("calculate_span_f1 is True, but "
                                     "no label_encoding was specified.")
        self.metrics = {
            "accuracy": CategoricalAccuracy(),
            "accuracy3": CategoricalAccuracy(top_k=3)
        }

        if calculate_span_f1 or label_encoding:
            self._f1_metric = SpanBasedF1Measure(vocab,
                                                 tag_namespace=label_namespace,
                                                 label_encoding=label_encoding)
        else:
            self._f1_metric = None

        initializer(self) 
Developer: DreamerDeo, Project: HIT-SCIR-CoNLL2019, Lines: 43, Source: simple_tagger.py

Example 10: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 task: str,
                 encoder: Seq2SeqEncoder,
                 label_smoothing: float = 0.0,
                 dropout: float = 0.0,
                 adaptive: bool = False,
                 features: List[str] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(TagDecoder, self).__init__(vocab, regularizer)

        self.task = task
        self.encoder = encoder
        self.output_dim = encoder.get_output_dim()
        self.label_smoothing = label_smoothing
        self.num_classes = self.vocab.get_vocab_size(task)
        self.adaptive = adaptive
        self.features = features if features else []

        self.metrics = {
            "acc": CategoricalAccuracy(),
            # "acc3": CategoricalAccuracy(top_k=3)
        }

        if self.adaptive:
            # TODO
            adaptive_cutoffs = [round(self.num_classes / 15), 3 * round(self.num_classes / 15)]
            self.task_output = AdaptiveLogSoftmaxWithLoss(self.output_dim,
                                                          self.num_classes,
                                                          cutoffs=adaptive_cutoffs,
                                                          div_value=4.0)
        else:
            self.task_output = TimeDistributed(Linear(self.output_dim, self.num_classes))

        self.feature_outputs = torch.nn.ModuleDict()
        self.features_metrics = {}
        for feature in self.features:
            self.feature_outputs[feature] = TimeDistributed(Linear(self.output_dim,
                                                                   vocab.get_vocab_size(feature)))
            self.features_metrics[feature] = {
                "acc": CategoricalAccuracy(),
            }

        initializer(self) 
Developer: Hyperparticle, Project: udify, Lines: 47, Source: tag_decoder.py

Example 11: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 span_extractor: SpanExtractor,
                 encoder: Seq2SeqEncoder,
                 feedforward: FeedForward = None,
                 pos_tag_embedding: Embedding = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 evalb_directory_path: str = DEFAULT_EVALB_DIR) -> None:
        super(SpanConstituencyParser, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.span_extractor = span_extractor
        self.num_classes = self.vocab.get_vocab_size(u"labels")
        self.encoder = encoder
        self.feedforward_layer = TimeDistributed(feedforward) if feedforward else None
        self.pos_tag_embedding = pos_tag_embedding or None
        if feedforward is not None:
            output_dim = feedforward.get_output_dim()
        else:
            output_dim = span_extractor.get_output_dim()

        self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_classes))

        representation_dim = text_field_embedder.get_output_dim()
        if pos_tag_embedding is not None:
            representation_dim += pos_tag_embedding.get_output_dim()
        check_dimensions_match(representation_dim,
                               encoder.get_input_dim(),
                               u"representation dim (tokens + optional POS tags)",
                               u"encoder input dim")
        check_dimensions_match(encoder.get_output_dim(),
                               span_extractor.get_input_dim(),
                               u"encoder input dim",
                               u"span extractor input dim")
        if feedforward is not None:
            check_dimensions_match(span_extractor.get_output_dim(),
                                   feedforward.get_input_dim(),
                                   u"span extractor output dim",
                                   u"feedforward input dim")

        self.tag_accuracy = CategoricalAccuracy()

        if evalb_directory_path is not None:
            self._evalb_score = EvalbBracketingScorer(evalb_directory_path)
        else:
            self._evalb_score = None
        initializer(self)

Developer: plasticityai, Project: magnitude, Lines: 53, Source: constituency_parser.py

Example 12: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 label_namespace: str = u"labels",
                 constraint_type: str = None,
                 feedforward: FeedForward = None,
                 include_start_end_transitions: bool = True,
                 dropout: float = None,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(CrfTagger, self).__init__(vocab, regularizer)

        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_tags = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        self._verbose_metrics = verbose_metrics
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        self._feedforward = feedforward

        if feedforward is not None:
            output_dim = feedforward.get_output_dim()
        else:
            output_dim = self.encoder.get_output_dim()
        self.tag_projection_layer = TimeDistributed(Linear(output_dim,
                                                           self.num_tags))

        if constraint_type is not None:
            labels = self.vocab.get_index_to_token_vocabulary(label_namespace)
            constraints = allowed_transitions(constraint_type, labels)
        else:
            constraints = None

        self.crf = ConditionalRandomField(
                self.num_tags, constraints,
                include_start_end_transitions=include_start_end_transitions
        )

        self.span_metric = SpanBasedF1Measure(vocab,
                                              tag_namespace=label_namespace,
                                              label_encoding=constraint_type or u"BIO")


        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               u"text field embedding dim", u"encoder input dim")
        if feedforward is not None:
            check_dimensions_match(encoder.get_output_dim(), feedforward.get_input_dim(),
                                   u"encoder output dim", u"feedforward input dim")
        initializer(self)

Developer: plasticityai, Project: magnitude, Lines: 57, Source: crf_tagger.py

Example 13: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self,
                 encoder_output_dim: int,
                 action_embedding_dim: int,
                 input_attention: Attention,
                 num_start_types: int,
                 num_entity_types: int,
                 mixture_feedforward: FeedForward = None,
                 dropout: float = 0.0,
                 unlinked_terminal_indices: List[int] = None) -> None:
        super(WikiTablesDecoderStep, self).__init__()
        self._mixture_feedforward = mixture_feedforward
        self._entity_type_embedding = Embedding(num_entity_types, action_embedding_dim)
        self._input_attention = input_attention

        self._num_start_types = num_start_types
        self._start_type_predictor = Linear(encoder_output_dim, num_start_types)

        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        output_dim = encoder_output_dim
        input_dim = output_dim
        # Our decoder input will be the concatenation of the decoder hidden state and the previous
        # action embedding, and we'll project that down to the decoder's `input_dim`, which we
        # arbitrarily set to be the same as `output_dim`.
        self._input_projection_layer = Linear(output_dim + action_embedding_dim, input_dim)
        # Before making a prediction, we'll compute an attention over the input given our updated
        # hidden state. Then we concatenate those with the decoder state and project to
        # `action_embedding_dim` to make a prediction.
        self._output_projection_layer = Linear(output_dim + encoder_output_dim, action_embedding_dim)
        if unlinked_terminal_indices is not None:
            # This means we are using coverage to train the parser.
            # These factors are used to add the embeddings of yet to be produced actions to the
            # predicted embedding, and to boost the action logits of yet to be produced linked
            # actions, respectively.
            self._unlinked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
            self._linked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))

        self._unlinked_terminal_indices = unlinked_terminal_indices
        # TODO(pradeep): Do not hardcode decoder cell type.
        self._decoder_cell = LSTMCell(input_dim, output_dim)

        if mixture_feedforward is not None:
            check_dimensions_match(output_dim, mixture_feedforward.get_input_dim(),
                                   u"hidden state embedding dim", u"mixture feedforward input dim")
            check_dimensions_match(mixture_feedforward.get_output_dim(), 1,
                                   u"mixture feedforward output dim", u"dimension for scalar value")

        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x

Developer: plasticityai, Project: magnitude, Lines: 55, Source: wikitables_decoder_step.py

Example 14: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        dropout: float = 0.0,
        num_layers: int = 1,
    ) -> None:
        super().__init__()
        self._input_attention = input_attention
        self._add_action_bias = add_action_bias
        self._activation = activation
        self._num_layers = num_layers

        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        output_dim = encoder_output_dim
        input_dim = output_dim
        # Our decoder input will be the concatenation of the attended encoder hidden state (i.e.,
        # the attended question encoding) and the previous action embedding, and we'll project that
        # down to the decoder's `input_dim`, which we arbitrarily set to be the same as
        # `output_dim`.
        self._input_projection_layer = Linear(encoder_output_dim + action_embedding_dim, input_dim)
        # Before making a prediction, we'll compute an attention over the input given our updated
        # hidden state. Then we concatenate those with the decoder state and project to
        # `action_embedding_dim` to make a prediction.
        self._output_projection_layer = Linear(
            output_dim + encoder_output_dim, action_embedding_dim
        )
        if self._num_layers > 1:
            self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
        else:
            # We use an ``LSTMCell`` if we just have one layer because it is slightly faster since we are
            # just running the LSTM for one step each time.
            self._decoder_cell = LSTMCell(input_dim, output_dim)

        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x 
Developer: allenai, Project: allennlp-semparse, Lines: 44, Source: basic_transition_function.py
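The comments in this example describe the data flow around the two Linear layers. A shape-level sketch in plain torch (hypothetical dimensions, not the project's actual code):

import torch

batch, encoder_output_dim, action_embedding_dim = 2, 64, 32
input_projection = torch.nn.Linear(encoder_output_dim + action_embedding_dim, encoder_output_dim)
output_projection = torch.nn.Linear(encoder_output_dim + encoder_output_dim, action_embedding_dim)

attended_encoding = torch.randn(batch, encoder_output_dim)   # attention-weighted encoder states
previous_action = torch.randn(batch, action_embedding_dim)
hidden = torch.randn(batch, encoder_output_dim)              # decoder hidden state

# decoder input: concat(attended encoding, previous action embedding) -> input_dim
decoder_input = input_projection(torch.cat([attended_encoding, previous_action], dim=-1))
# prediction: concat(hidden state, attended encoding) -> action embedding space
predicted_action = output_projection(torch.cat([hidden, attended_encoding], dim=-1))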

Example 15: __init__

# Required import: from torch.nn.modules import linear [as alias]
# Or: from torch.nn.modules.linear import Linear [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 source_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 max_decoding_steps: int,
                 attention: Attention = None,
                 beam_size: int = None,
                 target_namespace: str = "tokens",
                 target_embedding_dim: int = None,
                 scheduled_sampling_ratio: float = 0.,
                 use_projection: bool = False,
                 projection_dim: int = None,
                 tie_embeddings: bool = False) -> None:
        super(Seq2Seq, self).__init__(
            vocab,
            source_embedder,
            encoder,
            max_decoding_steps,
            attention,
            None,
            beam_size,
            target_namespace,
            target_embedding_dim,
            scheduled_sampling_ratio
        )
        use_projection = use_projection or projection_dim is not None

        self._tie_embeddings = tie_embeddings

        if self._tie_embeddings:
            assert "token_embedder_tokens" in dict(self._source_embedder.named_children())
            source_token_embedder = dict(self._source_embedder.named_children())["token_embedder_tokens"]
            self._target_embedder.weight = source_token_embedder.weight

        num_classes = self.vocab.get_vocab_size(self._target_namespace)
        self._use_projection = use_projection
        if self._use_projection:
            self._projection_dim = projection_dim or self._source_embedder.get_output_dim()
            self._hidden_projection_layer = Linear(self._decoder_output_dim, self._projection_dim)
            self._output_projection_layer = Linear(self._projection_dim, num_classes)
        else:
            self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
        self._bleu = False 
Developer: IlyaGusev, Project: summarus, Lines: 45, Source: seq2seq.py


Note: The torch.nn.modules.linear.Linear examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not repost without permission.