

Python rnn.pad_packed_sequence Method Code Examples

This article collects typical usage examples of the Python method torch.nn.utils.rnn.pad_packed_sequence. If you are wondering what rnn.pad_packed_sequence does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples from its containing module, torch.nn.utils.rnn.


The following presents 15 code examples of rnn.pad_packed_sequence, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
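Before diving into the project code, here is a minimal, self-contained sketch of the pack/pad round trip that all of the examples below build on. pad_packed_sequence is the inverse of pack_padded_sequence: it unpacks the PackedSequence produced by an RNN back into a padded tensor plus the true sequence lengths. (The shapes and variable names here are illustrative only.)

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# Two sequences of lengths 3 and 2, padded to length 3 (batch_first layout).
batch = torch.randn(2, 3, 8)    # (batch, seq, feature)
lengths = torch.tensor([3, 2])  # must be sorted descending unless enforce_sorted=False

rnn = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
packed = pack_padded_sequence(batch, lengths, batch_first=True)
packed_out, _ = rnn(packed)     # the RNN skips the padded timesteps

# Back to a padded tensor; out_lengths equals the original lengths.
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
print(out.shape)                # torch.Size([2, 3, 16])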

Example 1: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, word, sentence_length):
        """
        :param word:
        :param sentence_length:
        :param desorted_indices:
        :return:
        """
        word, sentence_length, desorted_indices = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
        x = self.embed(word)  # (N,W,D)
        x = self.dropout_embed(x)
        packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
        x, _ = self.bilstm(packed_embed)
        x, _ = pad_packed_sequence(x, batch_first=True)
        x = x[desorted_indices]
        x = self.dropout(x)
        x = torch.tanh(x)
        logit = self.linear(x)
        return logit 
Author: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines: 20, Source: BiLSTM.py
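Note that prepare_pack_padded_sequence here is a project helper, not part of PyTorch: it sorts the batch by descending length (as pack_padded_sequence requires by default) and returns the indices needed to undo the sort afterwards. A minimal sketch of what such a helper might look like (an assumption for illustration, not the project's actual code):

def prepare_pack_padded_sequence(word, sentence_length, device="cpu"):
    # Sort by descending length (pack_padded_sequence requires this by default)
    # and remember the permutation needed to restore the original order.
    lengths = torch.as_tensor(sentence_length)
    sorted_lengths, sorted_indices = torch.sort(lengths, descending=True)
    _, desorted_indices = torch.sort(sorted_indices)
    word = word.to(device)[sorted_indices.to(device)]
    return word, sorted_lengths, desorted_indices.to(device)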

Example 2: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, encoding, lengths):
        # Variable is a legacy no-op wrapper in modern PyTorch; a plain tensor suffices.
        lengths = torch.LongTensor(lengths)
        if torch.cuda.is_available():
            lengths = lengths.cuda()
        if self.method == 'mean':
            encoding_pad = nn.utils.rnn.pack_padded_sequence(encoding, lengths.data.tolist(), batch_first=True)
            encoding = nn.utils.rnn.pad_packed_sequence(encoding_pad, batch_first=True, padding_value=0)[0]
            lengths = lengths.float().view(-1, 1)
            return encoding.sum(1) / lengths, None
        elif self.method == 'max':
            return encoding.max(1)  # [bsz, in_dim], [bsz, in_dim] (position)
        elif self.method == 'attn':
            size = encoding.size()  # [bsz, len, in_dim]
            x_flat = encoding.contiguous().view(-1, size[2])  # [bsz*len, in_dim]
            hbar = self.tanh(self.ws1(x_flat))  # [bsz*len, attn_hid]
            alphas = self.ws2(hbar).view(size[0], size[1])  # [bsz, len]
            alphas = nn.utils.rnn.pack_padded_sequence(alphas, lengths.data.tolist(), batch_first=True)
            alphas = nn.utils.rnn.pad_packed_sequence(alphas, batch_first=True, padding_value=-1e8)[0]
            alphas = functional.softmax(alphas, dim=1)  # [bsz, len]
            alphas = alphas.view(size[0], 1, size[1])  # [bsz, 1, len]
            return torch.bmm(alphas, encoding).squeeze(1), alphas  # [bsz, in_dim], [bsz, len]
        elif self.method == 'last':
            # stack (not cat) so the result keeps shape [bsz, in_dim]
            return torch.stack([encoding[i][lengths[i] - 1] for i in range(encoding.size(0))], dim=0), None 
Author: ExplorerFreda, Project: VSE-C, Lines: 25, Source: model.py

Example 3: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, src, lengths=None, encoder_state=None):
        "See :obj:`EncoderBase.forward()`"
        self._check_args(src, lengths, encoder_state)

        emb = self.embeddings(src)
        s_len, batch, emb_dim = emb.size()

        packed_emb = emb
        if lengths is not None and not self.no_pack_padded_seq:
            # Lengths data is wrapped inside a Variable.
            lengths = lengths.view(-1).tolist()
            packed_emb = pack(emb, lengths)

        memory_bank, encoder_final = self.rnn(packed_emb, encoder_state)

        if lengths is not None and not self.no_pack_padded_seq:
            memory_bank = unpack(memory_bank)[0]

        if self.use_bridge:
            encoder_final = self._bridge(encoder_final)
        return encoder_final, memory_bank 
Author: xiadingZ, Project: video-caption-openNMT.pytorch, Lines: 23, Source: Models.py

Example 4: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, inputs):
        x, seq_lengths = inputs

        embedding = self.embeddings(x).permute(0, 2, 1)

        conv1 = self.conv1(embedding).permute(0, 2, 1)
        conv2 = self.conv2(embedding).permute(0, 2, 1)

        out = torch.stack((conv1, conv2), 3)
        out, _ = torch.max(out, 3)

        packed_input = pack_padded_sequence(
            out,
            seq_lengths.cpu().numpy(),
            batch_first=True
        )

        packed_output, (ht, ct) = self.lstm(packed_input)
        out, input_sizes = pad_packed_sequence(packed_output, batch_first=True)
        out = F.relu(self.linear1(out))
        out = self.linear2(out)

        out = out.view(-1)

        return out 
Author: PyThaiNLP, Project: attacut, Lines: 27, Source: seq_ch_conv_lstm.py

Example 5: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, inputs):
        x, seq_lengths = inputs

        embedding = self.embeddings(x)
        packed_input = pack_padded_sequence(
            embedding,
            seq_lengths.cpu().numpy(),
            batch_first=True
        )

        packed_output, (ht, ct) = self.lstm(packed_input)
        output, input_sizes = pad_packed_sequence(packed_output, batch_first=True)

        out = F.relu(self.linear1(output))
        out = self.linear2(out)
        out = out.view(-1)

        return out 
Author: PyThaiNLP, Project: attacut, Lines: 20, Source: seq_lstm.py

Example 6: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, inputs):
        x, seq_lengths = inputs

        embedding = self.embeddings(x)
        packed_input = pack_padded_sequence(
            embedding,
            seq_lengths.cpu().numpy(),
            batch_first=True
        )

        packed_output, (ht, ct) = self.lstm(packed_input)
        output, input_sizes = pad_packed_sequence(packed_output, batch_first=True)

        output = F.relu(self.conv1(output.permute(0, 2, 1)).permute(0, 2, 1))

        out = F.relu(self.linear1(output))
        out = self.linear2(out)
        out = out.view(-1)

        return out 
Author: PyThaiNLP, Project: attacut, Lines: 22, Source: seq_ch_lstm_conv.py

Example 7: log_val

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def log_val(word_inds, defns, cost, rank, ranking, num_ex=10):
    print("mean rank {:.1f}------------------------------------------".format(np.mean(rank)))

    engl_defns, ls = pad_packed_sequence(defns, batch_first=True, padding_value=0)
    spacing = np.linspace(len(ls) // num_ex, len(ls), endpoint=False, num=num_ex, dtype=np.int64)

    engl_defns = [' '.join([val_data.fields['text'].vocab.itos[x] for x in d[1:(l - 1)]])
                  for d, l in zip(engl_defns.cpu().data.numpy()[spacing], [ls[s] for s in spacing])]

    top_scorers = [[val_data.fields['label'].vocab.itos[int(x)] for x in t]
                   for t in ranking.data.cpu().numpy()[spacing, :3]]
    words = [val_data.fields['label'].vocab.itos[int(wi)] for wi in word_inds.cpu().numpy()[spacing]]

    for w, (word, rank_, top3, l, defn) in enumerate(
            zip(words, rank, top_scorers, cost, engl_defns)):
        print("w{:2d}/{:2d}, R{:5d} {:>30} ({:.3f}){:>13}: {}".format(
            w, 64, rank_, ' '.join(top3), l, word, defn))
    print("------------------------------------------------------------") 
Author: uwnlp, Project: verb-attributes, Lines: 20, Source: def_to_atts_pretrain.py

Example 8: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, src, lengths=None):
        "See :obj:`EncoderBase.forward()`"
        self._check_args(src, lengths)

        emb = self.embeddings(src)
        # s_len, batch, emb_dim = emb.size()

        packed_emb = emb
        if lengths is not None and not self.no_pack_padded_seq:
            # Lengths data is wrapped inside a Tensor.
            lengths_list = lengths.view(-1).tolist()
            packed_emb = pack(emb, lengths_list)

        memory_bank, encoder_final = self.rnn(packed_emb)

        if lengths is not None and not self.no_pack_padded_seq:
            memory_bank = unpack(memory_bank)[0]

        if self.use_bridge:
            encoder_final = self._bridge(encoder_final)
        return encoder_final, memory_bank, lengths 
Author: lizekang, Project: ITDD, Lines: 23, Source: rnn_encoder.py

Example 9: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, x, lengths):
        """Handles variable size captions
        """
        # Embed word ids to vectors
        x = self.embed(x)
        packed = pack_padded_sequence(x, lengths, batch_first=True)

        # Forward propagate RNN
        out, _ = self.rnn(packed)

        # Reshape *final* output to (batch_size, hidden_size)
        padded = pad_packed_sequence(out, batch_first=True)
        cap_emb, cap_len = padded

        if self.use_bi_gru:
            # integer division (//): Python 3 slice indices must be ints
            cap_emb = (cap_emb[:, :, :cap_emb.size(2) // 2] + cap_emb[:, :, cap_emb.size(2) // 2:]) / 2

        # normalization in the joint embedding space
        if not self.no_txtnorm:
            cap_emb = l2norm(cap_emb, dim=-1)

        return cap_emb, cap_len 
Author: kuanghuei, Project: SCAN, Lines: 24, Source: model.py

Example 10: apply_packed_sequence

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def apply_packed_sequence(rnn, embedding, lengths):
    """ Runs a forward pass of embeddings through an rnn using packed sequence.
    Args:
       rnn: The RNN that that we want to compute a forward pass with.
       embedding (FloatTensor b x seq x dim): A batch of sequence embeddings.
       lengths (LongTensor batch): The length of each sequence in the batch.

    Returns:
       output: The output of the RNN `rnn` with input `embedding`
    """
    # Sort Batch by sequence length
    lengths_sorted, permutation = torch.sort(lengths, descending=True)
    embedding_sorted = embedding[permutation]

    # Use Packed Sequence
    embedding_packed = pack(embedding_sorted, lengths_sorted, batch_first=True)
    outputs_packed, (hidden, cell) = rnn(embedding_packed)
    outputs_sorted, _ = unpack(outputs_packed, batch_first=True)
    # Restore original order
    _, permutation_rev = torch.sort(permutation, descending=False)
    outputs = outputs_sorted[permutation_rev]
    hidden, cell = hidden[:, permutation_rev], cell[:, permutation_rev]
    return outputs, (hidden, cell) 
Author: Unbabel, Project: OpenKiwi, Lines: 25, Source: utils.py
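A note on the pattern above: the manual sort/unsort is only needed on older PyTorch. Since PyTorch 1.1, pack_padded_sequence accepts enforce_sorted=False and handles the reordering internally, so the same helper reduces to the sketch below (assuming, as above, an LSTM-style rnn that returns a (hidden, cell) state):

from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack

def apply_packed_sequence_unsorted(rnn, embedding, lengths):
    # enforce_sorted=False sorts internally and restores the original
    # batch order in both the padded outputs and the final states.
    packed = pack(embedding, lengths.cpu(), batch_first=True, enforce_sorted=False)
    outputs_packed, (hidden, cell) = rnn(packed)
    outputs, _ = unpack(outputs_packed, batch_first=True)
    return outputs, (hidden, cell)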

Example 11: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, inp, l, null_mask):
        """

        :param inp: shape = (B, T, emb_dim)
        :param null_mask: shape = (B, T)
        :return:
        """
        B = inp.shape[0]
        T = inp.shape[1]
        inp = inp.transpose(0, 1)  # shape = (20, n_batch, emb_dim)
        packed_emb = pack(inp, l)
        outputs, h_n = self.Encoder(packed_emb)  # h_n.shape = (n_layers * n_dir, n_batch, dim_h)
        outputs = unpack(outputs, total_length=T)[0]  # shape = (20, n_batch, dim_h * n_dir)
        h_n = h_n.view(self.n_layers, self.n_dir, B, self.dim_h).transpose(1, 2).transpose(2, 3).contiguous().view(self.n_layers, B, -1)
        # shape = (n_layers, n_batch, dim_h * n_dir)
        h_n = h_n[-1, :, :]  # shape = (n_batch, dim_h * n_dir)
        context, att_weight = self.Attention(h_n,
                                             outputs.transpose(0, 1),
                                             null_mask)  # (n_batch, dim_h * n_dir), (n_batch, 20)
        cls = self.MLP(context).squeeze(1)  # shape = (n_batch, )

        return cls, att_weight 
Author: ChenWu98, Project: Point-Then-Operate, Lines: 24, Source: attention_classifier.py

Example 12: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, input_raw, pack=False, input_len=None):
        if self.has_input:
            input = self.input(input_raw)
            input = self.relu(input)
        else:
            input = input_raw
        if pack:
            input = pack_padded_sequence(input, input_len, batch_first=True)
        output_raw, self.hidden = self.rnn(input, self.hidden)
        if pack:
            output_raw = pad_packed_sequence(output_raw, batch_first=True)[0]
        if self.has_output:
            output_raw = self.output(output_raw)
        # return hidden state at each time step
        return output_raw

# plain GRU model 
Author: JiaxuanYou, Project: graph-generation, Lines: 19, Source: model.py

Example 13: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, input, hidden=None):
        """
        input: (wrap(srcBatch), wrap(srcBioBatch), lengths)
        """
        lengths = input[-1].data.view(-1).tolist()  # lengths data is wrapped inside a Variable
        wordEmb = self.word_lut(input[0])
        emb = pack(wordEmb, lengths)
        outputs, hidden_t = self.rnn(emb, hidden)
        if isinstance(input, tuple):
            outputs = unpack(outputs)[0]
        forward_last = hidden_t[0]
        backward_last = hidden_t[1]
        time_step = outputs.size(0)
        batch_size = outputs.size(1)
        sentence_vector = torch.cat((forward_last, backward_last), dim=1)
        exp_buf = torch.cat((outputs, sentence_vector.unsqueeze(0).expand_as(outputs)), dim=2)
        selective_value = self.sigmoid(self.selective_gate(exp_buf.view(-1, exp_buf.size(2))))
        selective_value = selective_value.view(time_step, batch_size, -1)
        outputs = outputs * selective_value
        return hidden_t, outputs 
Author: magic282, Project: SEASS, Lines: 22, Source: Models.py

Example 14: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, embedded, hidden, input_lengths=None):
    """
    :param embedded: (src seq len, batch size, embed size)
    :param hidden: (num directions, batch size, encoder hidden size)
    :param input_lengths: list containing the non-padded length of each sequence in this batch;
                          if set, we use `PackedSequence` to skip the PAD inputs and leave the
                          corresponding encoder states as zeros
    :return: (src seq len, batch size, hidden size * num directions = decoder hidden size)

    Perform multi-step encoding.
    """
    if input_lengths is not None:
      embedded = pack_padded_sequence(embedded, input_lengths)

    output, hidden = self.gru(embedded, hidden)

    if input_lengths is not None:
      output, _ = pad_packed_sequence(output)

    if self.num_directions > 1:
      # hidden: (num directions, batch, hidden) => (1, batch, hidden * 2)
      batch_size = hidden.size(1)
      hidden = hidden.transpose(0, 1).contiguous().view(1, batch_size,
                                                        self.hidden_size * self.num_directions)
    return output, hidden 
Author: ymfa, Project: seq2seq-summarizer, Lines: 27, Source: model.py

Example 15: forward

# Required module: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pad_packed_sequence [as alias]
def forward(self, padded_seqs, seq_lengths, hidden_state=None):  # pylint: disable=W0221
        """
        Performs a forward pass on the model. Note: you pass the **whole** sequence.
        :param padded_seqs: Padded input tensor (batch_size, seq_size).
        :param seq_lengths: Length of each sequence in the batch.
        :param hidden_state: Hidden state tensor.
        :return: A tuple with the output state and the output hidden state.
        """
        batch_size = padded_seqs.size(0)
        if hidden_state is None:
            size = (self.num_layers, batch_size, self.num_dimensions)
            hidden_state = [torch.zeros(*size).cuda(), torch.zeros(*size).cuda()]

        padded_encoded_seqs = self._embedding(padded_seqs)  # (batch,seq,embedding)
        packed_encoded_seqs = tnnur.pack_padded_sequence(
            padded_encoded_seqs, seq_lengths, batch_first=True, enforce_sorted=False)
        packed_encoded_seqs, hidden_state = self._rnn(packed_encoded_seqs, hidden_state)
        padded_encoded_seqs, _ = tnnur.pad_packed_sequence(packed_encoded_seqs, batch_first=True)

        mask = (padded_encoded_seqs[:, :, 0] != 0).unsqueeze(dim=-1).type(torch.float)
        logits = self._linear(padded_encoded_seqs)*mask
        return (logits, hidden_state) 
Author: undeadpixel, Project: reinvent-randomized, Lines: 24, Source: model.py


Note: The torch.nn.utils.rnn.pad_packed_sequence examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects and copyright remains with their original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this article without permission.