This article collects typical usage examples of the Python method torch.nn.utils.rnn.pack_padded_sequence. If you have been wondering how rnn.pack_padded_sequence is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the module this method belongs to, torch.nn.utils.rnn.
Below are 15 code examples of rnn.pack_padded_sequence, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
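Before diving into the examples, here is a minimal, self-contained sketch of the usual pack/unpack round trip. The tensor shapes and the LSTM module below are illustrative only and are not taken from any of the examples that follow:

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# a toy batch of 3 sequences, already padded to length 5 and sorted by length
batch = torch.randn(3, 5, 8)        # (batch, seq_len, feature)
lengths = torch.tensor([5, 3, 2])   # true (non-padded) length of each sequence

rnn = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)

packed = pack_padded_sequence(batch, lengths, batch_first=True)
packed_out, (h_n, c_n) = rnn(packed)   # the LSTM skips the padded time steps
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)
print(out.shape)                       # torch.Size([3, 5, 16])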
Example 1: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, word, sentence_length):
    """
    :param word: padded batch of word indices, shape (N, W)
    :param sentence_length: actual (non-padded) length of each sentence in the batch
    :return: logit scores for each token position
    """
    # sort the batch by length (required for packing) and remember how to undo the sort
    word, sentence_length, desorted_indices = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
    x = self.embed(word)  # (N, W, D)
    x = self.dropout_embed(x)
    packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
    x, _ = self.bilstm(packed_embed)
    x, _ = pad_packed_sequence(x, batch_first=True)
    # restore the original batch order
    x = x[desorted_indices]
    x = self.dropout(x)
    x = torch.tanh(x)
    logit = self.linear(x)
    return logit
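Example 1 calls a project-specific helper, prepare_pack_padded_sequence, whose body is not shown on this page. As a rough, hypothetical sketch, such a helper typically sorts the batch by length (which pack_padded_sequence expects by default) and returns the indices needed to undo the sort; the actual implementation in the original repository may differ:

import torch

# Hypothetical sketch of the helper used in Example 1; signature and details are assumptions.
def prepare_pack_padded_sequence(word, sentence_length, device='cpu'):
    # sort sentences by length, longest first, as pack_padded_sequence expects
    sorted_length, sorted_indices = torch.sort(torch.as_tensor(sentence_length), descending=True)
    # indices that restore the original batch order after unpacking
    _, desorted_indices = torch.sort(sorted_indices)
    word = word[sorted_indices].to(device)
    return word, sorted_length, desorted_indices.to(device)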
Example 2: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, encoding, lengths):
    lengths = Variable(torch.LongTensor(lengths))
    if torch.cuda.is_available():
        lengths = lengths.cuda()
    if self.method == 'mean':
        encoding_pad = nn.utils.rnn.pack_padded_sequence(encoding, lengths.data.tolist(), batch_first=True)
        encoding = nn.utils.rnn.pad_packed_sequence(encoding_pad, batch_first=True, padding_value=0)[0]
        lengths = lengths.float().view(-1, 1)
        return encoding.sum(1) / lengths, None
    elif self.method == 'max':
        return encoding.max(1)  # [bsz, in_dim], [bsz, in_dim] (position)
    elif self.method == 'attn':
        size = encoding.size()  # [bsz, len, in_dim]
        x_flat = encoding.contiguous().view(-1, size[2])  # [bsz*len, in_dim]
        hbar = self.tanh(self.ws1(x_flat))  # [bsz*len, attn_hid]
        alphas = self.ws2(hbar).view(size[0], size[1])  # [bsz, len]
        alphas = nn.utils.rnn.pack_padded_sequence(alphas, lengths.data.tolist(), batch_first=True)
        alphas = nn.utils.rnn.pad_packed_sequence(alphas, batch_first=True, padding_value=-1e8)[0]
        alphas = functional.softmax(alphas, dim=1)  # [bsz, len]
        alphas = alphas.view(size[0], 1, size[1])  # [bsz, 1, len]
        return torch.bmm(alphas, encoding).squeeze(1), alphas  # [bsz, in_dim], [bsz, len]
    elif self.method == 'last':
        return torch.cat([encoding[i][lengths[i] - 1] for i in range(encoding.size(0))], dim=0), None
Example 3: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, x, x_len, atten_mask):
    # self-attention over the sequence, with padded positions pushed towards -INF
    CoAtt = torch.bmm(x, x.transpose(1, 2))
    CoAtt = atten_mask.unsqueeze(1) * CoAtt - (1 - atten_mask).unsqueeze(1) * INF
    CoAtt = torch.softmax(CoAtt, dim=-1)
    new_x = torch.cat([torch.bmm(CoAtt, x), x], -1)
    # sort by length before packing
    sorted_x_len, indx = torch.sort(x_len, 0, descending=True)
    new_x = pack_padded_sequence(new_x[indx], sorted_x_len.data.tolist(), batch_first=True)
    h0 = to_cuda(torch.zeros(2, x_len.size(0), self.hidden_size // 2), self.use_cuda)
    c0 = to_cuda(torch.zeros(2, x_len.size(0), self.hidden_size // 2), self.use_cuda)
    packed_h, (packed_h_t, _) = self.model(new_x, (h0, c0))
    # restore the sorting
    _, inverse_indx = torch.sort(indx, 0)
    packed_h_t = torch.cat([packed_h_t[i] for i in range(packed_h_t.size(0))], -1)
    restore_packed_h_t = packed_h_t[inverse_indx]
    output = restore_packed_h_t
    return output
Example 4: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, src, lengths=None, encoder_state=None):
    "See :obj:`EncoderBase.forward()`"
    self._check_args(src, lengths, encoder_state)
    emb = self.embeddings(src)
    s_len, batch, emb_dim = emb.size()
    packed_emb = emb
    if lengths is not None and not self.no_pack_padded_seq:
        # Lengths data is wrapped inside a Variable.
        lengths = lengths.view(-1).tolist()
        packed_emb = pack(emb, lengths)
    memory_bank, encoder_final = self.rnn(packed_emb, encoder_state)
    if lengths is not None and not self.no_pack_padded_seq:
        memory_bank = unpack(memory_bank)[0]
    if self.use_bridge:
        encoder_final = self._bridge(encoder_final)
    return encoder_final, memory_bank
Example 5: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, inputs):
    x, seq_lengths = inputs
    embedding = self.embeddings(x).permute(0, 2, 1)
    conv1 = self.conv1(embedding).permute(0, 2, 1)
    conv2 = self.conv2(embedding).permute(0, 2, 1)
    # element-wise max over the two convolution branches
    out = torch.stack((conv1, conv2), 3)
    out, _ = torch.max(out, 3)
    packed_input = pack_padded_sequence(
        out,
        seq_lengths.cpu().numpy(),
        batch_first=True
    )
    packed_output, (ht, ct) = self.lstm(packed_input)
    out, input_sizes = pad_packed_sequence(packed_output, batch_first=True)
    out = F.relu(self.linear1(out))
    out = self.linear2(out)
    out = out.view(-1)
    return out
Example 6: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, inputs):
    x, seq_lengths = inputs
    embedding = self.embeddings(x)
    packed_input = pack_padded_sequence(
        embedding,
        seq_lengths.cpu().numpy(),
        batch_first=True
    )
    packed_output, (ht, ct) = self.lstm(packed_input)
    output, input_sizes = pad_packed_sequence(packed_output, batch_first=True)
    out = F.relu(self.linear1(output))
    out = self.linear2(out)
    out = out.view(-1)
    return out
Example 7: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, inputs):
    x, seq_lengths = inputs
    embedding = self.embeddings(x)
    packed_input = pack_padded_sequence(
        embedding,
        seq_lengths.cpu().numpy(),
        batch_first=True
    )
    packed_output, (ht, ct) = self.lstm(packed_input)
    output, input_sizes = pad_packed_sequence(packed_output, batch_first=True)
    output = F.relu(self.conv1(output.permute(0, 2, 1)).permute(0, 2, 1))
    out = F.relu(self.linear1(output))
    out = self.linear2(out)
    out = out.view(-1)
    return out
Example 8: _defns_to_packed_seq
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def _defns_to_packed_seq(defns, field, cuda=torch.cuda.is_available(), volatile=False):
    """
    Pads a list of definitions (which must already be sorted by decreasing length!)
    :param defns: list of tokenized definitions (lists of tokens) OR
                  list of strings containing definitions
    :param field: contains padding and vocab functions.
    :param cuda: if true, we'll cudaize it
    :param volatile: passed through to the wrapping Variable
    :return: PackedSequence with a Variable.
    """
    tokenized_defns = [field.preprocess(x) for x in defns]
    defns_padded, lengths = field.pad(tokenized_defns)
    if not all(lengths[i] >= lengths[i + 1] for i in range(len(lengths) - 1)):
        raise ValueError("Sequences must be sorted by decreasing length")
    defns_tensor = torch.LongTensor([
        [field.vocab.stoi[x] for x in ex] for ex in defns_padded
    ])
    defns_packed_ = pack_padded_sequence(defns_tensor, lengths, batch_first=True)
    packed_data = Variable(defns_packed_.data, volatile=volatile)
    if cuda:
        packed_data = packed_data.cuda()
    return PackedSequence(packed_data, defns_packed_.batch_sizes)
Example 9: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, src, lengths=None):
    "See :obj:`EncoderBase.forward()`"
    self._check_args(src, lengths)
    emb = self.embeddings(src)
    # s_len, batch, emb_dim = emb.size()
    packed_emb = emb
    if lengths is not None and not self.no_pack_padded_seq:
        # Lengths data is wrapped inside a Tensor.
        lengths_list = lengths.view(-1).tolist()
        packed_emb = pack(emb, lengths_list)
    memory_bank, encoder_final = self.rnn(packed_emb)
    if lengths is not None and not self.no_pack_padded_seq:
        memory_bank = unpack(memory_bank)[0]
    if self.use_bridge:
        encoder_final = self._bridge(encoder_final)
    return encoder_final, memory_bank, lengths
Example 10: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, x, lengths):
    """Handles variable size captions
    """
    # Embed word ids to vectors
    x = self.embed(x)
    packed = pack_padded_sequence(x, lengths, batch_first=True)
    # Forward propagate RNN
    out, _ = self.rnn(packed)
    # Reshape *final* output to (batch_size, hidden_size)
    padded = pad_packed_sequence(out, batch_first=True)
    cap_emb, cap_len = padded
    if self.use_bi_gru:
        # average the forward and backward halves of the bi-GRU output
        # (integer division keeps the slice indices valid under Python 3)
        cap_emb = (cap_emb[:, :, :cap_emb.size(2) // 2] + cap_emb[:, :, cap_emb.size(2) // 2:]) / 2
    # normalization in the joint embedding space
    if not self.no_txtnorm:
        cap_emb = l2norm(cap_emb, dim=-1)
    return cap_emb, cap_len
Example 11: apply_packed_sequence
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def apply_packed_sequence(rnn, embedding, lengths):
    """ Runs a forward pass of embeddings through an rnn using packed sequence.
    Args:
        rnn: The RNN that we want to compute a forward pass with.
        embedding (FloatTensor b x seq x dim): A batch of sequence embeddings.
        lengths (LongTensor batch): The length of each sequence in the batch.
    Returns:
        output: The output of the RNN `rnn` with input `embedding`, together with
            the final (hidden, cell) states, restored to the original batch order.
    """
    # Sort batch by sequence length
    lengths_sorted, permutation = torch.sort(lengths, descending=True)
    embedding_sorted = embedding[permutation]
    # Use packed sequence
    embedding_packed = pack(embedding_sorted, lengths_sorted, batch_first=True)
    outputs_packed, (hidden, cell) = rnn(embedding_packed)
    outputs_sorted, _ = unpack(outputs_packed, batch_first=True)
    # Restore original order
    _, permutation_rev = torch.sort(permutation, descending=False)
    outputs = outputs_sorted[permutation_rev]
    hidden, cell = hidden[:, permutation_rev], cell[:, permutation_rev]
    return outputs, (hidden, cell)
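A short usage sketch for apply_packed_sequence, assuming pack and unpack are the usual aliases for pack_padded_sequence and pad_packed_sequence; the module, tensor shapes, and names below are illustrative only:

import torch
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack

lstm = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
embedding = torch.randn(4, 10, 8)       # (batch, seq, dim), already padded
lengths = torch.tensor([7, 10, 3, 5])   # unsorted lengths are fine; the helper sorts internally
outputs, (hidden, cell) = apply_packed_sequence(lstm, embedding, lengths)
print(outputs.shape)                    # torch.Size([4, 10, 16])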
Example 12: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, inp, l, null_mask):
    """
    :param inp: shape = (B, T, emb_dim)
    :param l: length of each sequence in the batch
    :param null_mask: shape = (B, T)
    :return:
    """
    B = inp.shape[0]
    T = inp.shape[1]
    inp = inp.transpose(0, 1)  # shape = (20, n_batch, emb_dim)
    packed_emb = pack(inp, l)
    outputs, h_n = self.Encoder(packed_emb)  # h_n.shape = (n_layers * n_dir, n_batch, dim_h)
    outputs = unpack(outputs, total_length=T)[0]  # shape = (20, n_batch, dim_h * n_dir)
    h_n = h_n.view(self.n_layers, self.n_dir, B, self.dim_h).transpose(1, 2).transpose(2, 3).contiguous().view(self.n_layers, B, -1)
    # shape = (n_layers, n_batch, dim_h * n_dir)
    h_n = h_n[-1, :, :]  # shape = (n_batch, dim_h * n_dir)
    context, att_weight = self.Attention(h_n,
                                         outputs.transpose(0, 1),
                                         null_mask)  # (n_batch, dim_h * n_dir), (n_batch, 20)
    cls = self.MLP(context).squeeze(1)  # shape = (n_batch, )
    return cls, att_weight
Example 13: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, input_raw, pack=False, input_len=None):
    if self.has_input:
        input = self.input(input_raw)
        input = self.relu(input)
    else:
        input = input_raw
    if pack:
        input = pack_padded_sequence(input, input_len, batch_first=True)
    output_raw, self.hidden = self.rnn(input, self.hidden)
    if pack:
        output_raw = pad_packed_sequence(output_raw, batch_first=True)[0]
    if self.has_output:
        output_raw = self.output(output_raw)
    # return hidden state at each time step
    return output_raw
Example 14: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, input, hidden=None):
    """
    input: (wrap(srcBatch), wrap(srcBioBatch), lengths)
    """
    lengths = input[-1].data.view(-1).tolist()  # lengths data is wrapped inside a Variable
    wordEmb = self.word_lut(input[0])
    emb = pack(wordEmb, lengths)
    outputs, hidden_t = self.rnn(emb, hidden)
    if isinstance(input, tuple):
        outputs = unpack(outputs)[0]
    forward_last = hidden_t[0]
    backward_last = hidden_t[1]
    time_step = outputs.size(0)
    batch_size = outputs.size(1)
    sentence_vector = torch.cat((forward_last, backward_last), dim=1)
    exp_buf = torch.cat((outputs, sentence_vector.unsqueeze(0).expand_as(outputs)), dim=2)
    selective_value = self.sigmoid(self.selective_gate(exp_buf.view(-1, exp_buf.size(2))))
    selective_value = selective_value.view(time_step, batch_size, -1)
    outputs = outputs * selective_value
    return hidden_t, outputs
Example 15: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import pack_padded_sequence [as alias]
def forward(self, embedded, hidden, input_lengths=None):
    """
    Perform multi-step encoding.
    :param embedded: (src seq len, batch size, embed size)
    :param hidden: (num directions, batch size, encoder hidden size)
    :param input_lengths: list containing the non-padded length of each sequence in this batch;
                          if set, we use `PackedSequence` to skip the PAD inputs and leave the
                          corresponding encoder states as zeros
    :return: (src seq len, batch size, hidden size * num directions = decoder hidden size)
    """
    if input_lengths is not None:
        embedded = pack_padded_sequence(embedded, input_lengths)
    output, hidden = self.gru(embedded, hidden)
    if input_lengths is not None:
        output, _ = pad_packed_sequence(output)
    if self.num_directions > 1:
        # hidden: (num directions, batch, hidden) => (1, batch, hidden * 2)
        batch_size = hidden.size(1)
        hidden = hidden.transpose(0, 1).contiguous().view(1, batch_size,
                                                          self.hidden_size * self.num_directions)
    return output, hidden