This article collects typical usage examples of the Python method torch.nn.utils.rnn.PackedSequence. If you are struggling with questions like: what exactly does rnn.PackedSequence do, how is it used, or where can I find examples of it, then the curated code samples below may help. You can also read further usage examples from the containing module, torch.nn.utils.rnn.
Below, 15 code examples of rnn.PackedSequence are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
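Before diving into the examples, a quick refresher on the API they all build on may help. The sketch below (values are illustrative) shows how a PackedSequence is usually produced with pack_padded_sequence and inverted with pad_packed_sequence:

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# Two sequences of lengths 3 and 2, already sorted by decreasing length.
padded = torch.tensor([[1., 2., 3.],
                       [4., 5., 0.]])          # [batch, max_len]
lengths = torch.tensor([3, 2])

packed = pack_padded_sequence(padded, lengths, batch_first=True)
print(packed.data)         # tensor([1., 4., 2., 5., 3.]) -- time-major, no padding
print(packed.batch_sizes)  # tensor([2, 2, 1]) -- active sequences per time step

unpadded, out_lengths = pad_packed_sequence(packed, batch_first=True)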
Example 1: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, inputs, hidden=None):
    hidden = hidden or tuple([None] * len(self))
    next_hidden = []
    for i, module in enumerate(self._modules.values()):
        output, h = module(inputs, hidden[i])
        next_hidden.append(h)
        # Residual connection, applied only when the feature dimensions match.
        if self.residual and inputs.size(-1) == output.size(-1):
            inputs = output + inputs
        else:
            inputs = output
        # Inter-layer dropout: for packed input, apply it to the flat .data
        # tensor and rewrap with the original batch_sizes.
        if isinstance(inputs, PackedSequence):
            data = nn.functional.dropout(
                inputs.data, self.dropout, self.training)
            inputs = PackedSequence(data, inputs.batch_sizes)
        else:
            inputs = nn.functional.dropout(
                inputs, self.dropout, self.training)
    return output, tuple(next_hidden)
Author: nadavbh12, Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch, Lines: 21, Source file: recurrent.py
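The dropout branch above illustrates a pattern that recurs throughout these examples: apply an elementwise operation to packed.data and rewrap the result with the original metadata. A standalone sketch of that pattern (apply_to_packed is an illustrative name, not part of the repo above):

import torch
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence

def apply_to_packed(packed, fn):
    # Apply an elementwise function to the flat data and rebuild the
    # PackedSequence with the same layout metadata.
    return PackedSequence(fn(packed.data), packed.batch_sizes,
                          packed.sorted_indices, packed.unsorted_indices)

packed = pack_padded_sequence(torch.randn(2, 3, 4), torch.tensor([3, 2]),
                              batch_first=True)
dropped = apply_to_packed(packed, lambda t: torch.nn.functional.dropout(t, 0.5, True))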
Example 2: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, input, att_scores=None, hx=None):
    if not isinstance(input, PackedSequence) or not isinstance(att_scores, PackedSequence):
        raise NotImplementedError("DynamicGRU only supports packed input and att_scores")
    input, batch_sizes, sorted_indices, unsorted_indices = input
    att_scores, _, _, _ = att_scores
    max_batch_size = int(batch_sizes[0])
    if hx is None:
        hx = torch.zeros(max_batch_size, self.hidden_size,
                         dtype=input.dtype, device=input.device)
    outputs = torch.zeros(input.size(0), self.hidden_size,
                          dtype=input.dtype, device=input.device)
    begin = 0
    for batch in batch_sizes:
        # One time step: update the states of the `batch` longest sequences.
        new_hx = self.rnn(
            input[begin:begin + batch],
            hx[0:batch],
            att_scores[begin:begin + batch])
        outputs[begin:begin + batch] = new_hx
        hx = new_hx
        begin += batch
    return PackedSequence(outputs, batch_sizes, sorted_indices, unsorted_indices)
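The per-time-step loop works because packed data is laid out time-major: the first batch_sizes[0] rows are step 0 of every sequence, the next batch_sizes[1] rows are step 1 of the still-active sequences, and so on. A minimal sketch of the same traversal using only public PackedSequence fields:

import torch
from torch.nn.utils.rnn import pack_padded_sequence

packed = pack_padded_sequence(torch.randn(3, 4, 8), torch.tensor([4, 3, 1]),
                              batch_first=True)
begin = 0
for step, batch in enumerate(packed.batch_sizes):
    chunk = packed.data[begin:begin + batch]  # [batch, 8]: step `step` of the
    begin += batch                            # `batch` longest sequences
    print(step, chunk.shape)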
Example 3: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, defns, word_embeds=None):
    """
    Forward pass
    :param defns: PackedSequence with definitions
    :param word_embeds: [batch_size, dim] word embeddings
    :return:
    """
    batch_embed = PackedSequence(self.embed(defns.data), defns.batch_sizes)
    output, h_n = self.gru(batch_embed)
    # Concatenate the final forward and backward hidden states of the BiGRU.
    h_rep = h_n.transpose(0, 1).contiguous().view(-1, self.hidden_size * 2)
    h_rep = self.d(h_rep)
    if self.embed_input and (word_embeds is None):
        raise ValueError("Must supply word embeddings")
    elif self.embed_input:
        h_rep = torch.cat((h_rep, word_embeds), 1)
    return self.fc(h_rep)
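The first line of this example works because nn.Embedding, like dropout in Example 1, maps rows independently and can therefore be applied directly to the flat data tensor. A self-contained sketch of the same embed-then-BiGRU pattern (dimensions are illustrative):

import torch
from torch import nn
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence

embed = nn.Embedding(num_embeddings=100, embedding_dim=32)
gru = nn.GRU(32, 64, batch_first=True, bidirectional=True)

tokens = pack_padded_sequence(torch.randint(100, (2, 5)), torch.tensor([5, 3]),
                              batch_first=True)
embedded = PackedSequence(embed(tokens.data), tokens.batch_sizes)
_, h_n = gru(embedded)                            # h_n: [2, batch, 64]
h_rep = h_n.transpose(0, 1).reshape(-1, 64 * 2)   # concat fwd/bwd final states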
Example 4: _defns_to_packed_seq
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def _defns_to_packed_seq(defns, field, cuda=torch.cuda.is_available(), volatile=False):
    """
    Pads a list of definitions (in sorted order!)
    :param defns: list of tokenized definitions OR list of definition strings
    :param field: contains padding and vocab functions
    :param cuda: if True, move the result to the GPU
    :param volatile: if True, mark the underlying Variable as volatile (inference only)
    :return: PackedSequence wrapping a Variable
    """
    tokenized_defns = [field.preprocess(x) for x in defns]
    defns_padded, lengths = field.pad(tokenized_defns)
    if not all(lengths[i] >= lengths[i + 1] for i in range(len(lengths) - 1)):
        raise ValueError("Sequences must be in decreasing order")
    defns_tensor = torch.LongTensor([
        [field.vocab.stoi[x] for x in ex] for ex in defns_padded
    ])
    defns_packed_ = pack_padded_sequence(defns_tensor, lengths, batch_first=True)
    packed_data = Variable(defns_packed_.data, volatile=volatile)
    if cuda:
        packed_data = packed_data.cuda()
    return PackedSequence(packed_data, defns_packed_.batch_sizes)
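Variable and volatile belong to pre-0.4 PyTorch. On current versions the same helper could plausibly be written as below (a sketch, assuming field exposes torchtext-style preprocess, pad, and vocab.stoi; use torch.no_grad() around the call where volatile=True was intended):

import torch
from torch.nn.utils.rnn import pack_padded_sequence

def defns_to_packed_seq(defns, field, device="cpu"):
    tokenized = [field.preprocess(x) for x in defns]
    padded, lengths = field.pad(tokenized)
    tensor = torch.tensor([[field.vocab.stoi[tok] for tok in ex] for ex in padded],
                          dtype=torch.long, device=device)
    # enforce_sorted=False lets PyTorch sort/unsort internally (PyTorch >= 1.1),
    # so the explicit decreasing-length check is no longer needed.
    return pack_padded_sequence(tensor, torch.tensor(lengths),
                                batch_first=True, enforce_sorted=False)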
Example 5: nlog_like_of_obs
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def nlog_like_of_obs(self, obs: rnn.PackedSequence):
    """
    Calculate the negative log likelihood of the sequence. At each step we feed
    in the previous ground-truth observation, i.e. using this function during
    training amounts to teacher forcing.
    """
    # Set up the ground-truth inputs from previous time steps to be fed into the bottom of the RNN.
    symbol_seq_packed_minus_last = torch_utils.remove_last_from_packed_seq(obs)
    embeddings = self.embedder.forward_on_packed_sequence(symbol_seq_packed_minus_last, stops_pre_filtered_flag=True)
    inputs = torch_utils.prepend_tensor_to_start_of_packed_seq(embeddings, mchef_config.SOS_TOKEN)
    # Feed the embeddings through the network.
    initial_hidden = self._initial_hidden_after_update
    outputs, _ = self.gru(inputs, initial_hidden)
    outputs_mapped = self.mlp_out(outputs.data)
    self.decoder_top.update(outputs_mapped)
    # Work out the nll for each element of each sequence, then sum over the whole sequence length.
    nll_per_obs = self.decoder_top.nlog_like_of_obs(obs.data)
    nll_packed = rnn.PackedSequence(nll_per_obs, *obs[1:])
    nll_padded, _ = rnn.pad_packed_sequence(nll_packed, batch_first=True, padding_value=0.0)
    nll_per_seq = nll_padded.sum(dim=tuple(range(1, len(nll_padded.shape))))
    return nll_per_seq
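The last three lines show a generally useful reduction: to turn a per-element packed quantity into one value per sequence, pad with a sum-neutral padding_value and sum over time. A minimal standalone sketch:

import torch
from torch.nn.utils import rnn

packed = rnn.pack_padded_sequence(torch.ones(2, 5), torch.tensor([5, 2]),
                                  batch_first=True)
padded, lengths = rnn.pad_packed_sequence(packed, batch_first=True,
                                          padding_value=0.0)
per_seq = padded.sum(dim=1)  # tensor([5., 2.]) -- padded zeros don't contribute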
Example 6: prepend_tensor_to_start_of_packed_seq
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def prepend_tensor_to_start_of_packed_seq(packed_seq: rnn.PackedSequence, value_to_add):
    """
    Shifts the whole sequence one step and adds value_to_add at the start.
    """
    data, batch_sizes, *others = packed_seq
    # We're going to be a bit cheeky and construct a PackedSequence manually at
    # the bottom of this function -- the docs tell us not to, but we have seen
    # others do it, e.g.
    # https://github.com/pytorch/pytorch/issues/8921#issuecomment-400552029
    # Originally this was coded against PyTorch 1.0, where PackedSequence was a
    # thinner wrapper over a NamedTuple. Check that we are still dealing with
    # enforce_sorted=True packed sequences (no sort/unsort indices).
    if len(others):
        assert others[0] is None
        assert others[1] is None
    num_in_first_batch = batch_sizes[0]
    front = torch.zeros_like(data[:num_in_first_batch])
    front[...] = value_to_add
    new_packed_seq_data = torch.cat([front, data], dim=0)
    new_length_at_beginning = batch_sizes[:1].clone()
    new_packed_seq = rnn.PackedSequence(
        new_packed_seq_data,
        torch.cat([new_length_at_beginning, packed_seq.batch_sizes]))
    return new_packed_seq
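To make the shift concrete, here is a tiny worked example (values are illustrative; like the function above, it assumes enforce_sorted=True packing):

import torch
from torch.nn.utils import rnn

packed = rnn.pack_padded_sequence(torch.tensor([[1., 2.], [3., 0.]]),
                                  torch.tensor([2, 1]), batch_first=True)
# packed.data = [1., 3., 2.], batch_sizes = [2, 1]
front = torch.full_like(packed.data[:packed.batch_sizes[0]], -1.0)  # SOS stand-in
shifted = rnn.PackedSequence(
    torch.cat([front, packed.data]),
    torch.cat([packed.batch_sizes[:1], packed.batch_sizes]))
# shifted.data = [-1., -1., 1., 3., 2.], batch_sizes = [2, 2, 1]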
Example 7: exec_backward_lstm
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def exec_backward_lstm(
        self,
        inputs: PackedSequence,
) -> List[PackedSequence]:
    """
    Backward LSTM.
    """
    if self.exec_managed_lstm_bos_eos:
        max_batch_size = int(inputs.batch_sizes.data[0])
        # EOS.
        self.exec_backward_lstm_eos(max_batch_size)
    elif self.exec_managed_lstm_reset_states:
        self.backward_lstm.reset_states()
    # Feed inputs.
    outputs, _ = self.backward_lstm(inputs.data, inputs.batch_sizes)
    if self.exec_managed_lstm_bos_eos:
        # BOS.
        self.exec_backward_lstm_bos(max_batch_size)
    # To list of `PackedSequence`.
    return [PackedSequence(output, inputs.batch_sizes) for output in outputs]
Example 8: combine_char_cnn_and_bilstm_outputs
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def combine_char_cnn_and_bilstm_outputs(
        self,
        char_cnn_packed: PackedSequence,
        bilstm_packed: List[PackedSequence],
) -> List[PackedSequence]:
    """
    Combine the outputs of the char CNN & BiLSTM for scalar mix.
    """
    # Simply duplicate the output of the char CNN (scalar mix expects
    # every layer to share the same shape).
    duplicated_char_cnn_packed = PackedSequence(
        torch.cat([char_cnn_packed.data, char_cnn_packed.data], dim=-1),
        char_cnn_packed.batch_sizes,
    )
    combined = [duplicated_char_cnn_packed]
    combined.extend(bilstm_packed)
    return combined
Example 9: exec_bilstm_and_scalar_mix
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def exec_bilstm_and_scalar_mix(
        self,
        token_repr: PackedSequence,
) -> List[PackedSequence]:
    """
    Common combination.
    """
    # BiLSTM.
    bilstm_repr = self.exec_bilstm(token_repr)
    # Scalar mix.
    combined_repr = self.combine_char_cnn_and_bilstm_outputs(
        token_repr,
        self.concat_packed_sequences(bilstm_repr),
    )
    mixed_reprs = self.exec_scalar_mix(combined_repr)
    return mixed_reprs
Example 10: edge_ctx
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def edge_ctx(self, obj_feats, obj_dists, im_inds, obj_preds, box_priors=None):
    """
    Object context and object classification.
    :param obj_feats: [num_obj, img_dim + object embedding0 dim]
    :param obj_dists: [num_obj, #classes]
    :param im_inds: [num_obj] the indices of the images
    :param obj_preds: [num_obj] hard object class predictions
    :return: edge_ctx: [num_obj, #feats] for later use
    """
    # Only use hard embeddings.
    obj_embed2 = self.obj_embed2(obj_preds)
    # obj_embed3 = F.softmax(obj_dists, dim=1) @ self.obj_embed3.weight
    inp_feats = torch.cat((obj_embed2, obj_feats), 1)
    # Sort by the confidence of the maximum detection.
    confidence = F.softmax(obj_dists, dim=1).data.view(-1)[
        obj_preds.data + arange(obj_preds.data) * self.num_classes]
    perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
    edge_input_packed = PackedSequence(inp_feats[perm], ls_transposed)
    edge_reps = self.edge_ctx_rnn(edge_input_packed)[0][0]
    # Now we're good! Un-permute back to the original row order.
    edge_ctx = edge_reps[inv_perm]
    return edge_ctx
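sort_rois is specific to this repo (neural-motifs), but the perm/inv_perm idiom it relies on is generic: permute rows into sorted order, run the RNN, then invert the permutation. A minimal sketch of just the indexing:

import torch

confidence = torch.tensor([0.2, 0.9, 0.5])
perm = confidence.argsort(descending=True)  # tensor([1, 2, 0])
inv_perm = perm.argsort()                   # undoes the permutation

feats = torch.randn(3, 4)
sorted_feats = feats[perm]
assert torch.equal(sorted_feats[inv_perm], feats)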
Example 11: tensors_equal
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def tensors_equal(self, x, y):
    """Test that tensors in diverse containers are equal."""
    if isinstance(x, PackedSequence):
        return self.tensors_equal(x[0], y[0]) and self.tensors_equal(x[1], y[1])
    if isinstance(x, dict):
        return (
            (x.keys() == y.keys()) and
            all(self.tensors_equal(x[k], y[k]) for k in x)
        )
    if isinstance(x, (list, tuple)):
        return all(self.tensors_equal(xi, yi) for xi, yi in zip(x, y))
    if x.is_sparse is not y.is_sparse:
        return False
    if x.is_sparse:
        x, y = x.to_dense(), y.to_dense()
    return (x == y).all()
Example 12: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, x, lengths, head=True):
    # Apply 2d convolutions.
    x, lengths = self.conv(x, lengths)
    # Pack the padded batch of sequences for the RNN module.
    x = pack_padded_sequence(x, lengths)
    # Forward pass through the GRU.
    x, _ = self.rnn(x)
    # Sum the bidirectional GRU outputs.
    f, b = x.data.split(self.rnn.hidden_size, 1)
    data = self.prj(f + b)
    if head:
        data = self.fc(data)
        data = log_softmax(data, dim=-1)
    x = PackedSequence(data, x.batch_sizes, x.sorted_indices, x.unsorted_indices)
    x, _ = pad_packed_sequence(x)
    return x, lengths
Example 13: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, x: torch.Tensor) -> torch.Tensor:
    if not self.training or self.dropout <= 0.:
        return x
    is_packed = isinstance(x, PackedSequence)
    if is_packed:
        packed, x = x, x.data
        batch_sizes = packed.batch_sizes
        max_batch_size = int(batch_sizes[0])
        # Packed data is a flat [total_steps, features] tensor: draw one mask
        # row per sequence and repeat it for every step the sequence is active,
        # so the same features are dropped across the whole sequence.
        m = x.new_empty(max_batch_size, x.size(-1),
                        requires_grad=False).bernoulli_(1 - self.dropout)
        m = torch.cat([m[:b] for b in batch_sizes])
    else:
        # Drop the same mask across the entire sequence.
        if self.batch_first:  # x: [batch, seq, features]
            m = x.new_empty(x.size(0), 1, x.size(2),
                            requires_grad=False).bernoulli_(1 - self.dropout)
        else:                 # x: [seq, batch, features]
            m = x.new_empty(1, x.size(1), x.size(2),
                            requires_grad=False).bernoulli_(1 - self.dropout)
    x = x.masked_fill(m == 0, 0) / (1 - self.dropout)
    if is_packed:
        return PackedSequence(x, batch_sizes,
                              packed.sorted_indices, packed.unsorted_indices)
    return x
Example 14: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, input, att_scores, hx=None):
    is_packed_input = isinstance(input, PackedSequence)
    if not is_packed_input:
        raise NotImplementedError(
            "DynamicGRU only supports packed input")
    is_packed_att_scores = isinstance(att_scores, PackedSequence)
    if not is_packed_att_scores:
        raise NotImplementedError(
            "DynamicGRU only supports packed att_scores")
    input, batch_sizes, sorted_indices, unsorted_indices = input
    att_scores, _, _, _ = att_scores
    max_batch_size = int(batch_sizes[0])
    if hx is None:
        hx = torch.zeros(
            max_batch_size, self.hidden_size,
            dtype=input.dtype, device=input.device)
    outputs = torch.zeros(
        input.size(0), self.hidden_size,
        dtype=input.dtype, device=input.device)
    begin = 0
    for batch in batch_sizes:
        # One time step: update the states of the `batch` active sequences.
        new_hx = self.rnn(
            input[begin:begin + batch],
            hx[0:batch],
            att_scores[begin:begin + batch])
        outputs[begin:begin + batch] = new_hx
        hx = new_hx
        begin += batch
    return PackedSequence(
        outputs, batch_sizes, sorted_indices, unsorted_indices)
Example 15: forward
# Required import: from torch.nn.utils import rnn [as alias]
# Or: from torch.nn.utils.rnn import PackedSequence [as alias]
def forward(self, input, hx=None):
    is_packed = isinstance(input, PackedSequence)
    if is_packed:
        input, batch_sizes = input
        max_batch_size = batch_sizes[0]
    else:
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
    if hx is None:
        num_directions = 2 if self.bidirectional else 1
        hx = torch.autograd.Variable(
            input.data.new(self.num_layers * num_directions,
                           max_batch_size,
                           self.hidden_size).zero_(),
            requires_grad=False)
        hx = (hx, hx)
    has_flat_weights = list(p.data.data_ptr() for p in self.parameters()) == self._data_ptrs
    if has_flat_weights:
        first_data = next(self.parameters()).data
        assert first_data.storage().size() == self._param_buf_size
        flat_weight = first_data.new().set_(first_data.storage(), 0,
                                            torch.Size([self._param_buf_size]))
    else:
        flat_weight = None
    func = AutogradRNN(
        self.input_size,
        self.hidden_size,
        num_layers=self.num_layers,
        batch_first=self.batch_first,
        dropout=self.dropout,
        train=self.training,
        bidirectional=self.bidirectional,
        batch_sizes=batch_sizes,
        dropout_state=self.dropout_state,
        flat_weight=flat_weight
    )
    output, hidden = func(input, self.all_weights, hx)
    if is_packed:
        output = PackedSequence(output, batch_sizes)
    return output, hidden
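This last example predates PyTorch 0.4: Variable, AutogradRNN, and _data_ptrs are internals that no longer exist. On current PyTorch the equivalent behavior, packed input in and packed output out with a zero-initialized hidden state, comes for free from the built-in modules; a minimal sketch:

import torch
from torch import nn
from torch.nn.utils.rnn import (PackedSequence, pack_padded_sequence,
                                pad_packed_sequence)

lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=2,
               batch_first=True, bidirectional=True)
packed = pack_padded_sequence(torch.randn(4, 10, 8),
                              torch.tensor([10, 7, 7, 3]), batch_first=True)
output, (h_n, c_n) = lstm(packed)     # hx defaults to zeros, as above
assert isinstance(output, PackedSequence)
padded, lengths = pad_packed_sequence(output, batch_first=True)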