

Python rnn.LSTM Attribute Code Examples

This article collects typical usage examples of the torch.nn.modules.rnn.LSTM attribute in Python. If you have been wondering how to use the rnn.LSTM attribute, how it works, or what real-world code that uses it looks like, the curated examples here may help. You can also explore further usage examples for the containing module, torch.nn.modules.rnn.


Six code examples of the rnn.LSTM attribute are shown below, ordered by popularity by default.
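Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the import path this page covers and a basic forward call; the shapes are chosen purely for illustration.

# torch.nn.modules.rnn.LSTM is the same class that is exposed as torch.nn.LSTM
import torch
from torch.nn.modules.rnn import LSTM

lstm = LSTM(input_size=10, hidden_size=11, num_layers=1, batch_first=True)
inputs = torch.randn(5, 7, 10)          # (batch, seq_len, input_size)
outputs, (h_n, c_n) = lstm(inputs)      # outputs: (5, 7, 11); h_n and c_n: (1, 5, 11)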

Example 1: test_dropout_is_not_applied_to_output_or_returned_hidden_states

# Required import: from torch.nn.modules import rnn [as alias]
# Or: from torch.nn.modules.rnn import LSTM [as alias]
def test_dropout_is_not_applied_to_output_or_returned_hidden_states(self):
        sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
            self.random_tensor, self.sequence_lengths
        )
        tensor = pack_padded_sequence(
            sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
        )
        lstm = AugmentedLstm(10, 11, recurrent_dropout_probability=0.5)

        output, (hidden_state, _) = lstm(tensor)
        output_sequence, _ = pad_packed_sequence(output, batch_first=True)
        # Check the returned output sequence.
        num_hidden_dims_zero_across_timesteps = ((output_sequence.sum(1) == 0).sum()).item()
        # If this count is nonzero, dropout has been applied to the output of the LSTM.
        assert not num_hidden_dims_zero_across_timesteps
        # Dropout should not be applied to the last hidden state either, as it is not
        # used within the LSTM; this keeps the behaviour consistent with `torch.nn.LSTM`,
        # where dropout is not applied to any of its outputs, and with the Keras LSTM
        # implementation.
        hidden_state = hidden_state.squeeze()
        num_hidden_dims_zero_across_timesteps = ((hidden_state == 0).sum()).item()
        assert not num_hidden_dims_zero_across_timesteps 
Developer: allenai | Project: allennlp | Lines of code: 24 | Source file: augmented_lstm_test.py
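The test above relies on fixtures created in the test class. A hypothetical fixture sketch, with shapes inferred from the AugmentedLstm(10, 11) call and the [1, 5, 11] initial states used in Example 3 (the exact values are assumptions, not the original test file):

# Hypothetical setUp: a random (batch=5, timesteps=7, input_dim=10) batch plus lengths.
import torch

def setUp(self):
    super().setUp()
    self.random_tensor = torch.rand(5, 7, 10)                  # (batch, timesteps, input_dim)
    self.sequence_lengths = torch.LongTensor([7, 7, 5, 3, 2])  # one length per batch element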

Example 2: __init__

# Required import: from torch.nn.modules import rnn [as alias]
# Or: from torch.nn.modules.rnn import LSTM [as alias]
def __init__(self, num_embeddings, num_labels):
        super(Net, self).__init__()
        self.emb = torch.nn.Embedding(num_embeddings, Config.embedding_dim, padding_idx=0)
        # Note: with num_layers=1 the `dropout` argument has no effect, since PyTorch
        # only applies LSTM dropout between stacked layers.
        self.lstm1 = LSTM(Config.embedding_dim, Config.hidden_size, num_layers=1,
                          batch_first=True, bias=True, dropout=Config.dropout, bidirectional=True)
        self.lstm2 = LSTM(Config.embedding_dim, Config.hidden_size, num_layers=1,
                          batch_first=True, bias=True, dropout=Config.dropout, bidirectional=True)
        self.linear = torch.nn.Linear(Config.hidden_size*4, num_labels)
        self.loss = torch.nn.CrossEntropyLoss()
        self.pred = torch.nn.Softmax(dim=-1)  # explicit dim avoids the implicit-dimension deprecation warning

        # Initial hidden/cell states: shape (num_layers * num_directions, batch, hidden) = (2, batch, hidden).
        self.h0 = Variable(torch.zeros(2, Config.batch_size, Config.hidden_size))
        self.c0 = Variable(torch.zeros(2, Config.batch_size, Config.hidden_size))
        self.h1 = Variable(torch.zeros(2, Config.batch_size, Config.hidden_size))
        self.c1 = Variable(torch.zeros(2, Config.batch_size, Config.hidden_size))

        if Config.cuda:
            self.h0 = self.h0.cuda()
            self.c0 = self.c0.cuda()
            self.h1 = self.h1.cuda()
            self.c1 = self.c1.cuda() 
Developer: JD-AI-Research-Silicon-Valley | Project: SACN | Lines of code: 21 | Source file: snli_verbose.py
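For context, a hypothetical forward pass showing how the layers above could be wired for a sentence-pair (SNLI-style) task; this sketch is not part of the original snli_verbose.py, and the premise/hypothesis names and the use of the final timestep are assumptions:

# Hypothetical forward sketch for the Net defined above.
def forward(self, premise, hypothesis):
    p = self.emb(premise)                                  # (batch, seq_len, embedding_dim)
    h = self.emb(hypothesis)
    p_out, _ = self.lstm1(p, (self.h0, self.c0))           # (batch, seq_len, 2 * hidden_size)
    h_out, _ = self.lstm2(h, (self.h1, self.c1))
    features = torch.cat([p_out[:, -1], h_out[:, -1]], dim=-1)  # (batch, 4 * hidden_size)
    return self.linear(features)                           # logits over num_labels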

Example 3: test_augmented_lstm_computes_same_function_as_pytorch_lstm

# Required import: from torch.nn.modules import rnn [as alias]
# Or: from torch.nn.modules.rnn import LSTM [as alias]
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
        augmented_lstm = AugmentedLstm(10, 11)
        pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
        # Initialize all weights to be == 1.
        constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(augmented_lstm)
        initializer(pytorch_lstm)

        initial_state = torch.zeros([1, 5, 11])
        initial_memory = torch.zeros([1, 5, 11])

        # Use bigger numbers to avoid floating point instability.
        sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
            self.random_tensor * 5.0, self.sequence_lengths
        )
        lstm_input = pack_padded_sequence(
            sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
        )

        augmented_output, augmented_state = augmented_lstm(
            lstm_input, (initial_state, initial_memory)
        )
        pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
        augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)

        numpy.testing.assert_array_almost_equal(
            pytorch_output_sequence.data.numpy(), augmented_output_sequence.data.numpy(), decimal=4
        )
        numpy.testing.assert_array_almost_equal(
            pytorch_state[0].data.numpy(), augmented_state[0].data.numpy(), decimal=4
        )
        numpy.testing.assert_array_almost_equal(
            pytorch_state[1].data.numpy(), augmented_state[1].data.numpy(), decimal=4
        ) 
Developer: allenai | Project: allennlp | Lines of code: 38 | Source file: augmented_lstm_test.py

Example 4: forward

# Required import: from torch.nn.modules import rnn [as alias]
# Or: from torch.nn.modules.rnn import LSTM [as alias]
def forward(self, data, time_steps, run_backwards=True):
		# IMPORTANT: assumes that 'data' already has mask concatenated to it 

		# data shape: [n_traj, n_tp, n_dims]
		# shape required for rnn: (seq_len, batch, input_size)
		# t0: not used here
		n_traj = data.size(0)

		assert(not torch.isnan(data).any())
		assert(not torch.isnan(time_steps).any())

		data = data.permute(1,0,2) 

		if run_backwards:
			# Look at data in the reverse order: from later points to the first
			data = utils.reverse(data)

		if self.use_delta_t:
			delta_t = time_steps[1:] - time_steps[:-1]
			if run_backwards:
				# going backwards in time, so reverse the time deltas as well
				delta_t = utils.reverse(delta_t)
			# append zero delta t in the end
			delta_t = torch.cat((delta_t, torch.zeros(1).to(self.device)))
			delta_t = delta_t.unsqueeze(1).repeat((1,n_traj)).unsqueeze(-1)
			data = torch.cat((delta_t, data),-1)

		outputs, _ = self.gru_rnn(data)

		# LSTM output shape: (seq_len, batch, num_directions * hidden_size)
		last_output = outputs[-1]

		self.extra_info = {"rnn_outputs": outputs, "time_points": time_steps}

		mean, std = utils.split_last_dim(self.hiddens_to_z0(last_output))
		std = std.abs()

		assert(not torch.isnan(mean).any())
		assert(not torch.isnan(std).any())

		return mean.unsqueeze(0), std.unsqueeze(0) 
Developer: YuliaRubanova | Project: latent_ode | Lines of code: 43 | Source file: encoder_decoder.py
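The forward method above assumes attributes such as self.gru_rnn, self.hiddens_to_z0, self.use_delta_t and self.device that are set up in the constructor. A hypothetical constructor sketch (the attribute names match the forward pass, but the dimensions and the choice of an LSTM layer are illustrative assumptions rather than the original encoder_decoder.py):

# Hypothetical constructor for the encoder above; input_dim already includes the mask features.
import torch
from torch.nn.modules.rnn import LSTM

def __init__(self, latent_dim, input_dim, hidden_size=40, use_delta_t=True, device="cpu"):
    super().__init__()
    self.use_delta_t = use_delta_t
    self.device = device
    # one extra input feature per timestep for the appended delta_t channel
    rnn_input_dim = input_dim + 1 if use_delta_t else input_dim
    self.gru_rnn = LSTM(rnn_input_dim, hidden_size)        # sequence-first (batch_first=False)
    # maps the last RNN output to the concatenated [mean, std] of z0
    self.hiddens_to_z0 = torch.nn.Linear(hidden_size, 2 * latent_dim)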

Example 5: test_augmented_lstm_computes_same_function_as_pytorch_lstm

# Required import: from torch.nn.modules import rnn [as alias]
# Or: from torch.nn.modules.rnn import LSTM [as alias]
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
        augmented_lstm = AugmentedLstm(10, 11)
        pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
        # Initialize all weights to be == 1.
        initializer = InitializerApplicator([(u".*", lambda tensor: torch.nn.init.constant_(tensor, 1.))])
        initializer(augmented_lstm)
        initializer(pytorch_lstm)

        initial_state = torch.zeros([1, 5, 11])
        initial_memory = torch.zeros([1, 5, 11])

        # Use bigger numbers to avoid floating point instability.
        sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor * 5., self.sequence_lengths)
        lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)

        augmented_output, augmented_state = augmented_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
        augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)

        numpy.testing.assert_array_almost_equal(pytorch_output_sequence.data.numpy(),
                                                augmented_output_sequence.data.numpy(), decimal=4)
        numpy.testing.assert_array_almost_equal(pytorch_state[0].data.numpy(),
                                                augmented_state[0].data.numpy(), decimal=4)
        numpy.testing.assert_array_almost_equal(pytorch_state[1].data.numpy(),
                                                augmented_state[1].data.numpy(), decimal=4) 
Developer: plasticityai | Project: magnitude | Lines of code: 28 | Source file: augmented_lstm_test.py

Example 6: __init__

# Required import: from torch.nn.modules import rnn [as alias]
# Or: from torch.nn.modules.rnn import LSTM [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        dropout: float = 0.0,
        num_layers: int = 1,
    ) -> None:
        super().__init__()
        self._input_attention = input_attention
        self._add_action_bias = add_action_bias
        self._activation = activation
        self._num_layers = num_layers

        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        output_dim = encoder_output_dim
        input_dim = output_dim
        # Our decoder input will be the concatenation of the attended encoder hidden state (i.e.,
        # the attended question encoding) and the previous action embedding, and we'll project that
        # down to the decoder's `input_dim`, which we arbitrarily set to be the same as
        # `output_dim`.
        self._input_projection_layer = Linear(encoder_output_dim + action_embedding_dim, input_dim)
        # Before making a prediction, we'll compute an attention over the input given our updated
        # hidden state. Then we concatenate those with the decoder state and project to
        # `action_embedding_dim` to make a prediction.
        self._output_projection_layer = Linear(
            output_dim + encoder_output_dim, action_embedding_dim
        )
        if self._num_layers > 1:
            self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
        else:
            # We use an ``LSTMCell`` when there is just one layer because it is slightly
            # faster: we only run the LSTM for a single step at a time.
            self._decoder_cell = LSTMCell(input_dim, output_dim)

        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x 
Developer: allenai | Project: allennlp-semparse | Lines of code: 44 | Source file: basic_transition_function.py
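To make the wiring concrete, here is a hypothetical single decoder step for the one-layer (LSTMCell) case. It is a sketch under assumptions about tensor shapes and argument names, not the library's actual transition function:

# Hypothetical decoder step using the modules built in __init__ above.
def _decoder_step(self, hidden_state, memory_cell, previous_action_embedding,
                  attended_question, encoder_outputs, encoder_output_mask):
    # Concatenate the attended question encoding with the previous action embedding
    # and project down to the decoder input dimension.
    projected_input = self._input_projection_layer(
        torch.cat([attended_question, previous_action_embedding], dim=-1)
    )
    decoder_input = self._activation(projected_input)
    hidden_state, memory_cell = self._decoder_cell(decoder_input, (hidden_state, memory_cell))
    hidden_state = self._dropout(hidden_state)
    # Re-attend over the encoder outputs with the updated hidden state, then project
    # the concatenation down to the action embedding space for scoring the next action.
    attention_weights = self._input_attention(hidden_state, encoder_outputs, encoder_output_mask)
    attended_question = (attention_weights.unsqueeze(-1) * encoder_outputs).sum(dim=1)
    predicted_action_embedding = self._output_projection_layer(
        torch.cat([hidden_state, attended_question], dim=-1)
    )
    return hidden_state, memory_cell, predicted_action_embedding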


Note: the torch.nn.modules.rnn.LSTM attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's license. Please do not republish without permission.