Python backend.rnn Method Code Examples

This article collects typical usage examples of the Python method keras.backend.rnn. If you have been wondering how to use backend.rnn, what it does, or what it looks like in practice, the curated examples below may help. You can also browse further usage examples from its containing module, keras.backend.


The following presents 15 code examples of the backend.rnn method, ordered by popularity by default.
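
Before diving into the individual examples, a minimal, self-contained sketch of the K.rnn contract may help (assuming Keras 2 with the TensorFlow backend; the toy cumulative-sum recurrence below is an illustration, not code from any of the projects that follow). K.rnn takes a step function, an input tensor of shape (batch, timesteps, ...), and a list of initial state tensors, and returns the triple (last_output, outputs, new_states):

import numpy as np
from keras import backend as K

def step(x_t, states):
    # cumulative sum: add the current timestep to the carried state
    s_t = states[0] + x_t
    return s_t, [s_t]  # (output at step t, list of new states)

x = K.variable(np.ones((2, 4, 3)))   # (batch, timesteps, features)
initial_states = [K.zeros((2, 3))]   # one state of shape (batch, features)
last_output, outputs, new_states = K.rnn(step, x, initial_states)
print(K.eval(last_output))           # every entry is 4.0 after four steps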

Example 1: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        en_seq = x
        x_input = x[:, input_shape[1]-1, :]
        x_input = K.repeat(x_input, input_shape[1])
        initial_states = self.get_initial_states(x_input)

        constants = super(PointerLSTM, self).get_constants(x_input)
        constants.append(en_seq)
        preprocessed_input = self.preprocess_input(x_input)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=self.go_backwards,
                                             constants=constants,
                                             input_length=input_shape[1])

        return outputs 
Author: zygmuntz, Project: pointer-networks-experiments, Lines: 20, Source: PointerLSTM.py

Example 2: _forward

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values 
Author: UKPLab, Project: elmo-bilstm-cnn-crf, Lines: 22, Source: ChainCRF.py
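
For context, a hedged sketch of how a ChainCRF layer typically invokes _forward to run the forward algorithm: the reduce step is a log-sum-exp over the previous-tag axis, and the initial state is the first timestep's unary energies. The names follow the example above (x: (batch, timesteps, n_classes), U: (n_classes, n_classes) transition matrix), but the exact call site is an assumption, not the verbatim layer code.

initial_states = [x[:, 0, :]]  # alpha_0: (batch, n_classes)
last_alpha, alphas = _forward(x,
                              lambda B: [K.logsumexp(B, axis=1)],
                              initial_states, U, mask)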

Example 3: _backward

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.'''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y 
Author: UKPLab, Project: elmo-bilstm-cnn-crf, Lines: 25, Source: ChainCRF.py
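
The helper batch_gather used above is not part of keras.backend; a plausible definition, consistent with its use here (reference: (batch, n_classes), indices: (batch,), returning reference[b, indices[b]] for every row b), is:

def batch_gather(reference, indices):
    # flatten the (batch, n_classes) reference and index it with per-row
    # offsets so each batch entry picks its own column
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices)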

Example 4: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        initial_states = self.get_initial_states(x)
        constants = self.get_constants(x)
        preprocessed_input = self.preprocess_input(x)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=False,
                                             mask=mask,
                                             constants=constants,
                                             unroll=False,
                                             input_length=input_shape[1])

        if K.ndim(last_output) == 3:  # K.ndim is portable; TF tensors have no .ndim attribute
            last_output = K.expand_dims(last_output, dim=0)  # `dim` is the Keras 1 name for `axis`

        return last_output 
Author: marcellacornia, Project: sam, Lines: 20, Source: attentive_convlstm.py

Example 5: _forward

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def _forward(x, reduce_step, initial_states, U, mask=None):
    """Forward recurrence of the linear chain crf."""

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values 
Author: kermitt2, Project: delft, Lines: 22, Source: layers.py

Example 6: _backward

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def _backward(gamma, mask):
    """Backward recurrence of the linear chain crf."""
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y 
Author: kermitt2, Project: delft, Lines: 25, Source: layers.py

Example 7: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def step(self, input_energy_t, states, return_logZ=True):
        # Note: in the following, `prev_target_val` has shape (B, F),
        # where B = batch_size, F = output feature dim.
        # `i` is of float32, due to the behavior of `K.rnn`.
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.tf.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
        if return_logZ:
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
            new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
            return argmin_table, [min_energy, i + 1] 
Author: yongyuwen, Project: sequence-tagging-ner, Lines: 24, Source: layers.py
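
A hedged sketch of the recursion that typically drives this step function through K.rnn; the names input_energy and self.chain_kernel are assumptions inferred from the state layout unpacked above ([prev_target_val, i, chain_energy, (mask)]), not the verbatim CRF layer code.

chain_energy = K.expand_dims(self.chain_kernel, 0)     # (1, F, F), passed as a constant
prev_target_val = K.zeros_like(input_energy[:, 0, :])  # (B, F)
counter = K.zeros_like(prev_target_val[:, :1])         # the float32 step index `i`
initial_states = [prev_target_val, counter]
last_val, val_seq, _ = K.rnn(self.step, input_energy,
                             initial_states, constants=[chain_energy])
# with masking, a padded copy of the mask would be appended to `constants`,
# becoming `states[3]` inside `step`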

Example 8: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None, **kwargs):
        input_shape = K.int_shape(x)
        res = super(ShareableGRU, self).call(x, mask, **kwargs)
        self.input_spec = [InputSpec(shape=(self.input_spec[0].shape[0],
                                            None,
                                            self.input_spec[0].shape[2]))]
        if K.ndim(x) == K.ndim(res):
            # A recent change in Keras
            # (https://github.com/fchollet/keras/commit/a9b6bef0624c67d6df1618ca63d8e8141b0df4d0)
            # made it so that K.rnn with a tensorflow backend does not retain shape information for
            # the sequence length, even if it's present in the input.  We need to fix that here so
            # that our models have the right shape information.  A simple K.reshape is good enough
            # to fix this.
            result_shape = K.int_shape(res)
            if input_shape[1] is not None and result_shape[1] is None:
                shape = (input_shape[0] if input_shape[0] is not None else -1,
                         input_shape[1], result_shape[2])
                res = K.reshape(res, shape=shape)
        return res 
Author: allenai, Project: deep_qa, Lines: 21, Source: shareable_gru.py

Example 9: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None):
        # input_shape = (batch_size, input_length, input_dim). This needs to be defined in build.
        read_output, initial_memory_states, output_mask = self.read(x, mask)
        initial_write_states = self.writer.get_initial_states(read_output)  # h_0 and c_0 of the writer LSTM
        initial_states = initial_memory_states + initial_write_states
        # last_output: (batch_size, output_dim)
        # all_outputs: (batch_size, input_length, output_dim)
        # last_states:
        #       last_memory_state: (batch_size, input_length, output_dim)
        #       last_output
        #       last_writer_ct
        last_output, all_outputs, last_states = K.rnn(self.compose_and_write_step, read_output, initial_states,
                                                      mask=output_mask)
        last_memory = last_states[0]
        if self.return_mode == "last_output":
            return last_output
        elif self.return_mode == "all_outputs":
            return all_outputs
        else:
            # return mode is output_and_memory
            expanded_last_output = K.expand_dims(last_output, dim=1)  # (batch_size, 1, output_dim)
            # (batch_size, 1+input_length, output_dim)
            return K.concatenate([expanded_last_output, last_memory], axis=1) 
Author: pdasigi, Project: neural-semantic-encoders, Lines: 25, Source: nse.py

Example 10: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        if self.layer.unroll and input_shape[1] is None:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')

        initial_states = (self.layer.states if self.layer.stateful else
                          self.layer.get_initial_states(x))
        constants = self.get_constants(x)
        preprocessed_input = self.layer.preprocess_input(x)

        last_output, outputs, states = K.rnn(
            self.step, preprocessed_input, initial_states,
            go_backwards=self.layer.go_backwards,
            mask=mask,
            constants=constants,
            unroll=self.layer.unroll,
            input_length=input_shape[1])

        if self.layer.stateful:
            updates = []
            for i in range(len(states)):
                updates.append((self.layer.states[i], states[i]))
            self.add_update(updates, x)

        return outputs if self.layer.return_sequences else last_output 
Author: codekansas, Project: gandlf, Lines: 37, Source: attention.py

Example 11: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None):

		input_shape = self.input_spec[0].shape

		# state format: [h(t-1), c(t-1), y(t-1)]
		#h_0 = K.zeros_like(x[:, 0, :])
		#c_0 = K.zeros_like(x[:, 0, :])
		h_0 = K.reshape(x, (-1, self.input_dim))
		c_0 = K.reshape(x, (-1, self.input_dim))
		initial_states = [h_0, c_0]

		#self.states = [None, None]
		#initial_states = self.get_initial_states(x)

		last_output, outputs, states = K.rnn(step_function=self.step, 
                                             inputs=x,
                                             initial_states=initial_states,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             constants=None,
                                             unroll=self.unroll,
                                             input_length=input_shape[1])

		if self.return_sequences:
			return outputs
		else:
			return last_output 
Author: bnsnapper, Project: keras_bn_library, Lines: 29, Source: recurrent.py

Example 12: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, mask=None):
        # input shape: (nb_samples, time (padded with zeros), input_dim)
        # note that the .build() method of subclasses MUST define
        # self.input_spec with a complete input shape.
        input_shape = self.input_spec[0].shape
        if K._BACKEND == 'tensorflow':
            if not input_shape[1]:
                raise Exception('When using TensorFlow, you should define '
                                'explicitly the number of timesteps of '
                                'your sequences.\n'
                                'If your first layer is an Embedding, '
                                'make sure to pass it an "input_length" '
                                'argument. Otherwise, make sure '
                                'the first layer has '
                                'an "input_shape" or "batch_input_shape" '
                                'argument, including the time axis. '
                                'Found input shape at layer ' + self.name +
                                ': ' + str(input_shape))
        if self.layer.stateful:
            initial_states = self.layer.states
        else:
            initial_states = self.layer.get_initial_states(x)
        constants = self.get_constants(x)
        preprocessed_input = self.layer.preprocess_input(x)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=self.layer.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.layer.unroll,
                                             input_length=input_shape[1])
        if self.layer.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.layer.states[i], states[i]))

        if self.layer.return_sequences:
            return outputs
        else:
            return last_output 
Author: saurabhmathur96, Project: Neural-Chatbot, Lines: 43, Source: sequence_blocks.py

Example 13: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def call(self, x, use_teacher_forcing=True, training=None):
        # TODO: check that model is loading from .h5 correctly
        # TODO: for now this cannot be a shared layer
        # (it could only be shared if teacher forcing were used, or not used, in all cases simultaneously)

        # this sequence is used only to extract the number of timesteps (the same as in the output sequence)
        fake_input = x
        if isinstance(x, list):
            # teacher forcing for training
            self.x_seq, self.y_true = x
            self.use_teacher_forcing = use_teacher_forcing
            fake_input = K.expand_dims(self.y_true)
        else:
            # inference
            self.x_seq = x
            self.use_teacher_forcing = False

        # apply a dense layer over the time dimension of the sequence
        # do it here because it doesn't depend on any previous steps
        # therefore we can save computation time:
        self._uxpb = _time_distributed_dense(self.x_seq, self.U_a, b=self.b_a,
                                             dropout=self.dropout,
                                             input_dim=self.input_dim,
                                             timesteps=self.timesteps,
                                             output_dim=self.units,
                                             training=training)

        last_output, outputs, states = K.rnn(
            self.step,
            inputs=fake_input,
            initial_states=self.get_initial_state(self.x_seq)
        )
        return outputs 
Author: asmekal, Project: keras-monotonic-attention, Lines: 35, Source: attention_decoder.py
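
A hedged usage sketch of the two code paths this call method supports; AttentionDecoder and the tensor names are hypothetical stand-ins for whatever the surrounding project defines:

decoder = AttentionDecoder(units=128)       # hypothetical constructor
train_out = decoder([encoder_seq, y_true])  # training: teacher forcing on the true targets
infer_out = decoder(encoder_seq)            # inference: condition only on the input sequence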

Example 14: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def step(self, input_energy_t, states, return_logZ=True):
        # Note: in the following, `prev_target_val` has shape (B, F),
        # where B = batch_size, F = output feature dim.
        # `i` is of float32, due to the behavior of `K.rnn`.
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            # (1, F, F)*(B, 1, 1) -> (B, F, F)
            chain_energy = chain_energy * K.expand_dims(
                K.expand_dims(m[:, 0] * m[:, 1]))
        if return_logZ:
            # shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
            new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            # cast for the tf-version of `K.rnn`
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
            return argmin_table, [min_energy, i + 1] 
Author: keras-team, Project: keras-contrib, Lines: 28, Source: crf.py

Example 15: test_rnn_no_states

# Required import: from keras import backend [as alias]
# Or: from keras.backend import rnn [as alias]
def test_rnn_no_states(self):
        # implement a simple RNN without states
        input_dim = 8
        output_dim = 4
        timesteps = 5

        _, x = parse_shape_or_val((32, timesteps, input_dim))
        _, wi = parse_shape_or_val((input_dim, output_dim))

        x_k = K.variable(x)
        wi_k = K.variable(wi)

        def rnn_fn(x_k, h_k):
            assert len(h_k) == 0
            y_k = K.dot(x_k, wi_k)
            return y_k, []

        last_y1, y1, h1 = ref_rnn(x, [wi, None, None], None,
                                  go_backwards=False, mask=None)
        last_y2, y2, h2 = K.rnn(rnn_fn, x_k, [],
                                go_backwards=False, mask=None)

        assert len(h2) == 0
        last_y2 = K.eval(last_y2)
        y2 = K.eval(y2)

        assert_allclose(last_y1, last_y2, atol=1e-05)
        assert_allclose(y1, y2, atol=1e-05) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 30, Source: backend_test.py


Note: the keras.backend.rnn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects and copyright remains with their original authors; consult each project's License before using or redistributing the code, and do not republish this compilation without permission.