

Python backend.reverse Method Code Examples

This article collects and summarizes typical usage examples of the keras.backend.reverse method in Python. If you are wondering how exactly to use backend.reverse, or what it can be used for, the curated code examples below may help. You can also explore further usage examples of the keras.backend module.


A total of 13 code examples of the backend.reverse method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
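Before the examples, here is a minimal sketch (not taken from any of the projects below) of what K.reverse itself does: it flips a tensor along the given axis or axes.

# Minimal illustration: reverse the time axis (axis 1) of a (1, 3, 2) tensor.
import numpy as np
from keras import backend as K

x = K.constant(np.arange(6).reshape(1, 3, 2))
y = K.reverse(x, axes=1)
print(K.eval(y)[0])
# [[4. 5.]
#  [2. 3.]
#  [0. 1.]]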

Example 1: _backward

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.'''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y 
Developer: UKPLab, Project: elmo-bilstm-cnn-crf, Lines: 25, Source: ChainCRF.py
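The snippet above (and the identical Example 2 below) calls a batch_gather helper that is defined elsewhere in the source file. As an assumption about its behaviour, a minimal TensorFlow-backend sketch could look like the following; the project's actual implementation may differ.

# Hypothetical helper, not the project's code: for each batch row b,
# pick reference[b, indices[b]] (used here to follow stored back-pointers).
import tensorflow as tf
from keras import backend as K

def batch_gather(reference, indices):
    batch_idx = tf.range(K.shape(indices)[0])
    gather_idx = tf.stack([batch_idx, indices], axis=1)
    return tf.gather_nd(reference, gather_idx)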

Example 2: _backward

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def _backward(gamma, mask):
    """Backward recurrence of the linear chain crf."""
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y 
Developer: kermitt2, Project: delft, Lines: 25, Source: layers.py

Example 3: call

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def call(self, x):
        shape = K.shape(x)
        x = K.reverse(x, axes=1)  # reverse so that frame assignment is anchored to the end of the sequence
        frame_1 = tf.gather(x, K.arange(start=0, stop=shape[1], step=3), axis=1)
        frame_2 = tf.gather(x, K.arange(start=1, stop=shape[1], step=3), axis=1)
        frame_3 = tf.gather(x, K.arange(start=2, stop=shape[1], step=3), axis=1)
        return [frame_1, frame_2, frame_3] 
Developer: kipoi, Project: models, Lines: 9, Source: model.py
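A rough illustration of the reverse-then-stride pattern above (toy data, not from the project): reversing first makes the stride-3 grouping anchored to the end of the sequence rather than the start.

# Illustrative only: stride-3 gather after reversing a length-7 sequence.
import tensorflow as tf
from keras import backend as K

x = K.constant([[0., 1., 2., 3., 4., 5., 6.]])              # shape (1, 7)
x_rev = K.reverse(x, axes=1)                                # [6, 5, 4, 3, 2, 1, 0]
frame_1 = tf.gather(x_rev, K.arange(start=0, stop=7, step=3), axis=1)
print(K.eval(frame_1))                                      # [[6. 3. 0.]]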

Example 4: map_charades

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def map_charades(y_true, y_pred):
    """Return the mean average precision (mAP) over all classes.

    Assumes the usual imports: numpy as np, tensorflow as tf and
    keras.backend as K, with `tf_framework` pointing at an argsort
    implementation (e.g. tensorflow.contrib.framework in TF 1.x).
    """
    m_aps = []

    tf_one = tf.constant(1, dtype=tf.float32)

    n_classes = y_pred.shape[1]
    for oc_i in range(n_classes):
        pred_row = y_pred[:, oc_i]
        sorted_idxs = tf_framework.argsort(-pred_row)
        true_row = y_true[:, oc_i]
        true_row = tf.map_fn(lambda i: true_row[i], sorted_idxs, dtype=np.float32)
        tp_bool = tf.equal(true_row, tf_one)
        tp = tf.cast(tp_bool, dtype=np.float32)
        fp = K.reverse(tp, axes=0)
        n_pos = tf.reduce_sum(tp)
        f_pcs = tf.cumsum(fp)
        t_pcs = tf.cumsum(tp)
        s = f_pcs + t_pcs

        s = tf.cast(s, tf.float32)
        t_pcs = tf.cast(t_pcs, tf.float32)
        tp_float = tf.cast(tp_bool, np.float32)

        prec = t_pcs / s
        avg_prec = prec * tp_float

        n_pos = tf.cast(n_pos, tf.float32)
        avg_prec = avg_prec / n_pos
        avg_prec = tf.expand_dims(avg_prec, axis=0)
        m_aps.append(avg_prec)

    m_aps = K.concatenate(m_aps, axis=0)
    mAP = K.mean(m_aps)
    return mAP

# endregion

# region Callbacks 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 43, Source: keras_utils.py

Example 5: call

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def call(self, x, mask=None):
        # Sort each row in ascending order (Theano backend only).
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s 
Developer: wentaozhu, Project: deep-mil-for-whole-mammogram-classification, Lines: 15, Source: customlayers.py
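The commented-out K.reverse line hints at the original intent: sorting ascending and then reversing along axis 1 yields a descending sort per row. An illustration of that combination with a TensorFlow backend (an assumption; the snippet itself targets Theano):

# Illustration only: ascending sort + K.reverse = descending sort per row.
# Requires tf.sort (TensorFlow 1.14+).
import tensorflow as tf
from keras import backend as K

x = K.constant([[0.2, 0.9, 0.5]])
asc = tf.sort(x, axis=1)          # [[0.2, 0.5, 0.9]]
desc = K.reverse(asc, axes=1)     # [[0.9, 0.5, 0.2]]
print(K.eval(desc))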

Example 6: ResNet50_pre

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def ResNet50_pre(imgs, scope):
    return Lambda(lambda x: K.reverse(x, len(x.shape)-1) - [103.939,116.779,123.68], name=scope+'resnet50_pre')(imgs) 
Developer: sangxia, Project: nips-2017-adversarial, Lines: 4, Source: network_utils.py
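A rough usage sketch of the function above (the input shape and layer name are illustrative assumptions): K.reverse on the last axis converts RGB to BGR before the ImageNet channel means are subtracted, i.e. the 'caffe' preprocessing convention expected by the stock ResNet50 weights.

# Hypothetical usage, assuming 224x224 RGB inputs in the 0-255 range.
from keras.layers import Input, Lambda
from keras import backend as K

imgs = Input(shape=(224, 224, 3))
bgr_centered = Lambda(
    lambda x: K.reverse(x, len(x.shape) - 1) - [103.939, 116.779, 123.68],
    name='example_resnet50_pre')(imgs)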

Example 7: viterbi_decoding

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def viterbi_decoding(self, X, mask=None):
        input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
        if self.use_boundary:
            input_energy = self.add_boundary_energy(
                input_energy, mask, self.left_boundary, self.right_boundary)

        argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
        argmin_tables = K.cast(argmin_tables, 'int32')

        # backward to find best path, `initial_best_idx` can be any,
        # as all elements in the last argmin_table are the same
        argmin_tables = K.reverse(argmin_tables, 1)
        # matrix instead of vector is required by tf `K.rnn`
        initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]
        if K.backend() == 'theano':
            initial_best_idx = [K.T.unbroadcast(initial_best_idx[0], 1)]

        def gather_each_row(params, indices):
            n = K.shape(indices)[0]
            if K.backend() == 'theano':
                return params[K.T.arange(n), indices]
            else:
                indices = K.transpose(K.stack([K.tf.range(n), indices]))
                return K.tf.gather_nd(params, indices)

        def find_path(argmin_table, best_idx):
            next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
            next_best_idx = K.expand_dims(next_best_idx)
            if K.backend() == 'theano':
                next_best_idx = K.T.unbroadcast(next_best_idx, 1)
            return next_best_idx, [next_best_idx]

        _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx,
                                 input_length=K.int_shape(X)[1], unroll=self.unroll)
        best_paths = K.reverse(best_paths, 1)
        best_paths = K.squeeze(best_paths, 2)

        return K.one_hot(best_paths, self.units) 
Developer: yongzhuo, Project: nlp_xiaojiang, Lines: 40, Source: keras_bert_layer.py

Example 8: call

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def call(self, inputs, **kwargs):
        y_rev = super().call(inputs, **kwargs)
        return K.reverse(y_rev, 1) 
Developer: IlyaGusev, Project: rnnmorph, Lines: 5, Source: model.py
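Only the call override above comes from the project; the surrounding class is not shown. One plausible reconstruction (an assumption, not the project's actual class) is a recurrent layer that runs with go_backwards=True and then flips its output back into natural time order:

# Hypothetical surrounding class; only call() mirrors the original snippet.
from keras import backend as K
from keras.layers import LSTM

class ReversedLSTM(LSTM):
    def __init__(self, units, **kwargs):
        kwargs['go_backwards'] = True
        kwargs['return_sequences'] = True
        super().__init__(units, **kwargs)

    def call(self, inputs, **kwargs):
        y_rev = super().call(inputs, **kwargs)  # output arrives time-reversed
        return K.reverse(y_rev, 1)              # restore t = 0..T-1 order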

Example 9: viterbi_decoding

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def viterbi_decoding(self, X, mask=None):
        input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
        if self.use_boundary:
            input_energy = self.add_boundary_energy(input_energy, mask, self.left_boundary, self.right_boundary)

        argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
        argmin_tables = K.cast(argmin_tables, 'int32')

        # backward to find best path, `initial_best_idx` can be any, as all elements in the last argmin_table are the same
        argmin_tables = K.reverse(argmin_tables, 1)
        initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]  # matrix instead of vector is required by tf `K.rnn`
        if K.backend() == 'theano':
            initial_best_idx = [K.T.unbroadcast(initial_best_idx[0], 1)]

        def gather_each_row(params, indices):
            n = K.shape(indices)[0]
            if K.backend() == 'theano':
                return params[K.T.arange(n), indices]
            else:
                indices = K.transpose(K.stack([K.tf.range(n), indices]))
                return K.tf.gather_nd(params, indices)

        def find_path(argmin_table, best_idx):
            next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
            next_best_idx = K.expand_dims(next_best_idx)
            if K.backend() == 'theano':
                next_best_idx = K.T.unbroadcast(next_best_idx, 1)
            return next_best_idx, [next_best_idx]

        _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx, input_length=K.int_shape(X)[1], unroll=self.unroll)
        best_paths = K.reverse(best_paths, 1)
        best_paths = K.squeeze(best_paths, 2)

        return K.one_hot(best_paths, self.units) 
Developer: yongyuwen, Project: sequence-tagging-ner, Lines: 36, Source: layers.py

Example 10: create_model

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def create_model(self, hyper_parameters):
        """
            构建神经网络,行卷积加池化
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        # rnn layers
        if self.rnn_units == "LSTM":
            layer_cell = LSTM
        else:
            layer_cell = GRU
        # backward direction (sequence processed in reverse)
        x_backwords = layer_cell(units=self.rnn_units,
                                    return_sequences=True,
                                    kernel_regularizer=regularizers.l2(0.32 * 0.1),
                                    recurrent_regularizer=regularizers.l2(0.32),
                                    go_backwards = True)(embedding_output)
        x_backwords_reverse = Lambda(lambda x: K.reverse(x, axes=1))(x_backwords)
        # forward direction
        x_fordwords = layer_cell(units=self.rnn_units,
                                    return_sequences=True,
                                    kernel_regularizer=regularizers.l2(0.32 * 0.1),
                                    recurrent_regularizer=regularizers.l2(0.32),
                                    go_backwards = False)(embedding_output)
        # concatenate forward output, embedding, and re-aligned backward output
        x_feb = Concatenate(axis=2)([x_fordwords, embedding_output, x_backwords_reverse])

        #### apply multiple convolution kernel sizes ####################################
        x_feb = Dropout(self.dropout)(x_feb)
        # feature dimension after concatenation
        dim_2 = K.int_shape(x_feb)[2]
        x_feb_reshape = Reshape((self.len_max, dim_2, 1))(x_feb)
        # extract n-gram features with max pooling (average pooling is rarely used here)
        conv_pools = []
        for filter in self.filters:
            conv = Conv2D(filters = self.filters_num,
                          kernel_size = (filter, dim_2),
                          padding = 'valid',
                          kernel_initializer = 'normal',
                          activation = 'relu',
                          )(x_feb_reshape)
            pooled = MaxPooling2D(pool_size = (self.len_max - filter + 1, 1),
                                   strides = (1, 1),
                                   padding = 'valid',
                                   )(conv)
            conv_pools.append(pooled)
        # concatenate the pooled features
        x = Concatenate()(conv_pools)
        x = Flatten()(x)
        #########################################################################

        output = Dense(units=self.label, activation=self.activate_classify)(x)
        self.model = Model(inputs=self.word_embedding.input, outputs=output)
        self.model.summary(120) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines: 58, Source: graph.py
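The Lambda wrapping K.reverse above is needed because a Keras RNN with go_backwards=True returns its output sequence in reversed time order; reversing it again re-aligns it with the forward pass before concatenation. A stripped-down sketch of that core pattern (shapes and unit counts are illustrative, not the project's hyperparameters):

# Minimal sketch of the bi-directional RCNN context construction.
from keras import backend as K
from keras.layers import Input, GRU, Lambda, Concatenate

emb = Input(shape=(50, 128))                        # (timesteps, embedding_dim)
fwd = GRU(64, return_sequences=True)(emb)
bwd = GRU(64, return_sequences=True, go_backwards=True)(emb)
bwd = Lambda(lambda x: K.reverse(x, axes=1))(bwd)   # re-align with forward time order
context = Concatenate(axis=2)([fwd, emb, bwd])      # left context, word, right context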

Example 11: recursion

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def recursion(self, input_energy, mask=None, go_backwards=False,
                  return_sequences=True, return_logZ=True, input_length=None):
        """Forward (alpha) or backward (beta) recursion

        If `return_logZ = True`, compute logZ, the normalization constant:

        \[ Z = \sum_{y1, y2, y3} exp(-E)   # E is the total energy
             = \sum_{y1, y2, y3} exp(-(u1' y1 + y1' W y2 + u2' y2 + y2' W y3 + u3' y3))
             = \sum_{y2, y3} (exp(-(u2' y2 + y2' W y3 + u3' y3))
               \sum_{y1} exp(-(u1' y1 + y1' W y2))) \]

        Denote:
            \[ S(y2) := \sum_{y1} exp(-(u1' y1 + y1' W y2)), \]
            \[ Z = \sum_{y2, y3} exp(log S(y2) - (u2' y2 + y2' W y3 + u3' y3)) \]
            \[ log S(y2) = log_sum_exp(-(u1' y1 + y1' W y2)) \]
        Note that:
              yi's are one-hot vectors
              u1, u3: boundary energies have been merged

        If `return_logZ = False`, compute the Viterbi's best path lookup table.
        """
        chain_energy = self.chain_kernel
        # shape=(1, F, F): F=num of output features. 1st F is for t-1, 2nd F for t
        chain_energy = K.expand_dims(chain_energy, 0)
        # shape=(B, F), dtype=float32
        prev_target_val = K.zeros_like(input_energy[:, 0, :])

        if go_backwards:
            input_energy = K.reverse(input_energy, 1)
            if mask is not None:
                mask = K.reverse(mask, 1)

        initial_states = [prev_target_val, K.zeros_like(prev_target_val[:, :1])]
        constants = [chain_energy]

        if mask is not None:
            mask2 = K.cast(K.concatenate([mask, K.zeros_like(mask[:, :1])], axis=1),
                           K.floatx())
            constants.append(mask2)

        def _step(input_energy_i, states):
            return self.step(input_energy_i, states, return_logZ)

        target_val_last, target_val_seq, _ = K.rnn(_step, input_energy,
                                                   initial_states,
                                                   constants=constants,
                                                   input_length=input_length,
                                                   unroll=self.unroll)

        if return_sequences:
            if go_backwards:
                target_val_seq = K.reverse(target_val_seq, 1)
            return target_val_seq
        else:
            return target_val_last 
Developer: keras-team, Project: keras-contrib, Lines: 57, Source: crf.py

Example 12: viterbi_decoding

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def viterbi_decoding(self, X, mask=None):
        input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
        if self.use_boundary:
            input_energy = self.add_boundary_energy(
                input_energy, mask, self.left_boundary, self.right_boundary)

        argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
        argmin_tables = K.cast(argmin_tables, 'int32')

        # backward to find best path, `initial_best_idx` can be any,
        # as all elements in the last argmin_table are the same
        argmin_tables = K.reverse(argmin_tables, 1)
        # matrix instead of vector is required by tf `K.rnn`
        initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]
        if K.backend() == 'theano':
            from theano import tensor as T
            initial_best_idx = [T.unbroadcast(initial_best_idx[0], 1)]

        def gather_each_row(params, indices):
            n = K.shape(indices)[0]
            if K.backend() == 'theano':
                from theano import tensor as T
                return params[T.arange(n), indices]
            elif K.backend() == 'tensorflow':
                import tensorflow as tf
                indices = K.transpose(K.stack([tf.range(n), indices]))
                return tf.gather_nd(params, indices)
            else:
                raise NotImplementedError

        def find_path(argmin_table, best_idx):
            next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
            next_best_idx = K.expand_dims(next_best_idx)
            if K.backend() == 'theano':
                from theano import tensor as T
                next_best_idx = T.unbroadcast(next_best_idx, 1)
            return next_best_idx, [next_best_idx]

        _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx,
                                 input_length=K.int_shape(X)[1], unroll=self.unroll)
        best_paths = K.reverse(best_paths, 1)
        best_paths = K.squeeze(best_paths, 2)

        return K.one_hot(best_paths, self.units) 
Developer: keras-team, Project: keras-contrib, Lines: 46, Source: crf.py

Example 13: test_Bidirectional_merged_value

# Required module import: from keras import backend [as an alias]
# Alternatively: from keras.backend import reverse [as an alias]
def test_Bidirectional_merged_value(merge_mode):
    rnn = layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    X = [np.random.rand(samples, timesteps, dim)]

    if merge_mode == 'sum':
        merge_func = lambda y, y_rev: y + y_rev
    elif merge_mode == 'mul':
        merge_func = lambda y, y_rev: y * y_rev
    elif merge_mode == 'ave':
        merge_func = lambda y, y_rev: (y + y_rev) / 2
    elif merge_mode == 'concat':
        merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
    else:
        merge_func = lambda y, y_rev: [y, y_rev]

    # basic case
    inputs = Input((timesteps, dim))
    layer = wrappers.Bidirectional(rnn(units, return_sequences=True), merge_mode=merge_mode)
    f_merged = K.function([inputs], _to_list(layer(inputs)))
    f_forward = K.function([inputs], [layer.forward_layer.call(inputs)])
    f_backward = K.function([inputs], [K.reverse(layer.backward_layer.call(inputs), 1)])

    y_merged = f_merged(X)
    y_expected = _to_list(merge_func(f_forward(X)[0], f_backward(X)[0]))
    assert len(y_merged) == len(y_expected)
    for x1, x2 in zip(y_merged, y_expected):
        assert_allclose(x1, x2, atol=1e-5)

    # test return_state
    inputs = Input((timesteps, dim))
    layer = wrappers.Bidirectional(rnn(units, return_state=True), merge_mode=merge_mode)
    f_merged = K.function([inputs], layer(inputs))
    f_forward = K.function([inputs], layer.forward_layer.call(inputs))
    f_backward = K.function([inputs], layer.backward_layer.call(inputs))
    n_states = len(layer.layer.states)

    y_merged = f_merged(X)
    y_forward = f_forward(X)
    y_backward = f_backward(X)
    y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
    assert len(y_merged) == len(y_expected) + n_states * 2
    for x1, x2 in zip(y_merged, y_expected):
        assert_allclose(x1, x2, atol=1e-5)

    # test if the state of a BiRNN is the concatenation of the underlying RNNs
    y_merged = y_merged[-n_states * 2:]
    y_forward = y_forward[-n_states:]
    y_backward = y_backward[-n_states:]
    for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
        assert_allclose(state_birnn, state_inner, atol=1e-5) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 56, Source: wrappers_test.py


Note: The keras.backend.reverse examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce this article without permission.