

Python backend.cumsum Method Code Examples

This article collects and summarizes typical usage examples of the keras.backend.cumsum method in Python. If you are wondering how exactly backend.cumsum is used, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the module it belongs to, keras.backend.


The 11 code examples of the backend.cumsum method shown below are sorted by popularity by default. Upvoting the examples you like or find useful helps the site recommend better Python code examples.

Example 1: Mask

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def Mask(self, inputs, seq_len, mode='mul'):
        """
        # Arguments:
            inputs: input tensor with shape (batch_size, seq_len, input_size)
            seq_len: Each sequence's actual length with shape (batch_size,)
            mode:
                mul: set the padded positions to zero, used before a fully-connected layer
                add: subtract a large constant from the padded positions, used before a softmax layer
        # Returns:
            A masked tensor with the same shape as the input tensor
        """
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask
            if mode == 'add':
                return inputs - (1 - mask) * 1e12 
Author: stevewyl | Project: nlp_toolkit | Lines of code: 24 | Source file: self_attention.py
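
Why the trick works: K.one_hot places a 1 exactly at index seq_len, so 1 - K.cumsum(...) is 1 for every valid position and 0 for every padded position. A minimal NumPy sketch of the same idea (illustration only, not part of the original project):

import numpy as np

seq_len = 3                                # actual length of one sequence
max_len = 5                                # padded length
one_hot = np.eye(max_len)[seq_len]         # [0. 0. 0. 1. 0.]
mask = 1 - np.cumsum(one_hot)              # [1. 1. 1. 0. 0.]
print(mask)                                # positions >= seq_len are zeroed out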

Example 2: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000.,
                                2 * K.arange(self.size / 2, dtype='float32'
                                             ) / self.size)
        position_j = K.expand_dims(position_j, 0)
        # K.arange does not support a variable sequence length, so positions are generated this way instead
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
Author: stevewyl | Project: nlp_toolkit | Lines of code: 20 | Source file: position_embedding.py
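
The comment above points out that K.arange cannot be used when the sequence length is only known at run time; the cumulative sum of a ones tensor is used as a substitute. A small NumPy sketch of that substitution (illustration only):

import numpy as np

x = np.zeros((2, 4, 8), dtype='float32')                      # (batch, seq_len, size)
position_i = np.cumsum(np.ones_like(x[:, :, 0]), axis=1) - 1  # acts like arange per row
print(position_i)                                             # [[0. 1. 2. 3.] [0. 1. 2. 3.]]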

Example 3: output_sampling

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def output_sampling(self, output, rand_matrix):
        # Generates a sampled selection based on raw output state vector
        # Creates a cdf vector and compares against a randomly generated vector
        # Requires a pre-generated rand_matrix (i.e. generated outside step function)

        sampled_output = output / K.sum(output, axis=-1, keepdims=True)  # (batch_size, self.units)
        mod_sampled_output = sampled_output / K.exp(self.temperature)
        norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)

        cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
        cdf_minus_vector = cdf_vector - norm_exp_sampled_output

        rand_matrix = K.stack([rand_matrix], axis=0)
        rand_matrix = K.stack([rand_matrix], axis=2)

        compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
        compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')

        final_output = compared_greater_output * compared_lesser_output
        return final_output 
Author: aspuru-guzik-group | Project: chemical_vae | Lines of code: 22 | Source file: tgru_k2_gpu.py
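
The sampling works because the random value falls into exactly one interval [cdf - p, cdf), so multiplying the two comparison masks yields a one-hot vector for the sampled index. A hypothetical NumPy sketch of that CDF trick (simplified to a single distribution, without the temperature scaling and reshaping above):

import numpy as np

probs = np.array([0.1, 0.6, 0.3])                # normalized output distribution
cdf = np.cumsum(probs)                           # [0.1 0.7 1.0]
cdf_minus = cdf - probs                          # [0.0 0.1 0.7]
r = 0.65                                         # pre-generated uniform random value
sample = (cdf > r).astype('float32') * (cdf_minus < r).astype('float32')
print(sample)                                    # [0. 1. 0.] -> index 1 was sampled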

Example 4: _get_pos_seq

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def _get_pos_seq(x, null_token_value=0):
    mask = K.cast(K.not_equal(x, null_token_value), 'float32')
    pos = K.cumsum(K.ones_like(x, 'float32'), 1)
    return pos * mask 
Author: zimmerrol | Project: attention-is-all-you-need-keras | Lines of code: 6 | Source file: model.py
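
Here the cumulative sum of a ones tensor numbers the positions 1..T, and multiplying by the not-equal mask resets padding tokens to position 0. A minimal NumPy sketch (illustration only):

import numpy as np

x = np.array([[7, 2, 5, 0, 0]])                              # 0 is the padding token
mask = (x != 0).astype('float32')                            # [[1. 1. 1. 0. 0.]]
pos = np.cumsum(np.ones_like(x, dtype='float32'), axis=1)    # [[1. 2. 3. 4. 5.]]
print(pos * mask)                                            # [[1. 2. 3. 0. 0.]]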

Example 5: earth_mover_loss

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Author: titu1994 | Project: neural-image-assessment | Lines of code: 7 | Source file: pretrain_inception_resnet.py
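
Because earth_mover_loss is a plain (y_true, y_pred) function, it can be passed directly to compile() as a custom loss. A hedged usage sketch (the model architecture and optimizer below are assumptions, not taken from the original project):

from keras.models import Sequential
from keras.layers import Dense

# hypothetical 10-class rating model; earth_mover_loss is the function defined above
model = Sequential([Dense(10, activation='softmax', input_shape=(128,))])
model.compile(optimizer='adam', loss=earth_mover_loss)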

Example 6: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def call(self, inputs, mask=None):
        # pylint: disable=redefined-variable-type

        # This section implements the positional encoder on all the vectors at once.
        # The general idea is to use ones matrices in the shape of `inputs` to create indexes per
        # word.

        if mask is None:
            ones_like_x = K.ones_like(inputs)
        else:
            float_mask = K.cast(mask, 'float32')
            ones_like_x = K.ones_like(inputs) * K.expand_dims(float_mask, 2)

        # This is an odd way to get the number of words (i.e. the first dimension of inputs).
        # However, if the input is masked, using the dimension directly does not
        # equate to the correct number of words. We fix this by adding up a relevant
        # row of ones which has been masked if required.
        masked_m = K.expand_dims(K.sum(ones_like_x, 1), 1)

        if mask is None:
            one_over_m = ones_like_x / masked_m
            j_index = K.cumsum(ones_like_x, 1)
        else:
            one_over_m = switch(ones_like_x, ones_like_x/masked_m, K.zeros_like(ones_like_x))

            j_index = K.cumsum(ones_like_x, 1) * K.expand_dims(float_mask, 2)

        k_over_d = K.cumsum(ones_like_x, 2) * 1.0/K.cast(K.shape(inputs)[2], 'float32')

        l_weighting_vectors = (ones_like_x - (j_index * one_over_m)) - \
                              (k_over_d * (ones_like_x - 2 * j_index * one_over_m))

        return K.sum(l_weighting_vectors * inputs, 1) 
Author: allenai | Project: deep_qa | Lines of code: 35 | Source file: positional_encoder.py
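
Stripped of the batch and mask handling, the weighting computed above is l[j, k] = (1 - j/m) - (k/d) * (1 - 2*j/m), where j indexes words and k indexes embedding dimensions. A hypothetical NumPy sketch of the unmasked case (illustration only):

import numpy as np

m, d = 3, 4                                        # number of words, embedding dim
j = np.cumsum(np.ones((m, d)), axis=0)             # word index 1..m down each column
k_over_d = np.cumsum(np.ones((m, d)), axis=1) / d  # dimension fraction 1/d..1 along each row
l = (1 - j / m) - k_over_d * (1 - 2 * j / m)       # (m, d) positional weighting matrix
print(l)                                           # these weights scale the word vectors before summing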

Example 7: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def call(self, inputs, mask=None):
        span_begin, span_end = inputs
        after_span_begin = K.cumsum(span_begin, axis=-1)
        after_span_end = K.cumsum(span_end, axis=-1)
        before_span_end = 1.0 - after_span_end
        return after_span_begin * before_span_end 
Author: allenai | Project: deep_qa | Lines of code: 8 | Source file: envelope.py
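
The cumulative sum of the begin distribution is 1 from the begin index onward, and 1 minus the cumulative sum of the end distribution is 1 strictly before the end index; their product is an envelope over the span. A minimal NumPy sketch with one-hot inputs (illustration only):

import numpy as np

span_begin = np.array([0., 1., 0., 0., 0.])   # span starts at index 1
span_end   = np.array([0., 0., 0., 1., 0.])   # span ends at index 3
after_begin = np.cumsum(span_begin)           # [0. 1. 1. 1. 1.]
before_end = 1.0 - np.cumsum(span_end)        # [1. 1. 1. 0. 0.]
print(after_begin * before_end)               # [0. 1. 1. 0. 0.] -> envelope over the span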

Example 8: sequence_mask

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def sequence_mask(seq):
    """

    :param seq: shape of [N, T_q]
    :return:
    """
    seq_len = K.shape(seq)[1]
    batch_size = K.shape(seq)[:1]
    return K.cast(K.cumsum(tf.eye(seq_len, batch_shape=batch_size), axis=1), dtype='float32') 
Author: GlassyWing | Project: transformer-keras | Lines of code: 11 | Source file: core.py
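
Taking the cumulative sum of an identity matrix down its rows yields a lower-triangular mask, so query position i can only attend to positions 0..i. A minimal NumPy sketch without the batch dimension (axis 0 here plays the role of axis 1 above):

import numpy as np

seq_len = 3
mask = np.cumsum(np.eye(seq_len), axis=0)   # lower-triangular look-ahead mask
print(mask)
# [[1. 0. 0.]
#  [1. 1. 0.]
#  [1. 1. 1.]]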

Example 9: get_pos_seq

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def get_pos_seq(self, x):
        mask = K.cast(K.not_equal(x, 0), dtype="int32")
        pos = K.cumsum(K.ones_like(x, dtype='int32'), axis=1)
        return mask * pos 
Author: GlassyWing | Project: transformer-keras | Lines of code: 6 | Source file: core.py

Example 10: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support variable length, so positions are generated this way
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
Author: foamliu | Project: Self-Attention-Keras | Lines of code: 16 | Source file: attention.py

Example 11: Mask

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import cumsum [as alias]
def Mask(self, inputs, seq_len, mode='mul'):
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask
            if mode == 'add':
                return inputs - (1 - mask) * 1e12 
Author: foamliu | Project: Self-Attention-Keras | Lines of code: 14 | Source file: attention.py


Note: The keras.backend.cumsum examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.