

Python backend.cumsum Method Code Examples

This article collects typical usage examples of the Python keras.backend.cumsum method. If you are wondering what backend.cumsum does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples for the keras.backend module that the method belongs to.


A total of 11 code examples of the backend.cumsum method are shown below, sorted by popularity by default.
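
Before the individual examples, here is a minimal standalone sketch of what K.cumsum computes, assuming a Keras 2.x / tf.keras setup where K.constant and K.eval are available:

from keras import backend as K

x = K.constant([[1., 2., 3.],
                [4., 5., 6.]])

# Cumulative sum along the last axis: each entry is the sum of all
# entries up to and including that position.
print(K.eval(K.cumsum(x, axis=-1)))
# [[ 1.  3.  6.]
#  [ 4.  9. 15.]]

# Along axis 0 the sums accumulate down the rows instead.
print(K.eval(K.cumsum(x, axis=0)))
# [[1. 2. 3.]
#  [5. 7. 9.]]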

Example 1: Mask

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def Mask(self, inputs, seq_len, mode='mul'):
        """
        # Arguments:
            inputs: input tensor with shape (batch_size, seq_len, input_size)
            seq_len: Each sequence's actual length with shape (batch_size,)
            mode:
                mul: mask the rest dim with zero, used before fully-connected layer
                add: subtract a big constant from the rest, used before softmax layer
        # Reutrns:
            Masked tensors with the same shape of input tensor
        """
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask
            if mode == 'add':
                return inputs - (1 - mask) * 1e12 
Developer: stevewyl, Project: nlp_toolkit, Lines: 24, Source file: self_attention.py
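
The core trick in this example is building a padding mask from K.one_hot and K.cumsum. The following standalone sketch (an illustration, not taken from nlp_toolkit) shows the resulting mask for a toy batch, assuming Keras 2.x / tf.keras:

from keras import backend as K

max_len = 5
seq_len = K.constant([[3], [5]], dtype='int32')   # actual length of each sequence

# one_hot marks the first padded position, cumsum turns it into a step
# function, and 1 - cumsum keeps ones only over the valid time steps.
mask = 1 - K.cumsum(K.one_hot(seq_len[:, 0], max_len), axis=1)
print(K.eval(mask))
# [[1. 1. 1. 0. 0.]
#  [1. 1. 1. 1. 1.]]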

Example 2: call

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000.,
                                2 * K.arange(self.size / 2, dtype='float32'
                                             ) / self.size)
        position_j = K.expand_dims(position_j, 0)
        # K.arange does not support variable length, so the positions are generated this way instead
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
Developer: stevewyl, Project: nlp_toolkit, Lines: 20, Source file: position_embedding.py
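
The position indices here come from a cumulative sum over a tensor of ones, because K.arange cannot handle a dynamic sequence length. A minimal sketch of just that step (an illustration, not taken from nlp_toolkit), assuming Keras 2.x / tf.keras:

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((2, 4, 8)))               # (batch_size, seq_len, features)
position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
print(K.eval(position_i))
# [[0. 1. 2. 3.]
#  [0. 1. 2. 3.]]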

Example 3: output_sampling

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def output_sampling(self, output, rand_matrix):
        # Generates a sampled selection based on raw output state vector
        # Creates a cdf vector and compares against a randomly generated vector
        # Requires a pre-generated rand_matrix (i.e. generated outside step function)

        sampled_output = output / K.sum(output, axis=-1, keepdims=True)  # (batch_size, self.units)
        mod_sampled_output = sampled_output / K.exp(self.temperature)
        norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)

        cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
        cdf_minus_vector = cdf_vector - norm_exp_sampled_output

        rand_matrix = K.stack([rand_matrix], axis=0)
        rand_matrix = K.stack([rand_matrix], axis=2)

        compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
        compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')

        final_output = compared_greater_output * compared_lesser_output
        return final_output 
Developer: aspuru-guzik-group, Project: chemical_vae, Lines: 22, Source file: tgru_k2_gpu.py
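
The sampling idea above can be checked in isolation: the cumulative distribution brackets each category, and a uniform random number falls into exactly one bracket. A simplified sketch (an illustration, not taken from chemical_vae), assuming Keras 2.x / tf.keras:

from keras import backend as K

probs = K.constant([[0.2, 0.5, 0.3]])             # already-normalized probabilities
rand = K.constant([[0.65]])                       # a uniform sample in [0, 1)

cdf = K.cumsum(probs, axis=-1)                    # [0.2, 0.7, 1.0]
cdf_minus = cdf - probs                           # [0.0, 0.2, 0.7]
one_hot_sample = K.cast(K.greater(cdf, rand), 'float32') * \
                 K.cast(K.less(cdf_minus, rand), 'float32')
print(K.eval(one_hot_sample))                     # [[0. 1. 0.]] -> category 1 is sampled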

Example 4: _get_pos_seq

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def _get_pos_seq(x, null_token_value=0):
    mask = K.cast(K.not_equal(x, null_token_value), 'float32')
    pos = K.cumsum(K.ones_like(x, 'float32'), 1)
    return pos * mask 
Developer: zimmerrol, Project: attention-is-all-you-need-keras, Lines: 6, Source file: model.py
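
A tiny numeric illustration (not taken from the original repository) of what _get_pos_seq returns for a zero-padded batch, assuming Keras 2.x / tf.keras:

from keras import backend as K

x = K.constant([[7., 3., 0., 0.],
                [5., 9., 2., 0.]])                # 0 is the padding token
mask = K.cast(K.not_equal(x, 0), 'float32')
pos = K.cumsum(K.ones_like(x, 'float32'), 1)
print(K.eval(pos * mask))
# [[1. 2. 0. 0.]
#  [1. 2. 3. 0.]]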

Example 5: earth_mover_loss

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Developer: titu1994, Project: neural-image-assessment, Lines: 7, Source file: pretrain_inception_resnet.py
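
A quick numeric check (an illustration, not taken from neural-image-assessment) that the loss grows as the two score distributions drift apart; it assumes the earth_mover_loss function defined above is in scope and a Keras 2.x / tf.keras backend:

from keras import backend as K

y_true  = K.constant([[0.0, 0.1, 0.8, 0.1, 0.0]])
y_close = K.constant([[0.0, 0.2, 0.6, 0.2, 0.0]])
y_far   = K.constant([[0.8, 0.1, 0.0, 0.1, 0.0]])

print(K.eval(earth_mover_loss(y_true, y_close)))  # ~0.063, the CDFs nearly coincide
print(K.eval(earth_mover_loss(y_true, y_far)))    # ~0.506, the CDFs differ strongly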

Example 6: call

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def call(self, inputs, mask=None):
        # pylint: disable=redefined-variable-type

        # This section implements the positional encoder on all the vectors at once.
        # The general idea is to use ones matrices in the shape of `inputs` to create indexes per
        # word.

        if mask is None:
            ones_like_x = K.ones_like(inputs)
        else:
            float_mask = K.cast(mask, 'float32')
            ones_like_x = K.ones_like(inputs) * K.expand_dims(float_mask, 2)

        # This is an odd way to get the number of words (i.e. the first dimension of inputs).
        # However, if the input is masked, using the dimension directly does not
        # equate to the correct number of words. We fix this by adding up a relevant
        # row of ones which has been masked if required.
        masked_m = K.expand_dims(K.sum(ones_like_x, 1), 1)

        if mask is None:
            one_over_m = ones_like_x / masked_m
            j_index = K.cumsum(ones_like_x, 1)
        else:
            one_over_m = switch(ones_like_x, ones_like_x/masked_m, K.zeros_like(ones_like_x))

            j_index = K.cumsum(ones_like_x, 1) * K.expand_dims(float_mask, 2)

        k_over_d = K.cumsum(ones_like_x, 2) * 1.0/K.cast(K.shape(inputs)[2], 'float32')

        l_weighting_vectors = (ones_like_x - (j_index * one_over_m)) - \
                              (k_over_d * (ones_like_x - 2 * j_index * one_over_m))

        return K.sum(l_weighting_vectors * inputs, 1) 
Developer: allenai, Project: deep_qa, Lines: 35, Source file: positional_encoder.py
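
The two K.cumsum calls above are what build the per-word index j and the per-dimension fraction k/d used in the weighting. A minimal sketch of just that part (an illustration, not taken from deep_qa), assuming Keras 2.x / tf.keras:

import numpy as np
from keras import backend as K

inputs = K.constant(np.zeros((1, 3, 4)))          # (batch, num_words, dim)
ones_like_x = K.ones_like(inputs)

j_index = K.cumsum(ones_like_x, 1)                # word index j, counting from 1
k_over_d = K.cumsum(ones_like_x, 2) / K.cast(K.shape(inputs)[2], 'float32')

print(K.eval(j_index[0, :, 0]))                   # [1. 2. 3.]
print(K.eval(k_over_d[0, 0, :]))                  # [0.25 0.5  0.75 1.  ]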

Example 7: call

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def call(self, inputs, mask=None):
        span_begin, span_end = inputs
        after_span_begin = K.cumsum(span_begin, axis=-1)
        after_span_end = K.cumsum(span_end, axis=-1)
        before_span_end = 1.0 - after_span_end
        return after_span_begin * before_span_end 
Developer: allenai, Project: deep_qa, Lines: 8, Source file: envelope.py
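
A tiny numeric sketch (an illustration, not taken from deep_qa) of the span envelope: with all begin probability at position 1 and all end probability at position 3, the envelope is 1 from the begin position up to, but not including, the end position. Assumes Keras 2.x / tf.keras:

from keras import backend as K

span_begin = K.constant([[0., 1., 0., 0., 0.]])
span_end   = K.constant([[0., 0., 0., 1., 0.]])

after_begin = K.cumsum(span_begin, axis=-1)       # [0. 1. 1. 1. 1.]
before_end  = 1.0 - K.cumsum(span_end, axis=-1)   # [1. 1. 1. 0. 0.]
print(K.eval(after_begin * before_end))           # [[0. 1. 1. 0. 0.]]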

Example 8: sequence_mask

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def sequence_mask(seq):
    """

    :param seq: shape of [N, T_q]
    :return:
    """
    seq_len = K.shape(seq)[1]
    batch_size = K.shape(seq)[:1]
    return K.cast(K.cumsum(tf.eye(seq_len, batch_shape=batch_size), axis=1), dtype='float32') 
Developer: GlassyWing, Project: transformer-keras, Lines: 11, Source file: core.py
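
Note that this example also needs import tensorflow as tf for tf.eye, which the import header above does not list. A short usage sketch (an illustration, not taken from transformer-keras) of the look-ahead mask it produces, assuming Keras 2.x with the TensorFlow backend and the sequence_mask function above in scope:

import numpy as np
import tensorflow as tf
from keras import backend as K

seq = K.constant(np.zeros((2, 4)))                # [N, T_q]; only the shape matters here
mask = sequence_mask(seq)                         # shape [N, T_q, T_q]
print(K.eval(mask[0]))
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]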

Example 9: get_pos_seq

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def get_pos_seq(self, x):
        mask = K.cast(K.not_equal(x, 0), dtype="int32")
        pos = K.cumsum(K.ones_like(x, dtype='int32'), axis=1)
        return mask * pos 
Developer: GlassyWing, Project: transformer-keras, Lines: 6, Source file: core.py

Example 10: call

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support variable length, so generate positions this way
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
Developer: foamliu, Project: Self-Attention-Keras, Lines: 16, Source file: attention.py

Example 11: Mask

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import cumsum [as alias]
def Mask(self, inputs, seq_len, mode='mul'):
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask
            if mode == 'add':
                return inputs - (1 - mask) * 1e12 
Developer: foamliu, Project: Self-Attention-Keras, Lines: 14, Source file: attention.py


Note: The keras.backend.cumsum examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors; the copyright of the source code remains with those authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.