

Python backend.slice Method Code Examples

This article collects typical usage examples of the keras.backend.slice method in Python. If you are wondering how backend.slice is used in practice, what its arguments look like, or how it fits into real code, the curated examples below should help. You can also explore further usage examples from the keras.backend module to which the method belongs.


Five code examples of the backend.slice method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
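
Before the examples, here is a minimal, self-contained sketch of the method's semantics (assuming a Keras version that provides keras.backend.slice, running on the TensorFlow backend; the tensor and shapes are made up for illustration, and the examples below all import the backend as K in the same way): K.slice(x, start, size) extracts the sub-tensor that begins at index start in each dimension and spans size elements along that dimension, where a size of -1 means "everything remaining in that dimension".

import numpy as np
from keras import backend as K

# A 3x4 constant tensor to slice (illustrative data).
x = K.constant(np.arange(12).reshape(3, 4))

# Rows 1..2, columns 0..2: start=(1, 0), size=(2, 3).
sub = K.slice(x, (1, 0), (2, 3))

# A size of -1 keeps everything remaining in that dimension,
# so this is equivalent to x[:, 2:].
tail = K.slice(x, (0, 2), (-1, -1))

print(K.eval(sub))   # [[ 4.  5.  6.]  [ 8.  9. 10.]]
print(K.eval(tail))  # [[ 2.  3.]  [ 6.  7.]  [10. 11.]]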

Example 1: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import slice [as alias]
def call(self, inputs, **kwargs):
        if not (isinstance(inputs, list) and len(inputs) == 2):
            raise ValueError(
                'You can call this layer only with a list of two tensors '
                '(for keys/values and queries)')
        key_values_input, query_input = inputs
        _, value_seq_len, d_model = K.int_shape(key_values_input)
        query_seq_len = K.int_shape(inputs[1])[-2]
        # The first thing we need to do is to perform affine transformations
        # of the inputs to get the Queries, the Keys and the Values.
        kv = K.dot(K.reshape(key_values_input, [-1, d_model]), self.kv_weights)
        # splitting the keys, the values and the queries before further
        # processing
        pre_k, pre_v = [
            K.reshape(
                # K.slice(kv, (0, i * d_model), (-1, d_model)),
                kv[:, i * d_model: (i + 1) * d_model],
                (-1, value_seq_len,
                 self.num_heads, d_model // self.num_heads))
            for i in range(2)]
        pre_q = K.reshape(
            K.dot(K.reshape(query_input, [-1, d_model]), self.q_weights),
            (-1, query_seq_len, self.num_heads, d_model // self.num_heads))
        return self.attention(pre_q, pre_v, pre_k, query_seq_len, d_model,
                              training=kwargs.get('training')) 
Author: kpot, Project: keras-transformer, Lines of code: 27, Source: attention.py
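
The commented-out line above shows the equivalence this example relies on: for a 2D tensor, K.slice(kv, (0, i * d_model), (-1, d_model)) selects the same block of columns as the native slice kv[:, i * d_model:(i + 1) * d_model]. A small sketch of that equivalence (the tensor and d_model value are made up for illustration):

import numpy as np
from keras import backend as K

d_model = 2
kv = K.constant(np.arange(8).reshape(2, 2 * d_model))  # (batch, 2 * d_model)

for i in range(2):
    # K.slice: start at column i * d_model, take d_model columns, all rows.
    sliced = K.slice(kv, (0, i * d_model), (-1, d_model))
    # The native slicing actually used in the example above.
    indexed = kv[:, i * d_model:(i + 1) * d_model]
    assert np.array_equal(K.eval(sliced), K.eval(indexed))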

Example 2: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import slice [as alias]
def call(self, inputs, mask=None):
        if self.use_token_type:
            assert len(inputs) == 2, "`token_type_ids` must be specified if `use_token_type` is True."
            output = inputs[0]
            _, seq_length, input_width = K.int_shape(output)
            # print(inputs)
            assert seq_length == K.int_shape(inputs[1])[1], "width of `token_type_ids` must be equal to `seq_length`"
            token_type_ids = inputs[1]
            # assert K.int_shape(token_type_ids)[1] <= self.token_type_vocab_size
            flat_token_type_ids = K.reshape(token_type_ids, [-1])
            flat_token_type_ids = K.cast(flat_token_type_ids, dtype='int32')
            token_type_one_hot_ids = K.one_hot(flat_token_type_ids, num_classes=self.token_type_vocab_size)
            token_type_embeddings = K.dot(token_type_one_hot_ids, self.token_type_table)
            token_type_embeddings = K.reshape(token_type_embeddings, shape=[-1, seq_length, input_width])
            # print(token_type_embeddings)
            output += token_type_embeddings
        else:
            output = inputs
            seq_length = K.int_shape(inputs)[1]

        if self.use_position_embeddings:
            position_embeddings = K.slice(self.full_position_embeddings, [0, 0], [seq_length, -1])
            output += position_embeddings

        return output 
Author: miroozyx, Project: BERT_with_keras, Lines of code: 27, Source: modeling.py

Example 3: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import slice [as alias]
def step(self, input_energy_t, states, return_logZ=True):
        # Note: in the following, `prev_target_val` has shape = (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            # (1, F, F)*(B, 1, 1) -> (B, F, F)
            chain_energy = chain_energy * K.expand_dims(
                K.expand_dims(m[:, 0] * m[:, 1]))
        if return_logZ:
            # shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
            new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            # cast for tf-version `K.rnn`
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
            return argmin_table, [min_energy, i + 1] 
Author: keras-team, Project: keras-contrib, Lines of code: 28, Source: crf.py
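
Note why this example reaches for K.slice in the non-Theano branch: the start position t is itself a tensor computed inside K.rnn, and K.slice accepts a tensor start index, so the window is taken with K.slice(states[3], [0, t], [-1, 2]). A minimal sketch of slicing with a symbolic start index (assuming the TensorFlow backend; the mask values and index are made up for illustration):

import numpy as np
from keras import backend as K

mask = K.constant(np.arange(10).reshape(2, 5))  # (B, T) = (2, 5)
t = K.constant(2, dtype='int32')  # stands in for the dynamic time index from K.rnn

# Take the 2-step window [t, t + 2) from every batch row.
window = K.slice(mask, [0, t], [-1, 2])
print(K.eval(window))  # [[2. 3.]  [7. 8.]]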

Example 4: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import slice [as alias]
def call(self, inputs, **kwargs):
        one_hot_feature_index = K.cast(K.slice(inputs, (0, 0), (-1, self.feature_num)), "int32")
        numeric_feature = K.slice(inputs, (0, self.feature_num), (-1, -1))

        ## first order
        first_order_index = K.reshape(one_hot_feature_index, (-1,))
        get_first_order_weights = K.gather(self.w_one_hot, first_order_index)
        first_order_weights = K.reshape(get_first_order_weights, (-1, self.feature_num))

        first_order = K.sum(first_order_weights, 1) + K.sum(K.dot(numeric_feature, self.w_numeric), 1)

        ## second order
        get_second_order_weights = K.gather(self.v_one_hot, first_order_index)
        second_order_weights = K.reshape(get_second_order_weights, (-1, self.feature_num, self.embedding_size))
        numeric_weights = K.expand_dims(self.v_numeric, 0) * K.expand_dims(numeric_feature, -1)

        all_weights = K.concatenate([second_order_weights, numeric_weights], axis=1)
        weights_sum_square = K.sum(K.square(all_weights), 1)
        weights_square_sum = K.square(K.sum(all_weights, 1))
        second_order = 0.5*K.sum(weights_square_sum - weights_sum_square, 1)

        output = first_order + second_order + self.b

        if self.activation is not None:
            output = self.activation(output)
        output = K.expand_dims(output, -1)
        return output



        '''X_square = K.square(inputs)

        xv = K.square(K.dot(inputs, self.v))
        xw = K.dot(inputs, self.w)

        p = 0.5 * K.sum(xv - K.dot(X_square, K.square(self.v)), 1)
        rp = K.repeat_elements(K.reshape(p, (-1, 1)), self.output_dim, axis=-1)

        f = xw + rp + self.b

        output = K.reshape(f, (-1, self.output_dim))
        
        if self.activation is not None:
            output = self.activation(output)

        return output''' 
Author: DominickZhang, Project: KDDCup2019_admin, Lines of code: 48, Source: fm_keras.py

Example 5: make_online

# Required import: from keras import backend [as alias]
# Or: from keras.backend import slice [as alias]
def make_online(self):
        embedding = K.variable(np.random.uniform(0, 1, (self.dataset.nsize, self.flowargs['embdim'])))
        prevemb = K.placeholder(ndim=2, dtype='float32')  # (nsize, d)
        data = K.placeholder(ndim=2, dtype='int32')  # (batchsize, 5), [k, from_pos, to_pos, from_neg, to_neg]
        weight = K.placeholder(ndim=1, dtype='float32')  # (batchsize, )

        if K._BACKEND == 'theano':
            # (batchsize, d) => (batchsize, )
            # data[:, 0] should always be 0, so we simply ignore it
            # note: if you ever do use it, the actual data[:, 0] is not 0, due to how the data is generated
            dist_pos = embedding[data[:, 1]] - embedding[data[:, 2]]
            dist_pos = K.sum(dist_pos * dist_pos, axis=-1)
            dist_neg = embedding[data[:, 3]] - embedding[data[:, 4]]
            dist_neg = K.sum(dist_neg * dist_neg, axis=-1)
        else:
            dist_pos = K.gather(embedding, K.squeeze(K.slice(data, [0, 1], [-1, 1]), axis=1)) - \
                       K.gather(embedding, K.squeeze(K.slice(data, [0, 2], [-1, 1]), axis=1))
            dist_pos = K.sum(dist_pos * dist_pos, axis=-1)
            dist_neg = K.gather(embedding, K.squeeze(K.slice(data, [0, 3], [-1, 1]), axis=1)) - \
                       K.gather(embedding, K.squeeze(K.slice(data, [0, 4], [-1, 1]), axis=1))
            dist_neg = K.sum(dist_neg * dist_neg, axis=-1)

        # (batchsize, )
        margin = 1
        lprox = K.maximum(margin + dist_pos - dist_neg, 0) * weight

        # (1, )
        lprox = K.mean(lprox)

        # lsmooth
        lsmooth = embedding - prevemb  # (nsize, d)
        lsmooth = K.sum(K.square(lsmooth), axis=-1)  # (nsize)
        lsmooth = K.mean(lsmooth)

        loss = lprox + self.flowargs['beta'][0] * lsmooth

        opt = optimizers.get({'class_name': 'Adagrad', 'config': {'lr': self.lr}})
        cstr = {embedding: constraints.get({'class_name': 'maxnorm', 'config': {'max_value': 1, 'axis': 1}})}
        upd = opt.get_updates([embedding], cstr, loss)
        lf = K.function([data, weight, prevemb], [loss], updates=upd)

        return lf, None, [embedding], {} 
Author: luckiezhou, Project: DynamicTriad, Lines of code: 44, Source: dynamic_triad.py


Note: the keras.backend.slice examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce this article without permission.