

Python merge.Multiply Method Code Examples

This article collects typical usage examples of the Python method keras.layers.merge.Multiply. If you are wondering what merge.Multiply is for, or how to call it, the code examples selected here should help. You can also explore further usage examples from the keras.layers.merge module.


Three code examples of the merge.Multiply method are shown below, ordered by popularity by default.
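Before the project examples, here is a minimal sketch of what Multiply does (it is not taken from any of the projects below, and all layer names and shapes are illustrative): the layer takes a list of tensors of identical shape and returns their element-wise product, which makes it a natural building block for gating and attention weighting.

from keras.layers import Input, Dense
from keras.layers.merge import Multiply
from keras.models import Model

# two inputs with the same shape
values = Input(shape=(16,))
context = Input(shape=(16,))
# a learned sigmoid gate derived from the context input
gate = Dense(16, activation='sigmoid')(context)
# element-wise product of values and gate; output shape is (None, 16)
gated = Multiply()([values, gate])
model = Model(inputs=[values, context], outputs=gated)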

Example 1: add_attention_layer_with_doc_weighting

# Module to import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Multiply [as alias]
def add_attention_layer_with_doc_weighting(query_embedding, doc_embedding, layer_name, attention_level,
                                           query_weight, doc_weight, max_query_len, max_doc_len,
                                           query_mask=None, doc_mask=None, mask=False):
    # token-level similarity between document and query embeddings, normalized with softmax
    dot_prod = Dot(axes=-1, name=layer_name)([doc_embedding, query_embedding])
    norm_sim = Activation('softmax')(dot_prod)
    reshaped_query_weight = Reshape((ATTENTION_DEEP_LEVEL, max_query_len, 1),
                                    input_shape=(ATTENTION_DEEP_LEVEL, max_query_len,))(query_weight)
    repeated_query_weight = Lambda(lambda x: repeat_vector(x[0], x[1], x[2]), output_shape=lambda inp_shp:(
        inp_shp[0][0], inp_shp[0][1], max_query_len, max_doc_len,))([reshaped_query_weight, max_doc_len, -1])
    reshaped_doc_weight = Reshape((ATTENTION_DEEP_LEVEL, 1, max_doc_len),
                                  input_shape=(ATTENTION_DEEP_LEVEL, max_doc_len,))(doc_weight)
    repeated_doc_weight = Lambda(lambda x: repeat_vector(x[0], x[1], x[2]), output_shape=lambda inp_shp: (
        inp_shp[0][0], inp_shp[0][1], max_query_len, max_doc_len,))([reshaped_doc_weight, max_query_len, -2])
    # combine the broadcast query-side and document-side weights element-wise
    weight_product = Multiply()([repeated_query_weight, repeated_doc_weight])
    transformed_weight_product = Dense(max_doc_len, activation='relu')(weight_product)
    setattr(K, 'params', {'attention_level': attention_level})
    norm_sim = Lambda(lambda x: attention_weighting_prod(x[0], x[1]), output_shape=lambda inp_shp:(
        inp_shp[0][0], inp_shp[0][1], inp_shp[0][2]))([norm_sim, transformed_weight_product])
    if mask:
        # pool the weighted similarities while respecting the query/document masks
        max_sim = Lambda(lambda x: max_pooling_with_mask(x[0], x[1]), output_shape=lambda inp_shp: (
            inp_shp[0][0], inp_shp[0][2],))([norm_sim, query_mask])
        mean_sim = Lambda(lambda x: mean_pooling_with_mask(x[0], x[1], x[2]), output_shape=lambda inp_shp: (
            inp_shp[0][0], inp_shp[0][2],))([norm_sim, doc_mask, query_mask])
    else:
        max_sim = Lambda(max_pooling, output_shape=lambda inp_shp: (inp_shp[0], inp_shp[2], ))(norm_sim)
        mean_sim = Lambda(mean_pooling, output_shape=lambda inp_shp: (inp_shp[0], inp_shp[2],))(norm_sim)
    return norm_sim, max_sim, mean_sim


########################## Our model implementation ######################### 
Developer: jinfengr, Project: neural-tweet-search, Lines of code: 32, Source file: attention_model.py

Example 2: block

# Module to import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Multiply [as alias]
def block(input):
    # gated convolution block: the feature maps are split into two slices; one acts as
    # the values, the other (after a sigmoid) as a gate, and the two are multiplied
    cnn = Conv2D(128, (3, 3), padding="same", activation="linear", use_bias=False)(input)
    cnn = BatchNormalization(axis=-1)(cnn)

    cnn1 = Lambda(slice1, output_shape=slice1_output_shape)(cnn)
    cnn2 = Lambda(slice2, output_shape=slice2_output_shape)(cnn)

    cnn1 = Activation('linear')(cnn1)
    cnn2 = Activation('sigmoid')(cnn2)

    out = Multiply()([cnn1, cnn2])
    return out 
Developer: yongxuUSTC, Project: dcase2017_task4_cvssp, Lines of code: 14, Source file: main_crnn_sed.py

Example 3: _build

# Module to import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Multiply [as alias]
def _build(self):
        # get parameters
        proj = self.proj_params
        proj_axis = axes_dict(self.config.axes)[proj.axis]

        # define surface projection network (3D -> 2D)
        inp = u = Input(self.config.unet_input_shape)
        def conv_layers(u):
            for _ in range(proj.n_conv_per_depth):
                u = Conv3D(proj.n_filt, proj.kern, padding='same', activation='relu')(u)
            return u
        # down
        for _ in range(proj.n_depth):
            u = conv_layers(u)
            u = MaxPooling3D(proj.pool)(u)
        # middle
        u = conv_layers(u)
        # up
        for _ in range(proj.n_depth):
            u = UpSampling3D(proj.pool)(u)
            u = conv_layers(u)
        u = Conv3D(1, proj.kern, padding='same', activation='linear')(u)
        # convert learned features along Z to surface probabilities
        # (add 1 to proj_axis because of batch dimension in tensorflow)
        u = Lambda(lambda x: softmax(x, axis=1+proj_axis))(u)
        # multiply Z probabilities with Z values in input stack
        u = Multiply()([inp, u])
        # perform surface projection by summing over weighted Z values
        u = Lambda(lambda x: K.sum(x, axis=1+proj_axis))(u)
        model_projection = Model(inp, u)

        # define denoising network (2D -> 2D)
        # (remove projected axis from input_shape)
        input_shape = list(self.config.unet_input_shape)
        del input_shape[proj_axis]
        model_denoising = nets.common_unet(
            n_dim           = self.config.n_dim-1,
            n_channel_out   = self.config.n_channel_out,
            prob_out        = self.config.probabilistic,
            residual        = self.config.unet_residual,
            n_depth         = self.config.unet_n_depth,
            kern_size       = self.config.unet_kern_size,
            n_first         = self.config.unet_n_first,
            last_activation = self.config.unet_last_activation,
        )(tuple(input_shape))

        # chain models together
        return Model(inp, model_denoising(model_projection(inp))) 
Developer: CSBDeep, Project: CSBDeep, Lines of code: 50, Source file: care_projection.py


Note: The keras.layers.merge.Multiply examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.