

Python layers.multiply Method Code Examples

This article collects typical usage examples of the keras.layers.multiply method in Python. If you are wondering how exactly to use layers.multiply, how to call it, or what real-world examples look like, the curated code examples here may help. You can also explore further usage examples from keras.layers, the module this method belongs to.


The following presents 15 code examples of the layers.multiply method, sorted by popularity by default.
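
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of what keras.layers.multiply does: it takes a list of tensors with the same shape and returns their element-wise product as a new Keras tensor.

# Minimal usage sketch of keras.layers.multiply (standalone Keras 2.x assumed)
import numpy as np
from keras.layers import Input, multiply
from keras.models import Model

a = Input(shape=(4,))
b = Input(shape=(4,))
prod = multiply([a, b])               # element-wise product, same shape as the inputs
model = Model(inputs=[a, b], outputs=prod)

x = np.array([[1., 2., 3., 4.]])
y = np.array([[10., 20., 30., 40.]])
print(model.predict([x, y]))          # [[ 10.  40.  90. 160.]]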

Example 1: spatial_squeeze_excite_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def spatial_squeeze_excite_block(input):
    ''' Create a spatial squeeze-excite block

    Args:
        input: input tensor

    Returns: a keras tensor

    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    '''

    se = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                kernel_initializer='he_normal')(input)

    x = multiply([input, se])
    return x 
Author: titu1994, Project: keras-squeeze-excite-network, Lines: 19, Source: se.py
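
To make the wiring explicit, here is a hedged usage sketch of the block above (my own illustration, not part of the original repository); it assumes the snippet's implicit imports of Input, Conv2D and multiply from keras.layers, and relies on the Multiply merge layer broadcasting the single-channel attention map over the input's channels, as the repository's own code does.

# Hypothetical usage sketch of spatial_squeeze_excite_block (assumes the function above is in scope)
from keras.layers import Input, Conv2D, multiply
from keras.models import Model

inp = Input(shape=(32, 32, 64))                              # an arbitrary feature map
feat = Conv2D(64, (3, 3), padding='same', activation='relu')(inp)
recalibrated = spatial_squeeze_excite_block(feat)            # spatially re-weighted features
model = Model(inp, recalibrated)
model.summary()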

Example 2: spatial_squeeze_excite_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def spatial_squeeze_excite_block(input_tensor):
    """ Create a spatial squeeze-excite block

    Args:
        input_tensor: input Keras tensor

    Returns: a Keras tensor

    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """

    se = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                kernel_initializer='he_normal')(input_tensor)

    x = multiply([input_tensor, se])
    return x 
Author: titu1994, Project: keras-squeeze-excite-network, Lines: 19, Source: se.py

Example 3: swish

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def swish(x,
          name="swish"):
    """
    Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    name : str, default 'swish'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    w = nn.Activation("sigmoid", name=name + "/sigmoid")(x)
    x = nn.multiply([x, w], name=name + "/mul")
    return x 
Author: osmr, Project: imgclsmob, Lines: 22, Source: common.py
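
As a usage note (an assumption on my part, not from the imgclsmob source), the `nn` prefix above is the keras.layers module imported under an alias, so the helper can be dropped into a functional model like this:

# Hypothetical usage sketch for swish (assumes `nn` is `keras.layers` and swish is defined as above)
from keras import layers as nn
from keras.models import Model

inp = nn.Input(shape=(32,))
h = nn.Dense(64)(inp)
h = swish(h, name="swish1")           # computes x * sigmoid(x)
model = Model(inp, h)
model.summary()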

Example 4: _softmax

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def _softmax(x, axis=-1, alpha=1):
    """
    building on keras implementation, allow alpha parameter

    Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a value to multiply all x
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
Author: voxelmorph, Project: voxelmorph, Lines: 26, Source: models.py

Example 5: test_tiny_mul_random

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def test_tiny_mul_random(self):
        np.random.seed(1988)
        input_dim = 10
        num_channels = 6

        # Define a model
        input_tensor = Input(shape=(input_dim,))
        x1 = Dense(num_channels)(input_tensor)
        x2 = Dense(num_channels)(x1)
        x3 = Dense(num_channels)(x1)
        x4 = multiply([x2, x3])
        x5 = Dense(num_channels)(x4)

        model = Model(inputs=[input_tensor], outputs=[x5])

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_model(model) 
Author: apple, Project: coremltools, Lines: 22, Source: test_keras2_numeric.py

Example 6: test_dense_elementwise_params

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def test_dense_elementwise_params(self):
        options = dict(modes=[add, multiply, concatenate, average, maximum])

        def build_model(mode):
            x1 = Input(shape=(3,))
            x2 = Input(shape=(3,))
            y1 = Dense(4)(x1)
            y2 = Dense(4)(x2)
            z = mode([y1, y2])
            model = Model([x1, x2], z)
            return mode, model

        product = itertools.product(*options.values())
        args = [build_model(p[0]) for p in product]
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param) 
Author: apple, Project: coremltools, Lines: 19, Source: test_keras2_numeric.py

Example 7: RHN

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def RHN(input_dim, hidden_dim, depth):
    # Wrapped model
    inp = Input(batch_shape=(batch_size, input_dim))
    state = Input(batch_shape=(batch_size, hidden_dim))
    drop_mask = Input(batch_shape=(batch_size, hidden_dim))
    # To avoid all zero mask causing gradient to vanish
    inverted_drop_mask = Lambda(lambda x: 1.0 - x, output_shape=lambda s: s)(drop_mask)
    drop_mask_2 = Lambda(lambda x: x + 0., output_shape=lambda s: s)(inverted_drop_mask)
    dropped_state = multiply([state, inverted_drop_mask])
    y, new_state = RHNCell(units=hidden_dim, recurrence_depth=depth,
                           kernel_initializer=weight_init,
                           kernel_regularizer=l2(weight_decay),
                           kernel_constraint=max_norm(gradient_clip),
                           bias_initializer=Constant(transform_bias),
                           recurrent_initializer=weight_init,
                           recurrent_regularizer=l2(weight_decay),
                           recurrent_constraint=max_norm(gradient_clip))([inp, dropped_state])
    return RecurrentModel(input=inp, output=y,
                          initial_states=[state, drop_mask],
                          final_states=[new_state, drop_mask_2])


# lr decay Scheduler 
Author: farizrahman4u, Project: recurrentshop, Lines: 25, Source: recurrent_highway_networks.py

Example 8: QRNcell

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def QRNcell():
    xq = Input(batch_shape=(batch_size, embedding_dim * 2))
    # Split into context and query
    xt = Lambda(lambda x, dim: x[:, :dim], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] / 2))(xq)
    qt = Lambda(lambda x, dim: x[:, dim:], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] / 2))(xq)

    h_tm1 = Input(batch_shape=(batch_size, embedding_dim))

    zt = Dense(1, activation='sigmoid', bias_initializer=Constant(2.5))(multiply([xt, qt]))
    zt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(zt)
    ch = Dense(embedding_dim, activation='tanh')(concatenate([xt, qt], axis=-1))
    rt = Dense(1, activation='sigmoid')(multiply([xt, qt]))
    rt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(rt)
    ht = add([multiply([zt, ch, rt]), multiply([Lambda(lambda x: 1 - x, output_shape=lambda s: s)(zt), h_tm1])])
    return RecurrentModel(input=xq, output=ht, initial_states=[h_tm1], final_states=[ht], return_sequences=True)


#
# Load data
# 
Author: farizrahman4u, Project: recurrentshop, Lines: 24, Source: query_reduction_network.py

Example 9: test_merge_multiply

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def test_merge_multiply():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    o = layers.multiply([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    mul_layer = layers.Multiply()
    o2 = mul_layer([i1, i2, i3])
    assert mul_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 * x2 * x3, atol=1e-4) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 20, Source: merge_test.py

Example 10: gatedblock

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def gatedblock(dilation, dropout, kernels, kernel_size):
    """Keras compatible Dilated convolution layer

    Includes Gated activation, skip connections, batch normalization and dropout
    """

    def f(input_):
        norm = BatchNormalization()(input_)
        # Dropout of inputs
        drop = Dropout(dropout)(norm)
        # Normal activation
        normal_out = Conv1D(kernels, kernel_size, dilation_rate=dilation, activation='tanh', padding='same')(drop)
        # Gate
        gate_out = Conv1D(kernels, kernel_size, dilation_rate=dilation, activation='sigmoid', padding='same')(drop)
        # Point-wise nonlinear · gate
        merged = multiply([normal_out, gate_out])
        # Activation after gate
        skip_out = Conv1D(kernels, 1, activation='tanh')(merged)
        # Residual connections: allow the network input to skip the
        # whole block if necessary
        out = add([skip_out, input_])
        return out, skip_out

    return f 
Author: albarji, Project: neurowriter, Lines: 26, Source: models.py

Example 11: self_attention

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def self_attention(x):
    
    '''
    .  stands for dot product
    *  stands for element-wise multiplication

    m = x . transpose(x)
    n = softmax(m)
    o = n . x
    a = o * x

    return a
    '''

    m = dot([x, x], axes=[2,2])
    n = Activation('softmax')(m)
    o = dot([n, x], axes=[2,1])
    a = multiply([o, x])
        
    return a 
Author: soujanyaporia, Project: contextual-multimodal-fusion, Lines: 23, Source: trimodal_attention_models.py
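
For completeness, a small assumed usage sketch (not from the original project): the helper operates on a 3-D (batch, timesteps, features) tensor and returns a tensor of the same shape, so it can be applied directly inside a functional model, provided dot, Activation and multiply are imported from keras.layers.

# Hypothetical usage sketch for self_attention (assumes the function above is in scope)
from keras.layers import Input, Dense, Activation, dot, multiply
from keras.models import Model

utterances = Input(shape=(20, 100))        # (timesteps, feature_dim)
attended = self_attention(utterances)      # same shape, re-weighted by attention
scores = Dense(1, activation='sigmoid')(attended)
model = Model(utterances, scores)
model.summary()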

Example 12: segmentor

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def segmentor(self, start_filters=64, filter_inc_rate=2, out_ch=1, depth=2):
        """
        Creates recursively a segmentor model a.k.a. generator in GAN literature
        """
        inp = Input(shape=self.shape)
        first_block = convl1_lrelu(inp, start_filters, 4, 2)
        middle_blocks = level_block(first_block, int(start_filters * 2), depth=depth,
                                    filter_inc_rate=filter_inc_rate, p=0.1)
        if self.softmax:
            last_block = upsampl_softmax(middle_blocks, out_ch+1, 3, 1, 2, self.max_project) # out_ch+1, because softmax needs crossentropy
        else:
            last_block = upsampl_conv(middle_blocks, out_ch, 3, 1, 2)
        if self.crop:
            out = multiply([inp, last_block])  # crop input with predicted mask
            return Model([inp], [out], name='segmentor_net')
        return Model([inp], [last_block], name='segmentor_net')
        #return Model([inp], [last_block], name='segmentor_net') 
Author: iNLyze, Project: DeepLearning-SeGAN-Segmentation, Lines: 19, Source: SeGAN.py

Example 13: critic

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def critic(self):
        """
        Creates a critic a.k.a. discriminator model
        """
        # Note: Future improvement is to provide definable depth of critic
        inp_cropped = Input(self.shape, name='inp_cropped_image')  # Data cropped with generated OR g.t. mask


        shared_1 = shared_convl1_lrelu(self.shape, 64, 4, 2, name='shared_1_conv_lrelu')
        shared_2 = shared_convl1_bn_lrelu((16, 16, 64), 128, 4, 2, name='shared_2_conv_bn_lrelu')
        shared_3 = shared_convl1_bn_lrelu((8, 8, 128), 256, 4, 2, name='shared_3_conv_bn_lrelu')
        shared_4 = shared_convl1_bn_lrelu((4, 4, 256), 512, 4, 2, name='shared_4_conv_bn_lrelu')

        x1_S = shared_1(inp_cropped)
        #x1_S = shared_1(multiply([inp, mask]))
        x2_S = shared_2(x1_S)
        x3_S = shared_3(x2_S)
        x4_S = shared_4(x3_S)
        features = Concatenate(name='features_S')(
            [Flatten()(inp_cropped), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
            #[Flatten()(inp), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
        )
        return Model(inp_cropped, features, name='critic_net')
        #return Model([inp, mask], features, name='critic_net') 
Author: iNLyze, Project: DeepLearning-SeGAN-Segmentation, Lines: 26, Source: SeGAN.py

Example 14: apn_module

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def apn_module(self, x):

        def right(x):
            x = layers.AveragePooling2D()(x)
            x = layers.Conv2D(self.classes, kernel_size=1, padding='same')(x)
            x = layers.BatchNormalization()(x)
            x = layers.Activation('relu')(x)
            x = layers.UpSampling2D(interpolation='bilinear')(x)
            return x

        def conv(x, filters, kernel_size, stride):
            x = layers.Conv2D(filters, kernel_size=kernel_size, strides=(stride, stride), padding='same')(x)
            x = layers.BatchNormalization()(x)
            x = layers.Activation('relu')(x)
            return x

        x_7 = conv(x, int(x.shape[-1]), 7, stride=2)
        x_5 = conv(x_7, int(x.shape[-1]), 5, stride=2)
        x_3 = conv(x_5, int(x.shape[-1]), 3, stride=2)

        x_3_1 = conv(x_3, self.classes, 3, stride=1)
        x_3_1_up = layers.UpSampling2D(interpolation='bilinear')(x_3_1)
        x_5_1 = conv(x_5, self.classes, 5, stride=1)
        x_3_5 = layers.add([x_5_1, x_3_1_up])
        x_3_5_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5)
        x_7_1 = conv(x_7, self.classes, 3, stride=1)
        x_3_5_7 = layers.add([x_7_1, x_3_5_up])
        x_3_5_7_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5_7)

        x_middle = conv(x, self.classes, 1, stride=1)
        x_middle = layers.multiply([x_3_5_7_up, x_middle])

        x_right = right(x)
        x_middle = layers.add([x_middle, x_right])
        return x_middle 
Author: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines: 37, Source: lednet.py

Example 15: build_generator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import multiply [as alias]
def build_generator(self):

        model = Sequential()

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img) 
Author: eriklindernoren, Project: Keras-GAN, Lines: 28, Source: cgan.py


Note: The keras.layers.multiply method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to the corresponding project licenses before distributing or reusing the code; do not reproduce this article without permission.