

Python layers.Multiply Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.layers.Multiply. If you are wondering what layers.Multiply does, how to use it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples from tensorflow.keras.layers.


The following presents 9 code examples of the layers.Multiply method, sorted by popularity by default.
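Before diving into the examples, here is a minimal, self-contained sketch of what layers.Multiply does: it takes a list of tensors with the same (or broadcastable) shapes and returns their element-wise product.

import tensorflow as tf
from tensorflow.keras import layers

a = tf.constant([[1.0, 2.0, 3.0]])
b = tf.constant([[10.0, 20.0, 30.0]])
out = layers.Multiply()([a, b])   # element-wise product of the two tensors
print(out.numpy())                # [[10. 40. 90.]]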

Example 1: _se_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def _se_block(inputs, filters, se_ratio, prefix):
    # Squeeze: global average pooling collapses each channel to a single value
    x = GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(inputs)
    if K.image_data_format() == 'channels_first':
        x = Reshape((filters, 1, 1))(x)
    else:
        x = Reshape((1, 1, filters))(x)
    # Excite: a bottleneck of two 1x1 convolutions produces per-channel gates
    x = Conv2D(_depth(filters * se_ratio),
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv')(x)
    x = ReLU(name=prefix + 'squeeze_excite/Relu')(x)
    x = Conv2D(filters,
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv_1')(x)
    x = Activation(hard_sigmoid)(x)
    #if K.backend() == 'theano':
        ## For the Theano backend, we have to explicitly make
        ## the excitation weights broadcastable.
        #x = Lambda(
            #lambda br: K.pattern_broadcast(br, [True, True, True, False]),
            #output_shape=lambda input_shape: input_shape,
            #name=prefix + 'squeeze_excite/broadcast')(x)
    # Scale: rescale the input feature map channel-wise by the learned gates
    x = Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
    return x
Developer: david8862, Project: keras-YOLOv3-model-set, Lines: 27, Source: mobilenet_v3.py
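A hedged usage sketch (the input shape is assumed, and the helpers _depth and hard_sigmoid plus the layer imports come from the same mobilenet_v3.py module): the block is wired into a functional model and returns a tensor with the same shape as its input.

inputs = tf.keras.Input(shape=(56, 56, 24))
x = _se_block(inputs, filters=24, se_ratio=0.25, prefix='expanded_conv/')
model = tf.keras.Model(inputs, x)   # output shape: (None, 56, 56, 24)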

Example 2: TemporalDropout

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def TemporalDropout(inputs, dropout=0.0):
    """
    Drops whole temporal steps of the input 3D tensor with probability :dropout:
    """
    # TODO: adapt for >3D tensors
    if dropout == 0.0:
        return inputs
    # Build a (batch, time, 1) mask of ones, then drop entire time steps
    inputs_func = lambda x: K.ones_like(x[:, :, 0:1])
    inputs_mask = Lambda(inputs_func)(inputs)
    inputs_mask = Dropout(dropout)(inputs_mask)
    # Tile the mask across the feature dimension so a dropped step zeroes
    # out all of its features
    tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)
    inputs_mask = Lambda(K.tile, arguments={"n": tiling_shape},
                         output_shape=K.int_shape(inputs)[1:])(inputs_mask)
    answer = Multiply()([inputs, inputs_mask])
    return answer
Developer: deepmipt, Project: DeepPavlov, Lines: 17, Source: cells.py
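A hedged usage sketch (assuming Input, LSTM, and Model are imported from tensorflow.keras): because the mask has shape (batch, time, 1) before tiling, a dropped step zeroes every feature of that step rather than individual features, which is the usual way to regularize token embeddings.

inp = Input(shape=(None, 128))          # (batch, time, features)
x = TemporalDropout(inp, dropout=0.2)   # drops whole time steps
x = LSTM(64)(x)
model = Model(inp, x)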

Example 3: multiplicative_self_attention

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
    """
    Compute multiplicative self-attention for a time series of vectors (with batch dimension),
    using the score function score(h_i, h_j) = <W_1 h_i, W_2 h_j>, where W_1 and W_2 are
    learnable matrices with dimensionality [n_hidden, n_input_features].

    Args:
        units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
        n_hidden: number of units in hidden representation of similarity measure
        n_output_features: number of features in output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
    """
    n_input_features = K.int_shape(units)[2]
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    exp1 = Lambda(lambda x: expand_tile(x, axis=1))(units)
    exp2 = Lambda(lambda x: expand_tile(x, axis=2))(units)
    queries = Dense(n_hidden)(exp1)
    keys = Dense(n_hidden)(exp2)
    scores = Lambda(lambda x: K.sum(queries * x, axis=3, keepdims=True))(keys)
    attention = Lambda(lambda x: softmax(x, axis=2))(scores)
    mult = Multiply()([attention, exp1])
    attended_units = Lambda(lambda x: K.sum(x, axis=2))(mult)
    output = Dense(n_output_features, activation=activation)(attended_units)
    return output 
Developer: deepmipt, Project: DeepPavlov, Lines: 32, Source: keras_layers.py
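A hedged usage sketch (the expand_tile and softmax helpers come from the same keras_layers.py module; the sequence length and feature size are assumed):

inp = Input(shape=(20, 64))                        # (batch, time, features)
attended = multiplicative_self_attention(inp, n_hidden=32,
                                          n_output_features=64,
                                          activation='relu')
model = Model(inp, attended)                       # output: (None, 20, 64)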

Example 4: channel_squeeze_excite_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def channel_squeeze_excite_block(input, ratio=0.25):
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]
    cse_shape = (1, 1, filters)

    # Squeeze: global average pooling, then reshape for the 1x1 excitation convs
    cse = layers.GlobalAveragePooling2D()(init)
    cse = layers.Reshape(cse_shape)(cse)
    # Reduced width of the bottleneck, kept at a minimum of one filter
    ratio_filters = int(np.round(filters * ratio))
    if ratio_filters < 1:
        ratio_filters += 1
    cse = layers.Conv2D(
        ratio_filters,
        (1, 1),
        padding="same",
        activation="relu",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)
    cse = layers.BatchNormalization()(cse)
    cse = layers.Conv2D(
        filters,
        (1, 1),
        activation="sigmoid",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)

    if K.image_data_format() == "channels_first":
        cse = layers.Permute((3, 1, 2))(cse)

    cse = layers.Multiply()([init, cse])
    return cse 
Developer: jgraving, Project: DeepPoseKit, Lines: 35, Source: squeeze_excitation.py

Example 5: spatial_squeeze_excite_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def spatial_squeeze_excite_block(input):
    # Spatial gate: a 1x1 convolution produces one sigmoid weight per pixel
    sse = layers.Conv2D(
        1,
        (1, 1),
        activation="sigmoid",
        padding="same",
        kernel_initializer="he_normal",
        use_bias=False,
    )(input)
    sse = layers.Multiply()([input, sse])

    return sse 
Developer: jgraving, Project: DeepPoseKit, Lines: 14, Source: squeeze_excitation.py
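As a hedged follow-up sketch (an assumption modeled on the concurrent spatial-and-channel SE, "scSE", pattern rather than code taken from this listing), the two blocks above are commonly fused by summing their outputs:

def spatial_channel_squeeze_excite_block(input, ratio=0.25):
    # Channel recalibration and spatial recalibration applied in parallel,
    # then merged by element-wise addition (scSE).
    cse = channel_squeeze_excite_block(input, ratio=ratio)
    sse = spatial_squeeze_excite_block(input)
    return layers.Add()([cse, sse])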

Example 6: _fca_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def _fca_block(inputs, reduct_ratio, block_id):
    in_channels = inputs.shape.as_list()[-1]
    #in_shapes = inputs.shape.as_list()[1:3]
    reduct_channels = int(in_channels // reduct_ratio)
    prefix = 'fca_block_{}_'.format(block_id)
    x = GlobalAveragePooling2D(name=prefix + 'average_pooling')(inputs)
    x = Dense(reduct_channels, activation='relu', name=prefix + 'fc1')(x)
    x = Dense(in_channels, activation='sigmoid', name=prefix + 'fc2')(x)

    x = Reshape((1, 1, in_channels), name=prefix + 'reshape')(x)
    x = Multiply(name=prefix + 'multiply')([x, inputs])
    return x 
Developer: david8862, Project: keras-YOLOv3-model-set, Lines: 14, Source: yolo3_nano.py
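A hedged usage sketch (the input shape is assumed; the layer imports come from the same yolo3_nano.py module):

inputs = tf.keras.Input(shape=(26, 26, 128))
x = _fca_block(inputs, reduct_ratio=8, block_id=1)   # output shape: (None, 26, 26, 128)
model = tf.keras.Model(inputs, x)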

Example 7: hard_swish

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def hard_swish(x):
    # hard_swish(x) = x * hard_sigmoid(x): a cheap, quantization-friendly
    # approximation of the swish activation used throughout MobileNetV3
    return Multiply()([Activation(hard_sigmoid)(x), x])


# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py 
Developer: david8862, Project: keras-YOLOv3-model-set, Lines: 11, Source: mobilenet_v3.py
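The trailing comment above refers to a channel-rounding helper from the TensorFlow models repo. A minimal sketch of that helper (the standard _make_divisible used by the MobileNet family; the exact defaults here are assumptions) looks like this:

def _make_divisible(v, divisor=8, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping below min_value
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure rounding down does not reduce the value by more than 10%
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v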

Example 8: add_adapter

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def add_adapter(self, all_layers, task, layer_num):
    """Add an adapter connection for given task/layer combo"""
    i = layer_num
    prev_layers = []
    trainable_layers = []
    # Handle output layer
    if i < len(self.layer_sizes):
      layer_sizes = self.layer_sizes
      alpha_init_stddev = self.alpha_init_stddevs[i]
      weight_init_stddev = self.weight_init_stddevs[i]
      bias_init_const = self.bias_init_consts[i]
    elif i == len(self.layer_sizes):
      layer_sizes = self.layer_sizes + [self.n_outputs]
      alpha_init_stddev = self.alpha_init_stddevs[-1]
      weight_init_stddev = self.weight_init_stddevs[-1]
      bias_init_const = self.bias_init_consts[-1]
    else:
      raise ValueError("layer_num too large for add_adapter.")
    # Iterate over all previous tasks.
    for prev_task in range(task):
      prev_layers.append(all_layers[(i - 1, prev_task)])
    # prev_layers is a list with elements of size
    # (batch_size, layer_sizes[i-1])
    if len(prev_layers) == 1:
      prev_layer = prev_layers[0]
    else:
      prev_layer = Concatenate(axis=1)(prev_layers)
    alpha = layers.Variable(
        tf.random.truncated_normal((1,), stddev=alpha_init_stddev))
    trainable_layers.append(alpha)

    prev_layer = Multiply()([prev_layer, alpha([prev_layer])])
    dense1 = Dense(
        layer_sizes[i - 1],
        kernel_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=weight_init_stddev),
        bias_initializer=tf.constant_initializer(value=bias_init_const))
    prev_layer = dense1(prev_layer)
    trainable_layers.append(dense1)

    dense2 = Dense(
        layer_sizes[i],
        kernel_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=weight_init_stddev),
        use_bias=False)
    prev_layer = dense2(prev_layer)
    trainable_layers.append(dense2)

    return prev_layer, trainable_layers 
Developer: deepchem, Project: deepchem, Lines: 51, Source: progressive_multitask.py

Example 9: os_bottleneck

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Multiply [as alias]
def os_bottleneck(x,
                  out_filters,
                  bottleneck_reduction=4):
    """Utility function to implement the OSNet bottleneck module.

    # Arguments
        x: input tensor.
        out_filters: number of output filters.
        bottleneck_reduction: reduction factor for the bottleneck width.

    # Returns
        Output tensor after applying the OSNet bottleneck.
    """
    in_filters = x.shape.as_list()[-1]
    mid_filters = out_filters // bottleneck_reduction
    identity = x
    x1 = conv2d_bn(x, mid_filters, kernel_size=(1, 1))

    branch1 = light_conv3x3_bn(x1, mid_filters)
    branch2 = light_conv3x3_bn(x1, mid_filters)
    branch2 = light_conv3x3_bn(branch2, mid_filters)
    branch3 = light_conv3x3_bn(x1, mid_filters)
    branch3 = light_conv3x3_bn(branch3, mid_filters)
    branch3 = light_conv3x3_bn(branch3, mid_filters)
    branch4 = light_conv3x3_bn(x1, mid_filters)
    branch4 = light_conv3x3_bn(branch4, mid_filters)
    branch4 = light_conv3x3_bn(branch4, mid_filters)
    branch4 = light_conv3x3_bn(branch4, mid_filters)

    # A shared aggregation gate weights each branch channel-wise before fusion
    gate = get_aggregation_gate(mid_filters)
    x2 = layers.Add()([
        layers.Multiply()([branch1, gate(branch1)]),
        layers.Multiply()([branch2, gate(branch2)]),
        layers.Multiply()([branch3, gate(branch3)]),
        layers.Multiply()([branch4, gate(branch4)])])

    x3 = conv2d_bn(x2, out_filters, kernel_size=(1, 1), activation=None)
    if in_filters != out_filters:
        identity = conv2d_bn(identity, out_filters, kernel_size=(1, 1), activation=None)

    out = layers.Add()([identity, x3])  # residual connection
    out = layers.Activation('relu')(out)
    return out 
Developer: jkjung-avt, Project: keras_imagenet, Lines: 44, Source: osnet.py
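The get_aggregation_gate helper is not shown in this snippet. As a hedged sketch (an assumption based on OSNet's unified aggregation gate, not code from this listing), it is typically a small SE-style sub-network, shared across the four branches, that returns per-channel weights in [0, 1]:

def get_aggregation_gate(in_filters, reduction=16):
    # Shared gate: squeeze to per-channel statistics, then predict a sigmoid
    # weight per channel that rescales each branch before they are summed.
    gate = tf.keras.Sequential([
        layers.GlobalAveragePooling2D(),
        layers.Dense(in_filters // reduction, activation='relu'),
        layers.Dense(in_filters, activation='sigmoid'),
        layers.Reshape((1, 1, in_filters)),
    ])
    return gate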


Note: The tensorflow.keras.layers.Multiply method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.