

Python layers.LeakyReLU Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.keras.layers.LeakyReLU method in Python. If you have been wondering how to use layers.LeakyReLU, what it does, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from the tensorflow.keras.layers module.


The following presents 15 code examples of the layers.LeakyReLU method, sorted by popularity by default.
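
Before the examples, here is a minimal sketch (not from the original article) of what layers.LeakyReLU computes: positive inputs pass through unchanged, negative inputs are scaled by the slope alpha. This assumes TensorFlow 2.x, where the parameter is named alpha (newer standalone Keras releases rename it to negative_slope).

import tensorflow as tf
from tensorflow.keras import layers

# LeakyReLU(x) = x for x > 0, alpha * x otherwise
act = layers.LeakyReLU(alpha=0.2)
x = tf.constant([-1.0, 0.0, 2.0])
print(act(x).numpy())  # [-0.2  0.   2. ]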

Example 1: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 alpha,
                 data_format="channels_last",
                 **kwargs):
        super(DarkUnit, self).__init__(**kwargs)
        assert (out_channels % 2 == 0)
        mid_channels = out_channels // 2

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="conv2") 
Developer: osmr, Project: imgclsmob, Lines of code: 24, Source: darknet53.py

Example 2: encoder_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def encoder_layer(inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic encoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU

    """

    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    return x 
Developer: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 27, Source: cyclegan-7.1.1.py
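
A hypothetical way to chain encoder_layer into a small encoder stack (not from the book's source; it assumes the snippet's own imports are in scope, with InstanceNormalization typically coming from tensorflow_addons). Note that any activation value other than 'relu' selects LeakyReLU(alpha=0.2):

from tensorflow.keras.layers import Input

inputs = Input(shape=(256, 256, 3))
x = encoder_layer(inputs, filters=32, activation='leaky_relu')  # -> 128x128x32
x = encoder_layer(x, filters=64, activation='leaky_relu')       # -> 64x64x64
x = encoder_layer(x, filters=128, activation='leaky_relu')      # -> 32x32x128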

Example 3: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def __init__(self,
                 filters=64,
                 lrelu_alpha=0.2,
                 pad_type="constant",
                 norm_type="batch",
                 **kwargs):
        super(StridedConv, self).__init__(name="StridedConv")

        self.model = tf.keras.models.Sequential()
        self.model.add(get_padding(pad_type, (1, 1)))
        self.model.add(Conv2D(filters, 3, strides=(2, 2)))
        self.model.add(LeakyReLU(lrelu_alpha))
        self.model.add(get_padding(pad_type, (1, 1)))
        self.model.add(Conv2D(filters * 2, 3))
        self.model.add(get_norm(norm_type))
        self.model.add(LeakyReLU(lrelu_alpha)) 
Developer: mnicnc404, Project: CartoonGan-tensorflow, Lines of code: 18, Source: layers.py

Example 4: conv2d_unit

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def conv2d_unit(x, filters, kernels, strides=1):
    """Convolution Unit
    This function defines a 2D convolution operation with BN and LeakyReLU.
    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernels: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and
            height. Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
            Output tensor.
    """
    x = Conv2D(filters, kernels,
               padding='same',
               strides=strides,
               activation='linear',
               kernel_regularizer=l2(5e-4))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)

    return x 
Developer: 1044197988, Project: TF.Keras-Commonly-used-models, Lines of code: 26, Source: Darknet53.py

Example 5: residual_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def residual_block(inputs, filters):
    """Residual Block
    This function defines a residual block: two conv2d_unit layers
    (1x1, then 3x3) with a skip connection around them.
    # Arguments
        inputs: Tensor, input tensor of the residual block.
        filters: Integer, number of filters of the 1x1 convolution;
            the following 3x3 convolution uses 2 * filters.
    # Returns
        Output tensor.
    """
    x = conv2d_unit(inputs, filters, (1, 1))
    x = conv2d_unit(x, 2 * filters, (3, 3))
    x = add([inputs, x])
    x = Activation('linear')(x)

    return x 
Developer: 1044197988, Project: TF.Keras-Commonly-used-models, Lines of code: 18, Source: Darknet53.py
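
Examples 4 and 5 compose naturally into a Darknet-style stage. A minimal sketch (hypothetical, not from the project; it assumes the imports used by the two snippets above, i.e. Conv2D, BatchNormalization, LeakyReLU, Activation, add, and l2, are in scope):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(256, 256, 3))
x = conv2d_unit(inputs, 32, (3, 3))        # stem: Conv-BN-LeakyReLU
x = conv2d_unit(x, 64, (3, 3), strides=2)  # downsample to 128x128x64
x = residual_block(x, 32)                  # 1x1 to 32 ch, 3x3 back to 64 ch, skip add
model = Model(inputs, x)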

Example 6: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=False,
                 activation=None):
        super(GATConv, self).__init__()
        self._num_heads = num_heads
        self._in_feats = in_feats
        self._out_feats = out_feats
        xinit = tf.keras.initializers.VarianceScaling(scale=np.sqrt(
            2), mode="fan_avg", distribution="untruncated_normal")
        if isinstance(in_feats, tuple):
            self.fc_src = layers.Dense(
                out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
            self.fc_dst = layers.Dense(
                out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
        else:
            self.fc = layers.Dense(
                out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
        self.attn_l = tf.Variable(initial_value=xinit(
            shape=(1, num_heads, out_feats), dtype='float32'), trainable=True)
        self.attn_r = tf.Variable(initial_value=xinit(
            shape=(1, num_heads, out_feats), dtype='float32'), trainable=True)
        self.feat_drop = layers.Dropout(rate=feat_drop)
        self.attn_drop = layers.Dropout(rate=attn_drop)
        self.leaky_relu = layers.LeakyReLU(alpha=negative_slope)
        if residual:
            if in_feats != out_feats:
                self.res_fc = layers.Dense(
                    num_heads * out_feats, use_bias=False, kernel_initializer=xinit)
            else:
                self.res_fc = Identity()
        else:
            self.res_fc = None
            # self.register_buffer('res_fc', None)
        self.activation = activation 
Developer: dmlc, Project: dgl, Lines of code: 42, Source: gatconv.py
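
For orientation, a hypothetical instantiation (not from the source); negative_slope is forwarded to layers.LeakyReLU as the alpha applied to the attention logits:

# Hypothetical usage; requires DGL built with the TensorFlow backend.
conv = GATConv(in_feats=64, out_feats=8, num_heads=4, negative_slope=0.2)
# The layer is then called as conv(graph, feat); the per-node output
# shape is (num_heads, out_feats) = (4, 8).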

Example 7: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def __init__(self,
                 passes,
                 backbone_out_channels,
                 outs_channels,
                 depth,
                 growth_rate,
                 use_bn,
                 in_channels=3,
                 in_size=(256, 256),
                 data_format="channels_last",
                 **kwargs):
        super(IbpPose, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format
        activation = nn.LeakyReLU(alpha=0.01)

        self.backbone = IbpBackbone(
            in_channels=in_channels,
            out_channels=backbone_out_channels,
            activation=activation,
            data_format=data_format,
            name="backbone")

        self.decoder = SimpleSequential(name="decoder")
        for i in range(passes):
            merge = (i != passes - 1)
            self.decoder.add(IbpPass(
                channels=backbone_out_channels,
                mid_channels=outs_channels,
                depth=depth,
                growth_rate=growth_rate,
                merge=merge,
                use_bn=use_bn,
                activation=activation,
                data_format=data_format,
                name="pass{}".format(i + 1))) 
Developer: osmr, Project: imgclsmob, Lines of code: 38, Source: ibppose_coco.py

Example 8: dark_convYxY

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def dark_convYxY(in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 data_format="channels_last",
                 **kwargs):
    """
    DarkNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether to use a 1x1 (pointwise) convolution instead of a 3x3 convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    if pointwise:
        return conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            **kwargs)
    else:
        return conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            **kwargs) 
Developer: osmr, Project: imgclsmob, Lines of code: 38, Source: darknet.py

Example 9: build_discriminator

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def build_discriminator(inputs):
    """Build a Discriminator Model

    Stack of LeakyReLU-Conv2D to discriminate real from fake.
    The network does not converge with batch normalization, so BN is not
    used here, unlike in [1] or the original paper.

    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)

    Returns:
        discriminator (Model): Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]

    x = inputs
    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)

    x = Flatten()(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    discriminator = Model(inputs, x, name='discriminator')
    return discriminator 
Developer: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 37, Source: dcgan-mnist-4.2.1.py
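
A hypothetical call for MNIST-sized images (not part of the source snippet; it assumes the snippet's Conv2D, Flatten, Dense, Activation, LeakyReLU, and Model imports are in scope):

from tensorflow.keras.layers import Input

inputs = Input(shape=(28, 28, 1))
discriminator = build_discriminator(inputs)
discriminator.summary()  # four LeakyReLU-Conv2D stages, then Dense(1) + sigmoid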

Example 10: decoder_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic decoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU
    Arguments: (partial)
    inputs (tensor): the decoder layer input
    paired_inputs (tensor): the encoder layer output 
          provided by U-Net skip connection &
          concatenated to inputs.

    """

    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    x = concatenate([x, paired_inputs])
    return x 
Developer: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines of code: 34, Source: cyclegan-7.1.1.py

Example 11: get_model_meta

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def get_model_meta(filename):
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        raise
    except:
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret 
Developer: huanzhang12, Project: CROWN-IBP, Lines of code: 36, Source: mnist_cifar_models.py

Example 12: get_model_meta_real

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def get_model_meta_real(filename, model_loader):
    model = model_loader(filename, custom_objects = {"fn": lambda y_true, y_pred: y_pred, "tf": tf})
    json_string = model.to_json()
    model_meta = json.loads(json_string)
    weight_dims = []
    activations = set()
    activation_param = None
    input_dim = []
    # print(model_meta)
    try:
        # for keras
        model_layers = model_meta['config']['layers']
    except (KeyError, TypeError):
        # for tensorflow.keras
        model_layers = model_meta['config']
    for i, layer in enumerate(model_layers):
        if i == 0 and layer['class_name'] == "Flatten":
            input_dim = layer['config']['batch_input_shape']
        if layer['class_name'] == "Dense":
            units = layer['config']['units']
            weight_dims.append(units)
            activation = layer['config']['activation']
            if activation != 'linear':
                activations.add(activation)
        elif layer['class_name'] == "Activation":
            activation = layer['config']['activation']
            activations.add(activation)
        elif layer['class_name'] == "LeakyReLU":
            activation_param = layer['config']['alpha']
            activations.add("leaky")
        elif layer['class_name'] == "Lambda":
            if "arctan" in layer['config']["name"]:
                activation = "arctan"
                activations.add("arctan")
    assert len(activations) == 1, "only one activation is supported," + str(activations)
    return weight_dims, list(activations)[0], activation_param, input_dim 
Developer: huanzhang12, Project: CROWN-IBP, Lines of code: 38, Source: mnist_cifar_models.py
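
The tuple returned by get_model_meta can then drive downstream analysis. A hypothetical call (the filename is a placeholder):

# Hypothetical usage; "mnist.h5" stands in for a real saved model.
weight_dims, activation, activation_param, input_dim = get_model_meta("mnist.h5")
print(weight_dims)       # e.g. [1024, 1024, 10] for a three-layer MLP
print(activation)        # "leaky" when LeakyReLU layers are present
print(activation_param)  # the LeakyReLU alpha, or None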

Example 13: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def __init__(self,
                 base_filters=32,
                 lrelu_alpha=0.2,
                 pad_type="reflect",
                 norm_type="batch"):
        super(Discriminator, self).__init__(name="Discriminator")
        if pad_type == "reflect":
            self.flat_pad = ReflectionPadding2D()
        elif pad_type == "constant":
            self.flat_pad = ZeroPadding2D()
        else:
            raise ValueError(f"pad_type not recognized {pad_type}")

        self.flat_conv = Conv2D(base_filters, 3)
        self.flat_lru = LeakyReLU(lrelu_alpha)
        self.strided_conv1 = StridedConv(base_filters * 2,
                                         lrelu_alpha,
                                         pad_type,
                                         norm_type)
        self.strided_conv2 = StridedConv(base_filters * 4,
                                         lrelu_alpha,
                                         pad_type,
                                         norm_type)
        self.conv2 = Conv2D(base_filters * 8, 3)

        if norm_type == "instance":
            self.norm = InstanceNormalization()
        elif norm_type == "batch":
            self.norm = BatchNormalization()

        self.lrelu = LeakyReLU(lrelu_alpha)

        self.final_conv = Conv2D(1, 3) 
Developer: mnicnc404, Project: CartoonGan-tensorflow, Lines of code: 35, Source: discriminator.py

Example 14: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def __init__(self,
                 alpha=0.25,
                 **kwargs):
        super(PReLU2, self).__init__(**kwargs)
        # A fixed-slope LeakyReLU standing in for PReLU (alpha is not learned).
        self.active = nn.LeakyReLU(alpha=alpha) 
Developer: tucan9389, Project: tf2-mobile-pose-estimation, Lines of code: 7, Source: common.py
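
Unlike a true PReLU, the slope here is fixed at alpha rather than learned per channel. A quick numeric check of the wrapped activation (a sketch, not from the project):

import tensorflow as tf
from tensorflow.keras import layers as nn

act = nn.LeakyReLU(alpha=0.25)
x = tf.constant([-2.0, -0.5, 0.0, 1.0])
print(act(x).numpy())  # [-0.5   -0.125  0.     1.   ]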

Example 15: convolution_block

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LeakyReLU [as alias]
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation:
        x = LeakyReLU(alpha=0.1)(x)
    return x 
Developer: 1044197988, Project: TF.Keras-Commonly-used-models, Lines of code: 8, Source: Unet_Xception_Resnetblock.py
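
A hypothetical call, mirroring how the block is typically stacked inside the U-Net/Xception model (Input is assumed from tensorflow.keras.layers):

from tensorflow.keras.layers import Input

inputs = Input(shape=(128, 128, 3))
x = convolution_block(inputs, filters=32, size=(3, 3))                # Conv-BN-LeakyReLU
x = convolution_block(x, filters=32, size=(3, 3), activation=False)   # Conv-BN only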


Note: The tensorflow.keras.layers.LeakyReLU method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright in the source code; consult each project's License before redistributing or using it. Please do not reproduce this article without permission.