

Python convolutional.Conv2DTranspose Method Code Examples

This article collects typical usage examples of the keras.layers.convolutional.Conv2DTranspose method in Python. If you are wondering what convolutional.Conv2DTranspose does and how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, keras.layers.convolutional.


The following shows 10 code examples of the convolutional.Conv2DTranspose method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
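Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming a Keras 2 installation) of what Conv2DTranspose does: it is a learned upsampling layer whose output spatial size is input * stride with padding='same', or (input - 1) * stride + kernel_size with padding='valid'.

from keras.models import Sequential
from keras.layers.convolutional import Conv2DTranspose

model = Sequential()
# padding='same' with strides=2 doubles the spatial dimensions: 8x8 -> 16x16
model.add(Conv2DTranspose(64, kernel_size=3, strides=2, padding='same',
                          input_shape=(8, 8, 128)))
print(model.output_shape)  # (None, 16, 16, 64)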

Example 1: generator

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def generator(input_dim, alpha=0.2):
    model = Sequential()
    model.add(Dense(4 * 4 * 512, input_dim=input_dim))
    model.add(Reshape(target_shape=(4, 4, 512)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))   
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same'))   
    model.add(Activation('tanh'))
    return model

#Define the Discriminator Network 
Developer: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 19, Source: captcha_gan.py
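A hypothetical driver for the generator above (not part of the original captcha_gan.py; it assumes the script's imports — Sequential, Dense, Reshape, BatchNormalization, LeakyReLU, Conv2DTranspose, Activation — are in scope): the three stride-2 transposed convolutions upsample the 4x4x512 feature map to a 32x32x3 image.

import numpy as np

gen = generator(input_dim=100)
noise = np.random.uniform(-1, 1, size=(16, 100))
fake_images = gen.predict(noise)
print(fake_images.shape)  # (16, 32, 32, 3); values lie in [-1, 1] because of the tanh output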

Example 2: __transition_up_block

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' Upscaling block (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of filters
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x 
Developer: OlafenwaMoses, Project: Model-Playgrounds, Lines: 25, Source: densenet.py

Example 3: transpose_conv_block

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def transpose_conv_block(input_tensor,
                         channel,
                         kernel_size,
                         strides=(2, 2),
                         dropout_rate=0.4
                         ):

    skip = input_tensor

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2D(channel, kernel_size, strides=(1, 1), padding="same")(input_tensor)

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)

    if strides != (1, 1):
        skip = Conv2DTranspose(channel, (1, 1), strides=strides, padding="same")(skip)
    input_tensor = add([input_tensor, skip])

    return input_tensor 
Developer: s603122001, Project: Vocal-Melody-Extraction, Lines: 24, Source: model.py

Example 4: resnet_block_generator

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def resnet_block_generator(input, n_blocks, n_filters, kernel_size=(3, 3), stride=2):
    output = input
    for i in range(n_blocks):
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Conv2DTranspose(filters=n_filters, kernel_size=kernel_size,
                                 strides=stride, padding='same',
                                 kernel_initializer=weight_init)(output)

        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=1,
                        padding='same', kernel_initializer=weight_init)(output)

        if input.shape[1:] != output.shape[1:]:
            # Upsample input to match output dimension
            input = UpsampleConv(input, n_filters)
            print("resnet: adding layer to match residual input to output")

        # Residual Connection
        output = Add()([input, output])

    return output 
Developer: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines: 25, Source: resnet.py

Example 5: get_model

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def get_model(t):
    from keras.models import Model
    from keras.layers.convolutional import Conv2D, Conv2DTranspose
    from keras.layers.convolutional_recurrent import ConvLSTM2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.wrappers import TimeDistributed
    from keras.layers.core import Activation
    from keras.layers import Input

    input_tensor = Input(shape=(t, 224, 224, 1))

    conv1 = TimeDistributed(Conv2D(128, kernel_size=(11, 11), padding='same', strides=(4, 4), name='conv1'),
                            input_shape=(t, 224, 224, 1))(input_tensor)
    conv1 = TimeDistributed(BatchNormalization())(conv1)
    conv1 = TimeDistributed(Activation('relu'))(conv1)

    conv2 = TimeDistributed(Conv2D(64, kernel_size=(5, 5), padding='same', strides=(2, 2), name='conv2'))(conv1)
    conv2 = TimeDistributed(BatchNormalization())(conv2)
    conv2 = TimeDistributed(Activation('relu'))(conv2)

    convlstm1 = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm1')(conv2)
    convlstm2 = ConvLSTM2D(32, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm2')(convlstm1)
    convlstm3 = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm3')(convlstm2)

    deconv1 = TimeDistributed(Conv2DTranspose(128, kernel_size=(5, 5), padding='same', strides=(2, 2), name='deconv1'))(convlstm3)
    deconv1 = TimeDistributed(BatchNormalization())(deconv1)
    deconv1 = TimeDistributed(Activation('relu'))(deconv1)

    decoded = TimeDistributed(Conv2DTranspose(1, kernel_size=(11, 11), padding='same', strides=(4, 4), name='deconv2'))(
        deconv1)

    return Model(inputs=input_tensor, outputs=decoded) 
Developer: johndpope, Project: abnormal-spatiotemporal-ae, Lines: 34, Source: classifier.py
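A quick check of the feature-map sizes in the autoencoder above (a sketch, not project code): with padding='same', each downsampling convolution produces ceil(input / stride) and each Conv2DTranspose produces input * stride, so the decoder restores the 224x224 input resolution.

size = 224
for stride in (4, 2):          # conv1, conv2
    size = -(-size // stride)  # ceiling division: 224 -> 56 -> 28
for stride in (2, 4):          # deconv1, deconv2
    size = size * stride       # 28 -> 56 -> 224
print(size)  # 224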

Example 6: make_generator

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a "seed",
    and outputs images of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure
    # its output also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model 
Developer: keras-team, Project: keras-contrib, Lines: 31, Source: improved_wgan.py

Example 7: build_generator

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 28, 28, 1)
    cnn = Sequential()

    cnn.add(Dense(3 * 3 * 384, input_dim=latent_size, activation='relu'))
    cnn.add(Reshape((3, 3, 384)))

    # upsample to (7, 7, ...)
    cnn.add(Conv2DTranspose(192, 5, strides=1, padding='valid',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (14, 14, ...)
    cnn.add(Conv2DTranspose(96, 5, strides=2, padding='same',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (28, 28, ...)
    cnn.add(Conv2DTranspose(1, 5, strides=2, padding='same',
                            activation='tanh',
                            kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    cls = Flatten()(Embedding(num_classes, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 42, Source: mnist_acgan.py
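A quick sanity check of the upsampling path in this generator (a sketch, not part of the original mnist_acgan.py): with padding='valid' a transposed convolution produces (input - 1) * stride + kernel_size, and with padding='same' it produces input * stride.

size = 3                     # after Reshape((3, 3, 384))
size = (size - 1) * 1 + 5    # 'valid', stride 1, kernel 5 -> 7
size = size * 2              # 'same',  stride 2           -> 14
size = size * 2              # 'same',  stride 2           -> 28
print(size)  # 28, matching the (28, 28, 1) MNIST image shape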

Example 8: test_conv2d_transpose (Deconvolution2D, used below, is the legacy Keras alias for Conv2DTranspose)

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def test_conv2d_transpose():
    num_samples = 2
    filters = 2
    stack_size = 3
    num_row = 5
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'filters': filters,
                               'kernel_size': 3,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_last'},
                       input_shape=(num_samples, num_row, num_col, stack_size),
                       fixed_batch_size=True)

    layer_test(convolutional.Deconvolution2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_first',
                       'activation': None,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples, stack_size, num_row, num_col),
               fixed_batch_size=True)

    # Test invalid use case
    with pytest.raises(ValueError):
        model = Sequential([convolutional.Conv2DTranspose(filters=filters,
                                                          kernel_size=3,
                                                          padding=padding,
                                                          batch_input_shape=(None, None, 5, None))]) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 43, Source: convolutional_test.py

Example 9: generator_model

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def generator_model():
    """Generator model."""
    inputs = Input(Config.input_shape_generator)
    x = ReflectionPadding2D((3, 3))(inputs)
    print(x.shape)
    x = Conv2D(filters=Config.ngf, kernel_size=(7, 7), padding="valid")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    n_downsampling = 2
    for i in range(n_downsampling):
        mult = 2**i
        x = Conv2D(filters=Config.ngf*mult*2, kernel_size=(3, 3), strides=2, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

    mult = 2**n_downsampling
    for i in range(Config.n_blocks_gen):
        x = res_block(x, Config.ngf*mult, use_dropout=True)

    for i in range(n_downsampling):
        mult = 2**(n_downsampling-i)
        x = Conv2DTranspose(filters=int(Config.ngf*mult/2), kernel_size=(3, 3), strides=2,
                            padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    x = ReflectionPadding2D(padding=(3, 3))(x)
    x = Conv2D(filters=Config.output_nc, kernel_size=(7, 7), padding="valid")(x)
    x = Activation("tanh")(x)

    # Output: residual connection with the input
    outputs = Add()([inputs, x])
    outputs = Lambda(lambda z: z / 2)(outputs)
    print("generator:", outputs.shape)
    model = Model(inputs=inputs, outputs=outputs, name="Generator")
    return model
Developer: jarvisqi, Project: deep_learning, Lines: 39, Source: deblur_gan.py

Example 10: adapter

# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Conv2DTranspose [as alias]
def adapter(input_tensor,
            channel,
            kernel_size=(1, 9),
            strides=(1, 3),
            dropout_rate=0.2
            ):
    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)

    return input_tensor 
Developer: s603122001, Project: Vocal-Melody-Extraction, Lines: 13, Source: model.py


Note: The keras.layers.convolutional.Conv2DTranspose examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.