

Python layers.ZeroPadding3D Method Code Examples

This article collects typical usage examples of the keras.layers.ZeroPadding3D method in Python. If you are wondering how exactly layers.ZeroPadding3D is used, what it does, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore further usage examples from keras.layers, the module this method belongs to.


Six code examples of the layers.ZeroPadding3D method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
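
For orientation, here is a minimal, self-contained sketch of the layer itself (not taken from any of the projects below; it assumes Keras 2 with the default channels_last data format). ZeroPadding3D pads the three inner dimensions of a 5D tensor of shape (batch, dim1, dim2, dim3, channels):

from keras.models import Sequential
from keras.layers import ZeroPadding3D

model = Sequential()
# One plane of zeros on both sides of each of the three dimensions: 16x16x16 -> 18x18x18
model.add(ZeroPadding3D(padding=1, input_shape=(16, 16, 16, 3)))
print(model.output_shape)  # (None, 18, 18, 18, 3)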

Example 1: test_keras_import

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import ZeroPadding3D [as alias]
def test_keras_import(self):
        # Pad 1D
        model = Sequential()
        model.add(ZeroPadding1D(2, input_shape=(224, 3)))
        model.add(Conv1D(32, 7, strides=2))
        model.build()
        self.pad_test(model, 'pad_w', 2)
        # Pad 2D
        model = Sequential()
        model.add(ZeroPadding2D(2, input_shape=(224, 224, 3)))
        model.add(Conv2D(32, 7, strides=2))
        model.build()
        self.pad_test(model, 'pad_w', 2)
        # Pad 3D
        model = Sequential()
        model.add(ZeroPadding3D(2, input_shape=(224, 224, 224, 3)))
        model.add(Conv3D(32, 7, strides=2))
        model.build()
        self.pad_test(model, 'pad_w', 2)


# ********** Export json tests **********

# ********** Data Layers Test ********** 
Developer ID: Cloud-CV, Project: Fabrik, Lines of code: 26, Source: test_views.py

Example 2: pad_to_multiple

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import ZeroPadding3D [as alias]
def pad_to_multiple(x, block_shape):
    # Derive padding amounts from the tensor's static shape modulo the block shape.
    ori_shape = K.int_shape(x)

    pad_t = ori_shape[1] % block_shape[0]
    pad_f = ori_shape[2] % block_shape[1]
    padding = ((0, 0), (0, pad_t), (0, pad_f))

    # Append zeros at the trailing edge of two dimensions via ZeroPadding3D;
    # the first padded dimension is left untouched.
    new_x = L.ZeroPadding3D(padding=padding)(x)

    return new_x
Developer ID: BreezeWhite, Project: Music-Transcription-with-Semantic-Segmentation, Lines of code: 12, Source: attn_utils.py
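
The helper above passes the padding to ZeroPadding3D as a tuple of (pad_begin, pad_end) pairs, one pair per padded dimension. A hedged, self-contained sketch of that format with hypothetical shapes:

from keras.layers import Input, ZeroPadding3D

x = Input(shape=(8, 380, 100, 4))        # hypothetical 5D feature map (batch axis implicit)
padding = ((0, 0), (0, 4), (0, 28))      # zeros appended at the trailing edge only
y = ZeroPadding3D(padding=padding)(x)    # -> (None, 8, 384, 128, 4)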

Example 3: c3d

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import ZeroPadding3D [as alias]
def c3d(self):
        """
        Build a 3D convolutional network, aka C3D.
            https://arxiv.org/pdf/1412.0767.pdf

        With thanks:
            https://gist.github.com/albertomontesg/d8b21a179c1e6cca0480ebdf292c34d2
        """
        model = Sequential()
        # 1st layer group
        model.add(Conv3D(64, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv1',
                         subsample=(1, 1, 1),
                         input_shape=self.input_shape))
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                               border_mode='valid', name='pool1'))
        # 2nd layer group
        model.add(Conv3D(128, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv2',
                         subsample=(1, 1, 1)))
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                               border_mode='valid', name='pool2'))
        # 3rd layer group
        model.add(Conv3D(256, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv3a',
                         subsample=(1, 1, 1)))
        model.add(Conv3D(256, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv3b',
                         subsample=(1, 1, 1)))
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                               border_mode='valid', name='pool3'))
        # 4th layer group
        model.add(Conv3D(512, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv4a',
                         subsample=(1, 1, 1)))
        model.add(Conv3D(512, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv4b',
                         subsample=(1, 1, 1)))
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                               border_mode='valid', name='pool4'))

        # 5th layer group
        model.add(Conv3D(512, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv5a',
                         subsample=(1, 1, 1)))
        model.add(Conv3D(512, 3, 3, 3, activation='relu',
                         border_mode='same', name='conv5b',
                         subsample=(1, 1, 1)))
        model.add(ZeroPadding3D(padding=(0, 1, 1)))
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                               border_mode='valid', name='pool5'))
        model.add(Flatten())

        # FC layers group
        model.add(Dense(4096, activation='relu', name='fc6'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu', name='fc7'))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model 
Developer ID: harvitronix, Project: five-video-classification-methods, Lines of code: 63, Source: models.py
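
The snippet above uses legacy Keras 1-style arguments (border_mode, subsample, positional kernel dimensions). Below is a hedged sketch, not part of the original repository, of just the conv5b / zero-padding / pool5 step in Keras 2 syntax, using the hypothetical (2, 7, 7, 512) feature map that a 16-frame 112x112 input would reach after pool4:

from keras.models import Sequential
from keras.layers import Conv3D, ZeroPadding3D, MaxPooling3D

model = Sequential()
model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                 strides=(1, 1, 1), name='conv5b', input_shape=(2, 7, 7, 512)))
model.add(ZeroPadding3D(padding=(0, 1, 1)))   # pad only the two spatial dimensions
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                       padding='valid', name='pool5'))
print(model.output_shape)  # (None, 1, 4, 4, 512)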

Example 4: multihead_attention

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import ZeroPadding3D [as alias]
def multihead_attention(x, out_channel=64, d_model=32, n_heads=8, query_shape=(128, 24), memory_flange=(8, 8)):
    q = Conv2D(d_model, (3, 3), strides=(1, 1), padding="same", name="gen_q_conv")(x)
    k = Conv2D(d_model, (3, 3), strides=(1, 1), padding="same", name="gen_k_conv")(x)
    v = Conv2D(d_model, (3, 3), strides=(1, 1), padding="same", name="gen_v_conv")(x)

    q = split_heads_2d(q, n_heads)
    k = split_heads_2d(k, n_heads)
    v = split_heads_2d(v, n_heads)

    k_depth_per_head = d_model // n_heads
    q *= k_depth_per_head**-0.5

    
    """
    # local attention 2d
    v_shape = K.int_shape(v)
    q = pad_to_multiple(q, query_shape)
    k = pad_to_multiple(k, query_shape)
    v = pad_to_multiple(v, query_shape)

    paddings = ((0, 0), (memory_flange[0], memory_flange[1]), (memory_flange[0], memory_flange[1]))
    k = L.ZeroPadding3D(padding=paddings)(k)
    v = L.ZeroPadding3D(padding=paddings)(v)
    
    # Set up query blocks
    q_indices = gather_indices_2d(q, query_shape, query_shape)
    q_new = gather_blocks_2d(q, q_indices)

    # Set up key and value blocks
    memory_shape = (query_shape[0] + 2*memory_flange[0],
                    query_shape[1] + 2*memory_flange[1])
    k_and_v_indices = gather_indices_2d(k, memory_shape, query_shape)
    k_new = gather_blocks_2d(k, k_and_v_indices)
    v_new = gather_blocks_2d(v, k_and_v_indices)

    output = dot_attention(q_new, k_new, v_new)

    # Put output back into original shapes
    padded_shape = K.shape(q)
    output = scatter_blocks_2d(output, q_indices, padded_shape) 

    # Remove padding
    output = K.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1])
    """

    output = local_attention_2d(q, k, v, query_shape=query_shape, memory_flange=memory_flange)
    
    output = combine_heads_2d(output)
    output = Conv2D(out_channel, (3, 3), strides=(1, 1), padding="same", use_bias=False)(output)
    
    return output 
Developer ID: BreezeWhite, Project: Music-Transcription-with-Semantic-Segmentation, Lines of code: 53, Source: model_attn.py
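
The commented-out block keeps a reference implementation of local 2D attention; its use of ZeroPadding3D adds the memory flange around the two block dimensions so each query block can attend to a halo of neighbouring keys and values. A hedged, self-contained sketch with hypothetical shapes (assuming the tensors after split_heads_2d are 5D):

from keras import layers as L
from keras.layers import Input

memory_flange = (8, 8)
k = Input(shape=(8, 384, 128, 4))                # hypothetical 5D tensor after split_heads_2d
paddings = ((0, 0),
            (memory_flange[0], memory_flange[1]),
            (memory_flange[0], memory_flange[1]))
k_padded = L.ZeroPadding3D(padding=paddings)(k)  # -> (None, 8, 400, 144, 4)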

Example 5: build

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import ZeroPadding3D [as alias]
def build(video_shape, audio_spectrogram_size):
		model = Sequential()

		model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero1', input_shape=video_shape))
		model.add(Convolution3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv1'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max1'))
		model.add(Dropout(0.25))

		model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero2'))
		model.add(Convolution3D(64, (3, 5, 5), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv2'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max2'))
		model.add(Dropout(0.25))

		model.add(ZeroPadding3D(padding=(1, 1, 1), name='zero3'))
		model.add(Convolution3D(128, (3, 3, 3), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv3'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max3'))
		model.add(Dropout(0.25))

		model.add(TimeDistributed(Flatten(), name='time'))

		model.add(Dense(1024, kernel_initializer='he_normal', name='dense1'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(Dropout(0.25))

		model.add(Dense(1024, kernel_initializer='he_normal', name='dense2'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(Dropout(0.25))

		model.add(Flatten())

		model.add(Dense(2048, kernel_initializer='he_normal', name='dense3'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(Dropout(0.25))

		model.add(Dense(2048, kernel_initializer='he_normal', name='dense4'))
		model.add(BatchNormalization())
		model.add(LeakyReLU())
		model.add(Dropout(0.25))

		model.add(Dense(audio_spectrogram_size, name='output'))

		model.summary()

		return VideoToSpeechNet(model) 
Developer ID: avivga, Project: cocktail-party, Lines of code: 55, Source: network.py
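
A hedged, self-contained sketch of the first block above with a hypothetical video_shape of (9, 128, 128, 1), assuming Keras 2 (where Convolution3D is an alias of Conv3D). The (1, 2, 2) zero padding grows the volume to 11x132x132, so the (3, 5, 5) kernel with strides (1, 2, 2) preserves the time axis while halving the spatial axes:

from keras.models import Sequential
from keras.layers import ZeroPadding3D, Convolution3D

model = Sequential()
model.add(ZeroPadding3D(padding=(1, 2, 2), input_shape=(9, 128, 128, 1)))
model.add(Convolution3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal'))
print(model.output_shape)  # (None, 9, 64, 64, 32)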

Example 6: pooling

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import ZeroPadding3D [as alias]
def pooling(layer, layer_in, layerId, tensor=True):
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    out = {}
    layer_type = layer['params']['layer_type']
    pool_type = layer['params']['pool']
    padding = get_padding(layer)
    if (layer_type == '1D'):
        strides = layer['params']['stride_w']
        kernel = layer['params']['kernel_w']
        if (padding == 'custom'):
            p_w = layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding1D(padding=p_w)(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    elif (layer_type == '2D'):
        strides = (layer['params']['stride_h'], layer['params']['stride_w'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'])
        if (padding == 'custom'):
            p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    else:
        strides = (layer['params']['stride_h'], layer['params']['stride_w'],
                   layer['params']['stride_d'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'],
                  layer['params']['kernel_d'])
        if (padding == 'custom'):
            p_h, p_w, p_d = layer['params']['pad_h'], layer['params']['pad_w'],\
                layer['params']['pad_d']
            out[layerId + 'Pad'] = ZeroPadding3D(padding=(p_h, p_w, p_d))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    # Note - figure out a permanent fix for padding calculation of layers
    # in case padding is given in layer attributes
    # if ('padding' in layer['params']):
    #    padding = layer['params']['padding']
    out[layerId] = poolMap[(layer_type, pool_type)](
        pool_size=kernel, strides=strides, padding=padding)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out


# ********** Locally-connected Layers ********** 
Developer ID: Cloud-CV, Project: Fabrik, Lines of code: 55, Source: layers_export.py
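
A hedged, self-contained sketch of what the '3D' custom-padding branch above produces: a standalone ZeroPadding3D layer followed by the pooling layer with padding='valid'. The shapes and padding values are hypothetical:

from keras.layers import Input, ZeroPadding3D, MaxPooling3D

layer_in = Input(shape=(16, 16, 16, 3))
p_h, p_w, p_d = 1, 1, 1                    # would come from layer['params']['pad_*']
padded = ZeroPadding3D(padding=(p_h, p_w, p_d))(layer_in)
pooled = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid')(padded)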


Note: The keras.layers.ZeroPadding3D method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.