

Python layers.Concatenate Method Code Examples

This article collects typical usage examples of the Python method keras.layers.Concatenate. If you have been wondering what layers.Concatenate does, how to call it, or what real code using it looks like, the curated examples below should help. You can also browse further usage examples for the module the method belongs to, keras.layers.


The following presents 15 code examples of the layers.Concatenate method, sorted by popularity by default.
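Before the individual examples, here is a minimal sketch of the functional-API pattern they all build on: instantiate Concatenate with the axis to join on, then call it on a list of tensors. The layer names, shapes, and toy architecture below are illustrative assumptions, not taken from any of the projects that follow.

# Minimal illustrative sketch -- names and shapes are assumptions, not from the projects below.
from keras.layers import Input, Dense, Concatenate
from keras.models import Model

branch_a = Input(shape=(16,), name='branch_a')   # first feature branch
branch_b = Input(shape=(8,), name='branch_b')    # second feature branch

# Join along the last (feature/channel) axis: (batch, 16) + (batch, 8) -> (batch, 24)
merged = Concatenate(axis=-1)([branch_a, branch_b])

output = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[branch_a, branch_b], outputs=output)
model.summary()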

Example 1: build_discriminator

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Concatenate image and conditioning image by channels to produce input
        combined_imgs = Concatenate(axis=-1)([img_A, img_B])

        d1 = d_layer(combined_imgs, self.df, bn=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model([img_A, img_B], validity) 
Developer: eriklindernoren, Project: Keras-GAN, Lines: 26, Source: pix2pix.py

Example 2: InceptionLayer

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def InceptionLayer(self, a, b, c, d):
        def func(x):
            x1 = Conv2D(a, (1, 1), padding='same', activation='relu')(x)
            
            x2 = Conv2D(b, (1, 1), padding='same', activation='relu')(x)
            x2 = Conv2D(b, (3, 3), padding='same', activation='relu')(x2)
            
            x3 = Conv2D(c, (1, 1), padding='same', activation='relu')(x)
            x3 = Conv2D(c, (3, 3), dilation_rate=2, strides=1, padding='same', activation='relu')(x3)

            x4 = Conv2D(d, (1, 1), padding='same', activation='relu')(x)
            x4 = Conv2D(d, (3, 3), dilation_rate=3, strides=1, padding='same', activation='relu')(x4)

            y = Concatenate(axis=-1)([x1, x2, x3, x4])
            
            return y
        return func 
Developer: DariusAf, Project: MesoNet, Lines: 19, Source: classifiers.py

Example 3: yolo_body

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body(inputs))
    x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))

    x = compose(
            DarknetConv2D_BN_Leaky(256, (1,1)),
            UpSampling2D(2))(x)
    x = Concatenate()([x,darknet.layers[152].output])
    x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))

    x = compose(
            DarknetConv2D_BN_Leaky(128, (1,1)),
            UpSampling2D(2))(x)
    x = Concatenate()([x,darknet.layers[92].output])
    x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))

    return Model(inputs, [y1,y2,y3]) 
Developer: bing0037, Project: keras-yolo3, Lines: 20, Source: model.py

Example 4: get_data_helper

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def get_data_helper(self, data, data_type):
        """
        A helper function for data generation that combines different data types into a single representation
        :param data: A dictionary of different data types
        :param data_type: The data types defined for encoder and decoder input/output
        :return: A unified data representation as a list
        """
        if not data_type:
            return []
        d = []
        for dt in data_type:
            if dt == 'image':
                continue
            d.append(np.array(data[dt]))

        #  Concatenate different data points into a single representation
        if len(d) > 1:
            return np.concatenate(d, axis=2)
        else:
            return d[0] 
Developer: aras62, Project: PIEPredict, Lines: 22, Source: pie_predict.py

Example 5: build_mbllen

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def build_mbllen(input_shape):

    def EM(input, kernal_size, channel):
        conv_1 = Conv2D(channel, (3, 3), activation='relu', padding='same', data_format='channels_last')(input)
        conv_2 = Conv2D(channel, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_1)
        conv_3 = Conv2D(channel*2, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_2)
        conv_4 = Conv2D(channel*4, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_3)
        conv_5 = Conv2DTranspose(channel*2, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_4)
        conv_6 = Conv2DTranspose(channel, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_5)
        res = Conv2DTranspose(3, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_6)
        return res

    inputs = Input(shape=input_shape)
    FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(inputs)
    EM_com = EM(FEM, 5, 8)

    for j in range(3):
        for i in range(0, 3):
            FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(FEM)
            EM1 = EM(FEM, 5, 8)
            EM_com = Concatenate(axis=3)([EM_com, EM1])

    outputs = Conv2D(3, (1, 1), activation='relu', padding='same', data_format='channels_last')(EM_com)
    return Model(inputs, outputs) 
Developer: Lvfeifan, Project: MBLLEN, Lines: 26, Source: Network.py

Example 6: VariousConv1D

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def VariousConv1D(x, filter_sizes, num_filters, name_prefix=''):
    '''
    Layer wrapper function for various filter sizes Conv1Ds
    # Arguments:
        x: tensor, shape = (B, T, E)
        filter_sizes: list of int, list of each Conv1D filter sizes
        num_filters: list of int, list of each Conv1D num of filters
        name_prefix: str, layer name prefix
    # Returns:
        out: tensor, shape = (B, sum(num_filters))
    '''
    conv_outputs = []
    for filter_size, n_filter in zip(filter_sizes, num_filters):
        conv_name = '{}VariousConv1D/Conv1D/filter_size_{}'.format(name_prefix, filter_size)
        pooling_name = '{}VariousConv1D/MaxPooling/filter_size_{}'.format(name_prefix, filter_size)
        conv_out = Conv1D(n_filter, filter_size, name=conv_name)(x)   # (B, time_steps, n_filter)
        conv_out = GlobalMaxPooling1D(name=pooling_name)(conv_out) # (B, n_filter)
        conv_outputs.append(conv_out)
    concatenate_name = '{}VariousConv1D/Concatenate'.format(name_prefix)
    out = Concatenate(name=concatenate_name)(conv_outputs)
    return out 
Developer: tyo-yo, Project: SeqGAN, Lines: 23, Source: models.py
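As a point of reference, the sketch below shows how VariousConv1D might be called. The vocabulary size, sequence length, and filter settings are assumptions for illustration (not values from the SeqGAN repository), and it presumes the imports the snippet itself relies on (Conv1D, GlobalMaxPooling1D, Concatenate) are already in scope.

# Illustrative usage sketch; dimensions below are assumptions.
from keras.layers import Input, Embedding
from keras.models import Model

tokens = Input(shape=(20,), name='tokens')                    # (B, T)
embedded = Embedding(input_dim=5000, output_dim=64)(tokens)   # (B, T, E)

# Three filter sizes with 16 filters each -> output shape (B, 48) = (B, sum(num_filters))
features = VariousConv1D(embedded, filter_sizes=[2, 3, 4], num_filters=[16, 16, 16])
toy_model = Model(tokens, features)
toy_model.summary()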

Example 7: model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def model(self):


        input_A = Input(shape=self.SHAPE)
        input_B = Input(shape=self.SHAPE)
        input_layer = Concatenate(axis=-1)([input_A, input_B])

        up_layer_1 = Convolution2D(self.FS, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(input_layer)

        up_layer_2 = Convolution2D(self.FS*2, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(up_layer_1)
        leaky_layer_2 = BatchNormalization(momentum=0.8)(up_layer_2)

        up_layer_3 = Convolution2D(self.FS*4, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(leaky_layer_2)
        leaky_layer_3 = BatchNormalization(momentum=0.8)(up_layer_3)

        up_layer_4 = Convolution2D(self.FS*8, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(leaky_layer_3)
        leaky_layer_4 = BatchNormalization(momentum=0.8)(up_layer_4)

        output_layer = Convolution2D(1, kernel_size=4, strides=1, padding='same')(leaky_layer_4)
        
        return Model([input_A, input_B],output_layer) 
Developer: PacktPublishing, Project: Generative-Adversarial-Networks-Cookbook, Lines: 23, Source: discriminator.py

Example 8: creat_discriminator

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def creat_discriminator(self):
        # layer 0
        image_A = Input(shape=self.image_shape)
        image_B = Input(shape=self.image_shape)
        combined_images = Concatenate(axis=-1)([image_A, image_B])
        # layer 1
        d1 = Conv2D(filters=64, kernel_size=4, strides=2, padding='same')(combined_images)
        d1 = LeakyReLU(alpha=0.2)(d1)
        # layer 2
        d2 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d1)
        d2 = LeakyReLU(alpha=0.2)(d2)
        d2 = BatchNormalization(momentum=0.8)(d2)
        # layer 3
        d3 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d2)
        d3 = LeakyReLU(alpha=0.2)(d3)
        d3 = BatchNormalization(momentum=0.8)(d3)
        # layer 4
        d4 = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(d3)
        d4 = LeakyReLU(alpha=0.2)(d4)
        d4 = BatchNormalization(momentum=0.8)(d4)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return Model([image_A, image_B], validity) 
Developer: wmylxmj, Project: Pix2Pix-Keras, Lines: 24, Source: model.py

Example 9: yolo_body

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body(inputs))
    x, y1 = make_last_layers(
        darknet.output, 512, num_anchors * (num_classes + 5))

    x = compose(
        DarknetConv2D_BN_Leaky(256, (1, 1)),
        UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[152].output])
    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))

    x = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[92].output])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))

    return Model(inputs, [y1, y2, y3]) 
Developer: advboxes, Project: perceptron-benchmark, Lines: 21, Source: model.py

Example 10: yolo_main

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def yolo_main(input, num_anchors, num_classes):

    darknet_network = Model(input, darknet(input))

    network, network_1 = last_layers(darknet_network.output, 512, num_anchors * (num_classes + 5), layer_name="last1")

    network = NetworkConv2D_BN_Leaky( input=network, channels=256, kernel_size=(1,1))
    network = UpSampling2D(2)(network)
    network = Concatenate()([network, darknet_network.layers[152].output])

    network, network_2 = last_layers(network,  256,  num_anchors * (num_classes + 5), layer_name="last2")

    network = NetworkConv2D_BN_Leaky(input=network, channels=128, kernel_size=(1, 1))
    network = UpSampling2D(2)(network)
    network = Concatenate()([network, darknet_network.layers[92].output])

    network, network_3 = last_layers(network, 128, num_anchors * (num_classes + 5), layer_name="last3")

    return Model(input, [network_1, network_2, network_3]) 
Developer: OlafenwaMoses, Project: ImageAI, Lines: 21, Source: models.py

Example 11: model_inputs

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def model_inputs(self, input_shape, conditions_shape=None):
        """
        :param input_shape: np.array
            (window_size, n_features)
        :param conditions_shape: np.array
            (horizon, n_features)
        :return: a tuple containing:
            - a list containing all the Input Layers needed by the model
            - the tensor that has to be fed to the subsequent layers of the architecture
        """
        inputs = Input(shape=input_shape, name='input')
        if conditions_shape is not None:
            conditions = Input(shape=conditions_shape, name='exogenous')
            # project both through Dense layers so they have the same number of channels
            out = Concatenate(axis=1)(
                [Dense(units=128, activation='sigmoid')(inputs),
                 Dense(units=128, activation='tanh')(conditions)]
            )  # concatenate over temporal axis
            return [inputs, conditions], out
        return inputs, inputs 
Developer: albertogaspar, Project: dts, Lines: 22, Source: FFNN.py

Example 12: _get_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def _get_model(X, cat_cols, num_cols, n_uniq, n_emb, output_activation):
        inputs = []
        num_inputs = []
        embeddings = []
        for i, col in enumerate(cat_cols):

            if not n_uniq[i]:
                n_uniq[i] = X[col].nunique()
            if not n_emb[i]:
                n_emb[i] = max(MIN_EMBEDDING, 2 * int(np.log2(n_uniq[i])))

            _input = Input(shape=(1,), name=col)
            _embed = Embedding(input_dim=n_uniq[i], output_dim=n_emb[i], name=col + EMBEDDING_SUFFIX)(_input)
            _embed = Dropout(.2)(_embed)
            _embed = Reshape((n_emb[i],))(_embed)

            inputs.append(_input)
            embeddings.append(_embed)

        if num_cols:
            num_inputs = Input(shape=(len(num_cols),), name='num_inputs')
            merged_input = Concatenate(axis=1)(embeddings + [num_inputs])

            inputs = inputs + [num_inputs]
        else:
            merged_input = Concatenate(axis=1)(embeddings)

        x = BatchNormalization()(merged_input)
        x = Dense(128, activation='relu')(x)
        x = Dropout(.5)(x)
        x = BatchNormalization()(x)
        x = Dense(64, activation='relu')(x)
        x = Dropout(.5)(x)
        x = BatchNormalization()(x)
        output = Dense(1, activation=output_activation)(x)

        model = Model(inputs=inputs, outputs=output)

        return model, n_emb, n_uniq 
Developer: jeongyoonlee, Project: Kaggler, Lines: 41, Source: categorical.py
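A hedged construction sketch follows. The DataFrame, column names, and the constants MIN_EMBEDDING and EMBEDDING_SUFFIX (which the snippet references but does not define) are assumptions, and it presumes _get_model and the Keras layers it uses are available in the same namespace as this sketch.

# Illustrative sketch; data, constants, and column names are assumptions.
import numpy as np
import pandas as pd

MIN_EMBEDDING = 4          # assumed values for the constants used inside _get_model
EMBEDDING_SUFFIX = '_emb'  # defined here only to make the sketch self-contained

X = pd.DataFrame({'cat_a': np.random.randint(0, 10, size=100),
                  'cat_b': np.random.randint(0, 5, size=100),
                  'num_a': np.random.rand(100)})

model, n_emb, n_uniq = _get_model(X, cat_cols=['cat_a', 'cat_b'], num_cols=['num_a'],
                                  n_uniq=[0, 0], n_emb=[0, 0],
                                  output_activation='sigmoid')
model.summary()   # the embeddings and numeric inputs are merged by Concatenate(axis=1)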

Example 13: build

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def build(self, input_shape):
        self._validate_input_shape(input_shape)
        
        d_k = self._d_k if self._d_k else input_shape[1][-1]
        d_model = self._d_model if self._d_model else input_shape[1][-1]
        d_v = self._d_v

        if type(d_k) == tf.Dimension:
            d_k = d_k.value
        if type(d_model) == tf.Dimension:
            d_model = d_model.value
        
        self._q_layers = []
        self._k_layers = []
        self._v_layers = []
        self._sdp_layer = ScaledDotProductAttention(return_attention=self._return_attention)
    
        for _ in range(self._h):
            self._q_layers.append(
                TimeDistributed(
                    Dense(d_k, activation=self._activation, use_bias=False)
                )
            )
            self._k_layers.append(
                TimeDistributed(
                    Dense(d_k, activation=self._activation, use_bias=False)
                )
            )
            self._v_layers.append(
                TimeDistributed(
                    Dense(d_v, activation=self._activation, use_bias=False)
                )
            )
        
        self._output = TimeDistributed(Dense(d_model))
        #if self._return_attention:
        #    self._output = Concatenate() 
Developer: zimmerrol, Project: keras-utility-layer-collection, Lines: 39, Source: attention.py

Example 14: __call__

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def __call__(self, x, mask=None):
        if isinstance(x, (list, tuple)):
            self.build([it.shape for it in x])
        else:
            self.build(x.shape)

        q, k, v = x
        
        outputs = []
        attentions = []
        for i in range(self._h):
            qi = self._q_layers[i](q)
            ki = self._k_layers[i](k)
            vi = self._v_layers[i](v)
            
            if self._return_attention:
                output, attention = self._sdp_layer([qi, ki, vi], mask=mask)
                outputs.append(output)
                attentions.append(attention)
            else:
                output = self._sdp_layer([qi, ki, vi], mask=mask)
                outputs.append(output)
            
        concatenated_outputs = Concatenate()(outputs)
        output = self._output(concatenated_outputs)
        
        if self._return_attention:
            attention = Concatenate()(attentions)
            # print("attention", attention, attention.shape)
       
        if self._return_attention:
            return [output, attention]
        else:
            return output        

# https://wanasit.github.io/attention-based-sequence-to-sequence-in-keras.html
# https://arxiv.org/pdf/1508.04025.pdf 
Developer: zimmerrol, Project: keras-utility-layer-collection, Lines: 39, Source: attention.py

Example 15: build_generator

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Concatenate [as alias]
def build_generator(self):
        """U-Net Generator"""

        def conv2d(layer_input, filters, f_size=4, bn=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            u = Concatenate()([u, skip_input])
            return u

        img = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(img, self.gf, bn=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)

        # Upsampling
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)

        return Model(img, output_img) 
Developer: eriklindernoren, Project: Keras-GAN, Lines: 40, Source: ccgan.py


Note: the keras.layers.Concatenate examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce this article without permission.