

Python core.Permute Method Code Examples

This article collects typical usage examples of the keras.layers.core.Permute method in Python. If you are wondering what core.Permute does, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the keras.layers.core module.


The following presents 15 code examples of the core.Permute method, sorted by popularity by default.
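Before the examples, here is a minimal sketch of what Permute does to a tensor's axes (assuming a Keras 2.x environment where Permute is importable from keras.layers; the shapes are illustrative):

from keras.layers import Input, Permute
from keras.models import Model

# Permute reorders the non-batch axes; the dims argument is 1-indexed.
inputs = Input(shape=(10, 64, 3))       # (batch, 10, 64, 3)
outputs = Permute((3, 2, 1))(inputs)    # -> (batch, 3, 64, 10)

model = Model(inputs=inputs, outputs=outputs)
print(model.output_shape)               # (None, 3, 64, 10)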

Example 1: get_shallow_convnet

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def get_shallow_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))

    conv = ComplexConv1D(
        32, 512, strides=16,
        activation='relu')(inputs)
    pool = AveragePooling1D(pool_size=4, strides=2)(conv)

    pool = Permute([2, 1])(pool)
    flattened = Flatten()(pool)

    dense = ComplexDense(2048, activation='relu')(flattened)
    predictions = ComplexDense(
        output_size, 
        activation='sigmoid',
        bias_initializer=Constant(value=-5))(dense)
    predictions = GetReal(predictions)
    model = Model(inputs=inputs, outputs=predictions)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Author: ChihebTrabelsi, Project: deep_complex_networks, Lines: 25, Source: __init__.py

Example 2: test_permute

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def test_permute(self):
        """
        Test the conversion of a permute layer.
        """
        from keras.layers.core import Permute

        # Create a simple Keras model
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names))
        six.assertCountEqual(
            self, input_names, [x.name for x in spec.description.input]
        )
        self.assertEquals(len(spec.description.output), len(output_names))
        six.assertCountEqual(
            self, output_names, [x.name for x in spec.description.output]
        )

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.permute) 
Author: apple, Project: coremltools, Lines: 35, Source: test_keras.py
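For context, a minimal standalone version of the conversion this test exercises (a sketch assuming an older coremltools release that still ships the Keras converter; the input/output names are illustrative):

from keras.models import Sequential
from keras.layers.core import Permute
from coremltools.converters import keras as keras_converter

model = Sequential()
model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

# Convert to Core ML and inspect the generated spec.
mlmodel = keras_converter.convert(model, input_names=["input"], output_names=["output"])
spec = mlmodel.get_spec()
print(spec.neuralNetwork.layers[0].permute)  # parameters of the converted permute layer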

Example 3: test_permute

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def test_permute(self):
        """
        Test the conversion of a permute layer.
        """
        from keras.layers.core import Permute

        # Create a simple Keras model
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names))
        self.assertEqual(
            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
        )
        self.assertEquals(len(spec.description.output), len(output_names))
        self.assertEqual(
            sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))
        )

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.permute) 
Author: apple, Project: coremltools, Lines: 35, Source: test_keras2.py

Example 4: create_model

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def create_model(input_shape, config, is_training=True):

    weight_decay = 0.001

    model = Sequential()

    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))

    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y*c)))

    model.add(Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))

    return model 
Author: HPI-DeepLearning, Project: crnn-lid, Lines: 39, Source: topcoder_crnn.py
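A short sketch of the Permute + Reshape bridge used above, with made-up feature-map dimensions, showing how the 2D feature map becomes a width-major sequence for the LSTM:

from keras.models import Sequential
from keras.layers import Permute, Reshape, LSTM

model = Sequential()
# Suppose the convolutional stack ends with a (batch, y=6, x=20, c=256) map.
model.add(Permute((2, 1, 3), input_shape=(6, 20, 256)))   # -> (batch, 20, 6, 256)
_, x, y, c = model.layers[-1].output_shape
model.add(Reshape((x, y * c)))                            # -> (batch, 20, 1536)
model.add(LSTM(512))                                      # 20 time steps of 1536 features
print(model.output_shape)                                 # (None, 512)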

Example 5: dense_cnn

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def dense_cnn(input, nclass):

    _dropout_rate = 0.2
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64  5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)

    # 64 +  8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    #128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    #128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    #192->128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    #128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    basemodel = Model(inputs=input,outputs=y_pred)
    basemodel.summary()
    return basemodel 
Author: jarvisqi, Project: deep_learning, Lines: 35, Source: densenet.py
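To make the shape handling concrete, a small sketch (with assumed feature-map dimensions) of the Permute + TimeDistributed(Flatten()) head used above, which yields one class distribution per image column:

from keras.layers import Input, Permute, TimeDistributed, Flatten, Dense
from keras.models import Model

feat = Input(shape=(4, 35, 192))                # (batch, h=4, w=35, c=192)
x = Permute((2, 1, 3))(feat)                    # -> (batch, 35, 4, 192)
x = TimeDistributed(Flatten())(x)               # -> (batch, 35, 768)
y_pred = Dense(5000, activation='softmax')(x)   # one distribution per column
print(Model(feat, y_pred).output_shape)         # (None, 35, 5000)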

Example 6: dense_cnn

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def dense_cnn(input, nclass):

    _dropout_rate = 0.2 
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)
   
    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred 
Author: YCG09, Project: chinese_ocr, Lines: 36, Source: densenet.py

Example 7: dense_cnn

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def dense_cnn(input, nclass):
    _dropout_rate = 0.2
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)

    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred


# input = Input(shape=(32, 280, 1), name='the_input')
# dense_cnn(input, 5000) 
Author: bing1zhi2, Project: chinese_ocr, Lines: 39, Source: densenet.py

Example 8: sentence_embedding

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def sentence_embedding(sentence_len, wv_params, wv_size,
                       input_name='sentence_embedding', output_name='vector_embedding'):
    '''
    Creates an embedding of word vectors into a sentence image.

    Args:
    -----
        sentence_len: length of sentences to be passed

        wv_params: a dict of the following format

                        wv_params = {
                            'fixed_wv' : 
                            {
                                'vocab_size' : 1000,
                                'init' : None,
                                'fixed' : True
                            },
                            'floating_wv' : 
                            {
                                'vocab_size' : 1000,
                                'init' : None,
                                'fixed' : False
                            }
                        }
            the keys of the dictionary are the names in the keras graph model, and
            you can have any number of word vector layers encoded.

        input_name: the name of the input node for the graph

        output_name: the name of the output node for the graph

    Returns:
    --------

        a keras container that takes as input an integer array with shape (n_samples, n_words), and returns 
        shape (n_samples, wv_channels, len_sentence, wv_dim)!
    '''
    # -- output is (n_samples, n_channels, n_words, wv_dim)
    g = SubGraph()

    if KERAS_BACKEND:
        g.add_input(input_name, (sentence_len, ), dtype='int')
    else:
        g.add_input(input_name, (-1, ), dtype='int')

    for name, params in wv_params.iteritems():
        # g.add_input(params['input_name'], (-1, ), dtype='int')
        g.add_node(make_embedding(wv_size=wv_size, **params), name=name, input=input_name)

    g.add_node(Reshape((sentence_len, len(wv_params), wv_size)), name='reshape',
               inputs=wv_params.keys(), merge_mode='concat')
    g.add_node(Permute(dims=(2, 1, 3)), name='permute', input='reshape')
    
    # -- output is of shape (nb_samples, nb_wv_channels, len_sentence, wv_dim)
    g.add_output(name=output_name, input='permute')
    return g 
Author: textclf, Project: fancy-cnn, Lines: 59, Source: embeddings.py

Example 9: cnn

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def cnn(embedding_matrix, dimx=50, dimy=50, nb_filter = 120, 
        embedding_dim = 50,filter_length = (50,4), vocab_size = 8000, depth = 1):

    print 'Model Uses Basic CNN......'
    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')   
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    
    x = word2vec_embedding_layer(embedding_matrix,train=False)(inpx)
    y = word2vec_embedding_layer(embedding_matrix,train=False)(inpy)
    
    x = Permute((2,1))(x)
    y = Permute((2,1))(y)

    conv1 = Reshape((embedding_dim,dimx,1))(x)
    conv2 = Reshape((embedding_dim,dimy,1))(y)   
       
    channel_1, channel_2 = [], []
    
    for dep in range(depth):
        
        #conv1 = ZeroPadding2D((filter_width - 1, 0))(conv1)
        #conv2 = ZeroPadding2D((filter_width - 1, 0))(conv2)
        

        ques = Conv2D(nb_filter=nb_filter, kernel_size = filter_length, activation='relu',
                data_format = 'channels_last',border_mode="valid")(conv1)
        ans = Conv2D(nb_filter, kernel_size = filter_length, activation='relu',
                data_format="channels_last",border_mode="valid")(conv2)
                    
            
        #conv1 = GlobalMaxPooling2D()(ques)
        #conv2 = GlobalMaxPooling2D()(ans)
        #conv1 = MaxPooling2D()(ques)
        #conv2 = MaxPooling2D()(ans)
        
        channel_1.append(GlobalMaxPooling2D()(ques))
        channel_2.append(GlobalMaxPooling2D()(ans))
        
        #channel_1.append(GlobalAveragePooling2D()(ques))
        #channel_2.append(GlobalAveragePooling2D()(ans))
    
    h1 = channel_1.pop(-1)
    if channel_1:
        h1 = merge([h1] + channel_1, mode="concat")

    h2 = channel_2.pop(-1)
    if channel_2:
        h2 = merge([h2] + channel_2, mode="concat")
    
    h =  Merge(mode="concat",name='h')([h1, h2])
    #h = Dropout(0.2)(h)
    #h = Dense(50, kernel_regularizer=regularizers.l2(reg2),activation='relu')(h)
    #wrap = Dropout(0.5)(h)
    #wrap = Dense(64, activation='tanh')(h)   
    
    score = Dense(2,activation='softmax',name='score')(h)
    model = Model([inpx, inpy],[score])
    model.compile( loss='categorical_crossentropy',optimizer='adam')
    
    return model 
Author: GauravBh1010tt, Project: DeepLearn, Lines: 63, Source: model.py

Example 10: get_deep_convnet

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def get_deep_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))
    outs = inputs

    outs = (ComplexConv1D(
        16, 6, strides=2, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        32, 3, strides=2, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)
    
    outs = (ComplexConv1D(
        64, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        64, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        128, 3, strides=1, padding='same',
        activation='relu',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexConv1D(
        128, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    #outs = (keras.layers.MaxPooling1D(pool_size=2))
    #outs = (Permute([2, 1]))
    outs = (keras.layers.Flatten())(outs)
    outs = (keras.layers.Dense(2048, activation='relu',
                           kernel_initializer='glorot_normal'))(outs)
    predictions = (keras.layers.Dense(output_size, activation='sigmoid',
                                 bias_initializer=keras.initializers.Constant(value=-5)))(outs)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Author: ChihebTrabelsi, Project: deep_complex_networks, Lines: 63, Source: __init__.py

Example 11: Unet

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def Unet(nClasses, optimizer=None, input_length=1800, nChannels=1):
    inputs = Input((input_length, nChannels))
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling1D(pool_size=2)(conv1)

    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling1D(pool_size=2)(conv2)
    
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling1D(pool_size=2)(conv3)

    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Dropout(0.5)(conv4)
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)

    up1 = Conv1D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv4))
    merge1 = concatenate([up1, conv3], axis=-1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    
    up2 = Conv1D(32, 2, activation='relu', padding='same', kernel_initializer = 'he_normal')(UpSampling1D(size=2)(conv5))
    merge2 = concatenate([up2, conv2], axis=-1)
    conv6 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer = 'he_normal')(merge2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv1D(32, 32, activation='relu', padding='same')(conv6)
    
    up3 = Conv1D(16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv6))
    merge3 = concatenate([up3, conv1], axis=-1)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    
    conv8 = Conv1D(nClasses, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv8 = core.Reshape((nClasses, input_length))(conv8)
    conv8 = core.Permute((2, 1))(conv8)

    conv9 = core.Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=conv9)
    if not optimizer is None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model 
Author: Aiwiscal, Project: ECG_UNet, Lines: 48, Source: Unet.py
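A minimal sketch of the final axis swap (illustrative shapes): Permute((2, 1)) moves the class axis last, so the softmax, which normalizes over the last axis, produces one class distribution per sample point:

from keras.layers import Input
from keras.layers.core import Permute, Activation
from keras.models import Model

x = Input(shape=(3, 1800))                      # (batch, nClasses=3, length=1800)
y = Activation('softmax')(Permute((2, 1))(x))   # -> (batch, 1800, 3)
print(Model(inputs=x, outputs=y).output_shape)  # (None, 1800, 3)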

Example 12: get_tiramisu

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def get_tiramisu(self):
		model = self.model = models.Sequential()
		# cropping
		# model.add(Cropping2D(cropping=((68, 68), (128, 128)), input_shape=(3, 360,480)))

		model.add(Conv2D(48, 
						kernel_size=(3, 3), 
						padding='same', 
						input_shape=(self.img_rows, self.img_cols, self.num_channels),
						kernel_initializer="he_uniform",
						kernel_regularizer = l2(0.0001),
						data_format='channels_last'))

		self.DenseBlock(5,108) # 5*12 = 60 + 48 = 108
		self.TransitionDown(108)
		self.DenseBlock(5,168) # 5*12 = 60 + 108 = 168
		self.TransitionDown(168)
		self.DenseBlock(5,228) # 5*12 = 60 + 168 = 228
		self.TransitionDown(228)
		self.DenseBlock(5,288)# 5*12 = 60 + 228 = 288
		self.TransitionDown(288)
		self.DenseBlock(5,348) # 5*12 = 60 + 288 = 348
		self.TransitionDown(348)

		self.DenseBlock(15,408) # m = 348 + 5*12 = 408

		self.TransitionUp(468, (468, self.img_rows/32, self.img_cols/32), (None, 468, self.img_rows/16, self.img_cols/16))
		self.DenseBlock(5,468)

		self.TransitionUp(408, (408, self.img_rows/16, self.img_cols/16), (None, 408, self.img_rows/8, self.img_cols/8))
		self.DenseBlock(5,408)

		self.TransitionUp(348, (348, self.img_rows/8, self.img_cols/8), (None, 348, self.img_rows/4, self.img_cols/4))
		self.DenseBlock(5,348)

		self.TransitionUp(288, (288, self.img_rows/4, self.img_cols/4), (None, 288, self.img_rows/2, self.img_cols/2))
		self.DenseBlock(5,288)

		self.TransitionUp(228, (228, self.img_rows/2, self.img_cols/2), (None, 228, self.img_rows, self.img_cols))
		self.DenseBlock(5,228)

		model.add(Conv2D(12, 
						kernel_size=(1,1), 
						padding='same',
						kernel_initializer="he_uniform",
						kernel_regularizer = l2(0.0001),
						data_format='channels_last'))
		
		model.add(Reshape((12, self.img_rows * self.img_cols)))
		model.add(Permute((2, 1)))
		model.add(Activation('sigmoid'))
		#model.summary()
		return model 
Author: jackkwok, Project: neural-road-inspector, Lines: 55, Source: tiramisu.py

Example 13: fCreateModel

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    l2_reg = 1e-4

    # (4 stages-each 2 convs)(378,722 params)(for 40x40x10)
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))
    input2D_t = Permute((4, 1, 2, 3))(input_t)
    input2D_t = Reshape(target_shape=(int(patchSize[0, 2]), int(patchSize[0, 0]), int(patchSize[0, 1])))(
        input2D_t)
    # use zDimension as number of channels
    twoD_t = Conv2D(16,
                    kernel_size=(7, 7),
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    strides=(1, 1)
                    )(input2D_t)
    twoD_t = Activation('relu')(twoD_t)

    l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
    l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
    l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

    stage1_res1_t = fCreateMNet_Block(twoD_t, 16, kernel_size=(3, 3), forwarding=True, l2_reg=l2_reg)
    stage1_res2_t = fCreateMNet_Block(stage1_res1_t, 32, kernel_size=(3, 3), forwarding=False, l2_reg=l2_reg)

    stage2_inp_t = fCreateMaxPooling2D(stage1_res2_t, stride=(2, 2))
    stage2_inp_t = concatenate([stage2_inp_t, l_w2_t], axis=1)
    stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg) 
Author: thomaskuestner, Project: CNNArt, Lines: 55, Source: MNetArt.py
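A small sketch (illustrative 40x40x10 patch size) of the Permute + Reshape at the top of fCreateModel, which moves the z dimension in front of the spatial axes and drops the singleton channel so the z slices are treated as 2D channels:

from keras.layers import Input, Permute, Reshape
from keras.models import Model

patch = Input(shape=(1, 40, 40, 10))    # (batch, 1, 40, 40, z=10)
x = Permute((4, 1, 2, 3))(patch)        # -> (batch, 10, 1, 40, 40)
x = Reshape((10, 40, 40))(x)            # -> (batch, 10, 40, 40)
print(Model(inputs=patch, outputs=x).output_shape)  # (None, 10, 40, 40)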

Example 14: fCreateModel

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
        dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
        l2_reg=1e-4

        #(4 stages-each 2 convs)(378,722 params)(for 40x40x10)
        input_t=Input(shape=(1,int(patchSize[0, 0]),int(patchSize[0, 1]), int(patchSize[0, 2])))
        input2D_t=Permute((4,1,2,3))(input_t)
        input2D_t=Reshape(target_shape=(int(patchSize[0, 2]),int(patchSize[0, 0]), int(patchSize[0, 1])))(
            input2D_t)
        #use zDimension as number of channels
        twoD_t=Conv2D(16,
                      kernel_size=(7,7),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg),
                      strides=(1,1)
                      )(input2D_t)
        twoD_t = Activation('relu')(twoD_t)

        l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
        l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
        l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

        stage1_res1_t=fCreateMNet_Block(twoD_t,16,kernel_size=(3,3), forwarding=True, l2_reg=l2_reg)
        stage1_res2_t=fCreateMNet_Block(stage1_res1_t,32,kernel_size=(3,3), forwarding=False, l2_reg=l2_reg)

        stage2_inp_t=fCreateMaxPooling2D(stage1_res2_t, stride=(2,2))
        stage2_inp_t=concatenate([stage2_inp_t,l_w2_t], axis=1)
        stage2_res1_t=fCreateMNet_Block(stage2_inp_t,32,l2_reg=l2_reg)
        stage2_res2_t=fCreateMNet_Block(stage2_res1_t,48, forwarding=False)

        stage3_inp_t=fCreateMaxPooling2D(stage2_res2_t, stride=(2,2))
        stage3_inp_t=concatenate([stage3_inp_t,l_w3_t], axis=1)
        stage3_res1_t=fCreateMNet_Block(stage3_inp_t,48,l2_reg=l2_reg)
        stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False,l2_reg=l2_reg)

        stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
        stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
        stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64,l2_reg=l2_reg)
        stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False,l2_reg=l2_reg)

        after_flat_t = Flatten()(stage4_res2_t)

        after_dense_t = Dense(units=2,
                              kernel_initializer='he_normal',
                              kernel_regularizer=l2(l2_reg))(after_flat_t)
        output_t = Activation('softmax')(after_dense_t)

        cnn = Model(inputs=[input_t], outputs=[output_t])

        opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
        cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
        sArchiSpecs = '3stages_l2{}'.format(l2_reg) 
Author: thomaskuestner, Project: CNNArt, Lines: 55, Source: motion_MNetArt.py

Example 15: create_model

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Permute [as alias]
def create_model(input_shape, config, is_training=True):

    weight_decay = 0.001

    model = Sequential()

    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))

    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y*c)))

    model.add(Bidirectional(LSTM(256, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))

    return model 
Author: HPI-DeepLearning, Project: crnn-lid, Lines: 47, Source: crnn.py


Note: The keras.layers.core.Permute examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.