This article collects typical usage examples of the Python method keras.layers.convolutional.Convolution2D. If you are unsure what Convolution2D does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore the containing module, keras.layers.convolutional, for more context.
The following 15 code examples all use convolutional.Convolution2D. By default they are ordered by popularity; upvoting the examples you find useful helps surface better Python samples.
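Note: the examples below mix two generations of the Keras API. Convolution2D is the Keras 1.x name (kept as an alias of Conv2D in Keras 2.x), and several argument names changed between the two: border_mode became padding, subsample became strides, init became kernel_initializer, W_regularizer became kernel_regularizer, and bias became use_bias. A minimal sketch of the mapping, assuming a Keras 2.x install that still exports the alias (the Keras 1.x call is shown as a comment for comparison):

from keras.layers import Input, Convolution2D   # alias of Conv2D in Keras 2.x
from keras.regularizers import l2

inputs = Input(shape=(32, 32, 3))
# Keras 1.x style:
#   Convolution2D(64, 3, 3, border_mode='same', subsample=(2, 2),
#                 init='he_uniform', W_regularizer=l2(1e-4), bias=False)(inputs)
# Keras 2.x equivalent:
x = Convolution2D(64, (3, 3), padding='same', strides=(2, 2),
                  kernel_initializer='he_uniform',
                  kernel_regularizer=l2(1e-4), use_bias=False)(inputs)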
Example 1: __init__
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def __init__(self, model_path=None):
    if model_path is not None:
        self.model = self.load_model(model_path)
    else:
        # VGG16 last conv features
        inputs = Input(shape=(7, 7, 512))
        x = Convolution2D(128, 1, 1)(inputs)
        x = Flatten()(x)
        # Cls head
        h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_cls = Dropout(p=0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)
        # Reg head
        h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_reg = Dropout(p=0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)
        # Joint model
        self.model = Model(input=inputs, output=[cls_head, reg_head])
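The constructor above only builds the two-headed network. A hedged usage sketch follows, with random arrays standing in for pooled VGG16 features and with illustrative losses and optimizer (these choices are assumptions, not taken from the source project); self.model is referred to as model here:

import numpy as np

features = np.random.rand(8, 7, 7, 512)   # stand-in for VGG16 conv features
y_cls = np.random.rand(8, 20)             # 20-way classification targets
y_reg = np.random.rand(8, 4)              # bounding-box regression targets
model.compile(optimizer='sgd',
              loss={'cls': 'categorical_crossentropy', 'reg': 'mse'},
              loss_weights={'cls': 1.0, 'reg': 1.0})
model.fit(features, {'cls': y_cls, 'reg': y_reg}, batch_size=4, nb_epoch=1)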
Example 2: get_residual_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:  # size to be changed to 32,32
        model.add(ZeroPadding2D((2, 2), input_shape=(img_channels, img_rows, img_cols)))  # resize (28,28)-->(32,32)
        # the first conv
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same', input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # [Classifier]
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # [END]
    return model
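get_residual_model delegates the residual stack to design_for_residual_blocks, which lives elsewhere in the source repository. Purely as an illustration of the idea behind such a block (not that repository's implementation), a single identity residual block in the Keras 2 functional API might look like:

from keras.layers import Input, Convolution2D, BatchNormalization, Activation, Add
from keras.models import Model

def simple_residual_block(x, n_filters):
    # pre-activation: BN -> ReLU -> Conv, twice, then add the shortcut back
    shortcut = x
    y = BatchNormalization()(x)
    y = Activation('relu')(y)
    y = Convolution2D(n_filters, (3, 3), padding='same')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Convolution2D(n_filters, (3, 3), padding='same')(y)
    return Add()([y, shortcut])

inputs = Input(shape=(32, 32, 128))
block = Model(inputs, simple_residual_block(inputs, 128))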
Example 3: test_img_clf
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def test_img_clf(self):
    print('image classification data:')
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32),
                                                         classification=True, nb_class=2)
    print('X_train:', X_train.shape)
    print('X_test:', X_test.shape)
    print('y_train:', y_train.shape)
    print('y_test:', y_test.shape)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    model = Sequential()
    model.add(Convolution2D(32, 3, 32, 32))
    model.add(Activation('sigmoid'))
    model.add(Flatten())
    model.add(Dense(32, y_test.shape[-1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
    self.assertTrue(history.history['val_acc'][-1] > 0.9)
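This test targets a very old (pre-1.0) Keras API: Convolution2D(32, 3, 32, 32) appears to pass the filter count, input channel count, and kernel size positionally, Dense(32, y_test.shape[-1]) takes input and output dimensions, and fit still accepts show_accuracy and nb_epoch. A hedged sketch of a comparable (not line-for-line identical) test model in Keras 2 syntax, with random data replacing get_test_data:

import numpy as np
from keras.models import Sequential
from keras.layers import Convolution2D, Activation, Flatten, Dense
from keras.utils import to_categorical

X_train = np.random.rand(64, 3, 32, 32)
y_train = to_categorical(np.random.randint(0, 2, 64), 2)

model = Sequential()
model.add(Convolution2D(8, (3, 3), data_format='channels_first', input_shape=(3, 32, 32)))
model.add(Activation('sigmoid'))
model.add(Flatten())
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=1, batch_size=16, verbose=2)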
Example 4: buildmodel
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def buildmodel():
    print("Model building begins")
    model = Sequential()
    keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)
    S = Input(shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS), name='Input')
    h0 = Convolution2D(16, kernel_size=(8, 8), strides=(4, 4), activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(S)
    h1 = Convolution2D(32, kernel_size=(4, 4), strides=(2, 2), activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h0)
    h2 = Flatten()(h1)
    h3 = Dense(256, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h2)
    P = Dense(1, name='o_P', activation='sigmoid', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h3)
    V = Dense(1, name='o_V', kernel_initializer='random_uniform', bias_initializer='random_uniform')(h3)
    model = Model(inputs=S, outputs=[P, V])
    rms = RMSprop(lr=LEARNING_RATE, rho=0.99, epsilon=0.1)
    model.compile(loss={'o_P': logloss, 'o_V': sumofsquares}, loss_weights={'o_P': 1., 'o_V': 0.5}, optimizer=rms)
    return model
# function to preprocess an image before giving it as input to the neural network
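buildmodel() relies on module-level constants (IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS, LEARNING_RATE) and two custom losses (logloss, sumofsquares) defined elsewhere in the source script. The placeholders below are purely hypothetical stand-ins so the snippet can be exercised on its own; they are not the author's definitions:

import keras.backend as K

IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS = 84, 84, 4   # hypothetical frame size and stack depth
LEARNING_RATE = 1e-4                                  # hypothetical

def logloss(y_true, y_pred):
    # hypothetical placeholder: clipped log-likelihood for the policy head
    return -K.mean(y_true * K.log(y_pred + 1e-10) + (1 - y_true) * K.log(1 - y_pred + 1e-10))

def sumofsquares(y_true, y_pred):
    # hypothetical placeholder: summed squared error for the value head
    return K.sum(K.square(y_pred - y_true), axis=-1)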
Example 5: dc_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def dc_model(self):
    model = Sequential()
    model.add(Dense(256 * 8 * 8, activation=LeakyReLU(0.2), input_dim=self.LATENT_SPACE_SIZE))
    model.add(BatchNormalization())
    model.add(Reshape((8, 8, 256)))
    model.add(UpSampling2D())
    model.add(Convolution2D(128, 5, 5, border_mode='same', activation=LeakyReLU(0.2)))
    model.add(BatchNormalization())
    model.add(UpSampling2D())
    model.add(Convolution2D(64, 5, 5, border_mode='same', activation=LeakyReLU(0.2)))
    model.add(BatchNormalization())
    model.add(UpSampling2D())
    model.add(Convolution2D(self.C, 5, 5, border_mode='same', activation='tanh'))
    return model
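dc_model is written as a method, so LATENT_SPACE_SIZE and C come from the enclosing class. A hedged usage sketch, assuming a hypothetical instance g with LATENT_SPACE_SIZE = 100 and C = 3: three 2x upsamplings from the 8x8 seed give 64x64 outputs.

import numpy as np

generator = g.dc_model()                       # g is the hypothetical class instance
noise = np.random.normal(0, 1, (16, 100))      # batch of latent vectors
fake_images = generator.predict(noise)         # shape (16, 64, 64, 3), values in [-1, 1] from tanh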
Example 6: model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def model(self):
    input_layer = Input(self.SHAPE)
    up_layer_1 = Convolution2D(64, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(input_layer)
    up_layer_2 = Convolution2D(64 * 2, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(up_layer_1)
    norm_layer_1 = InstanceNormalization()(up_layer_2)
    up_layer_3 = Convolution2D(64 * 4, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(norm_layer_1)
    norm_layer_2 = InstanceNormalization()(up_layer_3)
    up_layer_4 = Convolution2D(64 * 8, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(norm_layer_2)
    norm_layer_3 = InstanceNormalization()(up_layer_4)
    output_layer = Convolution2D(1, kernel_size=4, strides=1, padding='same')(norm_layer_3)
    output_layer_1 = Flatten()(output_layer)
    output_layer_2 = Dense(1, activation='sigmoid')(output_layer_1)
    return Model(input_layer, output_layer_2)
Example 7: model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def model(self):
    input_A = Input(shape=self.SHAPE)
    input_B = Input(shape=self.SHAPE)
    input_layer = Concatenate(axis=-1)([input_A, input_B])
    up_layer_1 = Convolution2D(self.FS, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(input_layer)
    up_layer_2 = Convolution2D(self.FS * 2, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(up_layer_1)
    leaky_layer_2 = BatchNormalization(momentum=0.8)(up_layer_2)
    up_layer_3 = Convolution2D(self.FS * 4, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(leaky_layer_2)
    leaky_layer_3 = BatchNormalization(momentum=0.8)(up_layer_3)
    up_layer_4 = Convolution2D(self.FS * 8, kernel_size=4, strides=2, padding='same', activation=LeakyReLU(alpha=0.2))(leaky_layer_3)
    leaky_layer_4 = BatchNormalization(momentum=0.8)(up_layer_4)
    output_layer = Convolution2D(1, kernel_size=4, strides=1, padding='same')(leaky_layer_4)
    return Model([input_A, input_B], output_layer)
Example 8: build_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def build_model(self):
    model = Sequential()
    model.add(Convolution2D(
        16, 8, 8, input_shape=(self.num_frames,) + self.frame_dim,
        subsample=(4, 4), activation="relu", init="he_uniform"
    ))
    model.add(Convolution2D(
        16, 4, 4, subsample=(2, 2), activation="relu", init="he_uniform"
    ))
    model.add(Convolution2D(
        32, 3, 3, subsample=(1, 1), activation="relu", init="he_uniform"
    ))
    model.add(Flatten())
    model.add(Dense(
        512, activation="relu", init="he_uniform"
    ))
    model.add(Dense(
        self.num_actions, activation="linear", init="he_uniform"
    ))
    model.compile(loss=self.q_loss, optimizer=self.optimizer)
    self.model = model
Example 9: conv_block
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def conv_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 3x3, Conv2D, optional dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added
    '''
    x = Activation('relu')(ip)
    x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
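A hedged usage sketch for conv_block, assuming a Keras 1.x environment (where the init/border_mode/W_regularizer/bias arguments above are valid) and that the layer/regularizer imports used inside the function are already in scope:

from keras.layers import Input
from keras.models import Model

ip = Input(shape=(32, 32, 16))
out = conv_block(ip, nb_filter=12, dropout_rate=0.2)
model = Model(input=ip, output=out)   # Keras 1.x Model signature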
Example 10: transition_block
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 1x1, Conv2D, optional dropout and Maxpooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, maxpool
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
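In a DenseNet these two helpers are chained: several conv_block outputs are concatenated back onto their inputs to form a dense block, and transition_block then compresses and downsamples between dense blocks. A minimal, hedged sketch of that wiring in Keras 1.x style (illustrative only, not the source repository's dense_block):

from keras.layers import Input, merge   # Keras 1.x merge
from keras.models import Model

ip = Input(shape=(32, 32, 16))
x = ip
for _ in range(4):                                        # a 4-layer dense block
    cb = conv_block(x, nb_filter=12, dropout_rate=0.2)
    x = merge([x, cb], mode='concat', concat_axis=-1)     # dense connectivity
x = transition_block(x, nb_filter=64, dropout_rate=0.2)
model = Model(input=ip, output=x)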
Example 11: expand_conv
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def expand_conv(init, base, k, strides=(1, 1)):
    x = Convolution2D(base * k, (3, 3), padding='same', strides=strides, kernel_initializer='he_normal',
                      use_bias=False)(init)
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal',
                      use_bias=False)(x)
    skip = Convolution2D(base * k, (1, 1), padding='same', strides=strides, kernel_initializer='he_normal',
                         use_bias=False)(init)
    m = Add()([x, skip])
    return m
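A hedged usage sketch for expand_conv: in a Wide ResNet it widens the channel count to base * k and optionally downsamples at the start of a group, and the merged output is typically followed by BatchNormalization and ReLU before further blocks. Illustrative only:

from keras.layers import Input, BatchNormalization, Activation
from keras.models import Model
import keras.backend as K

channel_axis = 1 if K.image_data_format() == "channels_first" else -1
ip = Input(shape=(32, 32, 16))
x = expand_conv(ip, base=16, k=2, strides=(2, 2))   # widen by k=2 and halve the spatial size
x = BatchNormalization(axis=channel_axis)(x)
x = Activation('relu')(x)
model = Model(ip, x)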
Example 12: conv2d_bn
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1), bias=False):
    """
    Utility function to apply conv + BN.
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      border_mode=border_mode,
                      bias=bias)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
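A hedged usage sketch (Keras 1.x-style arguments, matching the subsample/border_mode parameters above), stacking a few conv + BN units roughly the way the Inception v3 stem does:

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(299, 299, 3))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
model = Model(input=img_input, output=x)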
Example 13: conv_block
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def conv_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 3x3, Conv2D, optional dropout
    Args:
        input: Input keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added
    '''
    x = Activation('relu')(input)
    x = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same", use_bias=False,
                      kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
Example 14: transition_block
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def transition_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu 1x1, Conv2D, optional dropout and Maxpooling2D
    Args:
        input: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, maxpool
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    x = Convolution2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same", use_bias=False,
                      kernel_regularizer=l2(weight_decay))(input)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
Example 15: cnn
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution2D [as alias]
def cnn(trn_set, tst_set):
    trn_x, trn_y = trn_set
    trn_y = np.squeeze(trn_y, axis=2)
    tst_x, tst_y = tst_set
    tst_y = np.squeeze(tst_y, axis=2)
    model = Sequential()
    model.add(Convolution2D(2, 5, 5, activation='sigmoid', input_shape=(1, 28, 28)))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.1))
    return model, trn_x, trn_y, tst_x, tst_y
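A hedged driver for cnn(), with random MNIST-shaped arrays standing in for the real data. Note the channels-first input_shape=(1, 28, 28) (so a channels-first image ordering is assumed) and the trailing singleton label axis that the np.squeeze(..., axis=2) calls expect:

import numpy as np

def fake_set(n):
    x = np.random.rand(n, 1, 28, 28).astype('float32')
    y = np.eye(10)[np.random.randint(0, 10, n)][:, :, None]   # one-hot labels, shape (n, 10, 1)
    return x, y

model, trn_x, trn_y, tst_x, tst_y = cnn(fake_set(256), fake_set(64))
model.fit(trn_x, trn_y, batch_size=32, validation_data=(tst_x, tst_y))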
################################################################################