This article collects typical usage examples of the Python method keras.layers.normalization.BatchNormalization. If you have been wondering what normalization.BatchNormalization does, when to use it, or how to call it, the curated code samples below should help. You can also explore the module it belongs to, keras.layers.normalization, for further usage examples.
Below are 15 code examples of normalization.BatchNormalization, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
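Before the examples, a minimal sketch of the layer in isolation may help orient readers: BatchNormalization normalizes the previous layer's activations feature-wise, and the axis argument selects the feature axis (axis=1 for channels_first data, axis=-1 for channels_last). The shapes below are illustrative only, not taken from any of the examples:

from keras.layers import Input, Conv2D, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model

# Minimal illustration; the 32x32x3 input shape is arbitrary.
inp = Input(shape=(32, 32, 3))
x = Conv2D(16, (3, 3), padding='same')(inp)
x = BatchNormalization(axis=-1)(x)  # normalize over the channel axis
x = Activation('relu')(x)
model = Model(inp, x)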
Example 1: get_residual_model
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:  # size to be changed to 32,32
        model.add(ZeroPadding2D((2, 2), input_shape=(img_channels, img_rows, img_cols)))  # resize (28,28)-->(32,32)
        # the first conv
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))

    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))

    # [Classifier]
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # [END]
    return model
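A hedged usage sketch for this builder: nb_classes and design_for_residual_blocks are free names that come from the example's original module and are not shown here, so both must be defined before this runs.

# Hypothetical usage; nb_classes and design_for_residual_blocks must exist
# in the surrounding module for get_residual_model() to work.
model = get_residual_model(is_mnist=True)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()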
Example 2: ann_model
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def ann_model(input_shape):
    inp = Input(shape=input_shape, name='mfcc_in')
    model = inp
    model = Conv1D(filters=12, kernel_size=3, activation='relu')(model)
    model = Conv1D(filters=12, kernel_size=3, activation='relu')(model)
    model = Flatten()(model)
    model = Dense(56)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)
    model = Dropout(0.2)(model)
    model = Dense(28)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)
    model = Dense(1)(model)
    model = Activation('sigmoid')(model)
    model = Model(inp, model)
    return model
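Because the head is a single sigmoid unit, this network is a binary classifier. A plausible way to compile it, assuming MFCC windows of shape (99, 13) (the shape is an assumption, not given in the source):

# Illustrative input shape; the real MFCC frame/coefficient counts are not shown.
model = ann_model(input_shape=(99, 13))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])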
Example 3: DCGAN_discriminator
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def DCGAN_discriminator():
    nb_filters = 64
    nb_conv = int(np.floor(np.log(128) / np.log(2)))
    list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    input_img = Input(shape=(128, 128, 3))
    x = Conv2D(list_filters[0], (3, 3), strides=(2, 2), name="disc_conv2d_1", padding="same")(input_img)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(0.2)(x)

    # Next convs
    for i, f in enumerate(list_filters[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
        x = BatchNormalization(axis=-1)(x)
        x = LeakyReLU(0.2)(x)

    x_flat = Flatten()(x)
    x_out = Dense(1, activation="sigmoid", name="disc_dense")(x_flat)

    discriminator_model = Model(inputs=input_img, outputs=[x_out])
    return discriminator_model
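The seven stride-2 convolutions reduce the 128x128 input to 1x1 before the sigmoid head. A hedged usage sketch (the loss and optimizer are typical DCGAN choices, not taken from the source):

# Sketch only: the optimizer and loss are assumptions.
disc = DCGAN_discriminator()
disc.compile(loss='binary_crossentropy', optimizer='adam')
disc.summary()  # (128, 128, 3) in, one sigmoid probability out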
Example 4: test_weight_init
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
# Note: this test targets the pre-1.0 Keras API (mode=, get_output, .input assignment).
def test_weight_init(self):
    """
    Test weight initialization
    """
    norm_m1 = normalization.BatchNormalization((10,), mode=1,
                                               weights=[np.ones(10), np.ones(10)])

    for inp in [self.input_1, self.input_2, self.input_3]:
        norm_m1.input = inp
        out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
        self.assertAlmostEqual(out.mean().eval(), 0.0)
        if inp.std() > 0.:
            self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
        else:
            self.assertAlmostEqual(out.std().eval(), 0.0, places=2)

    assert_allclose(norm_m1.gamma.eval(), np.ones(10))
    assert_allclose(norm_m1.beta.eval(), np.ones(10))

    # Weights must be an iterable of gamma AND beta: pass the class itself as
    # the callable so assertRaises catches the constructor's failure.
    self.assertRaises(Exception, normalization.BatchNormalization,
                      (10,), weights=np.ones(10))
Example 5: conv_block
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def conv_block(input_tensor, filters, strides, d_rates):
    x = Conv2D(filters[0], kernel_size=1, dilation_rate=d_rates[0])(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[1], kernel_size=3, strides=strides, padding='same', dilation_rate=d_rates[1])(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[2], kernel_size=1, dilation_rate=d_rates[2])(x)
    x = BatchNormalization()(x)

    shortcut = Conv2D(filters[2], kernel_size=1, strides=strides)(input_tensor)
    shortcut = BatchNormalization()(shortcut)

    x = add([x, shortcut])
    x = Activation('relu')(x)
    return x
Example 6: identity_block
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def identity_block(input_tensor, filters, d_rates):
    x = Conv2D(filters[0], kernel_size=1, dilation_rate=d_rates[0])(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[1], kernel_size=3, padding='same', dilation_rate=d_rates[1])(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[2], kernel_size=1, dilation_rate=d_rates[2])(x)
    x = BatchNormalization()(x)

    x = add([x, input_tensor])
    x = Activation('relu')(x)
    return x
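conv_block and identity_block compose in the usual ResNet fashion: one projection block that changes width and stride, followed by identity blocks at the same width. A hedged sketch (filter counts and dilation rates are illustrative, not from the source):

# Illustrative stage; assumes the same imports as the blocks above.
inputs = Input(shape=(224, 224, 3))
x = conv_block(inputs, filters=[64, 64, 256], strides=2, d_rates=[1, 1, 1])
x = identity_block(x, filters=[64, 64, 256], d_rates=[1, 1, 1])
x = identity_block(x, filters=[64, 64, 256], d_rates=[1, 1, 1])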
Example 7: __transition_block
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, avg-pool
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal',
               padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # global context block
    x = global_context_block(x)
    return x
Example 8: conv_factory
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def conv_factory(x, concat_axis, nb_filter,
                 dropout_rate=None, weight_decay=1E-4):
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (5, 5), dilation_rate=(2, 2),
               kernel_initializer="he_uniform",
               padding="same",
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x

# define dense block
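The trailing comment announces a dense block that the excerpt cuts off. A hedged sketch of the standard DenseNet pattern built on conv_factory (the function name and growth-rate argument are assumptions, not from the source):

from keras.layers import concatenate

def dense_block(x, concat_axis, nb_layers, growth_rate,
                dropout_rate=None, weight_decay=1E-4):
    # Each conv_factory output is concatenated onto the running feature map.
    for _ in range(nb_layers):
        cb = conv_factory(x, concat_axis, growth_rate, dropout_rate, weight_decay)
        x = concatenate([x, cb], axis=concat_axis)
    return x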
Example 9: build_model
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def build_model():
    """
    Define the model
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
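build_model reads its sizes from an external Conf object that the excerpt omits. A hypothetical stand-in, assuming Conf.LAYERS holds [n_features, sequence_length, hidden_units, output_dim]:

# Hypothetical Conf; the real layout of Conf.LAYERS is not shown in the source.
class Conf:
    LAYERS = [1, 50, 100, 1]

model = build_model()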
Example 10: generator
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def generator(input_dim, alpha=0.2):
    model = Sequential()
    model.add(Dense(input_dim=input_dim, output_dim=4*4*512))
    model.add(Reshape(target_shape=(4, 4, 512)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same'))
    model.add(Activation('tanh'))
    return model

# Define the Discriminator Network
Example 11: discriminator
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def discriminator(img_dim, alpha=0.2):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=5, strides=2,
                     padding='same',
                     input_shape=img_dim))
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Flatten())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

# Define a combination of Generator and Discriminator
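The closing comment points at a generator-discriminator combination that the excerpt does not show. A hedged sketch of the usual stacking (the 100-dim latent size and Adam settings are assumptions):

# Sketch only: latent size and optimizer settings are assumptions.
from keras.models import Sequential
from keras.optimizers import Adam

g = generator(input_dim=100)            # noise -> 32x32x3 image (4 -> 8 -> 16 -> 32)
d = discriminator(img_dim=(32, 32, 3))
d.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5))

d.trainable = False                     # freeze the discriminator inside the stack
gan = Sequential([g, d])
gan.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5))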
Example 12: __transition_block
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, avg-pool
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal',
               padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
Example 13: _build_image_embedding
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def _build_image_embedding(self):
    image_model = InceptionV3(include_top=False, weights='imagenet',
                              pooling='avg')
    for layer in image_model.layers:
        layer.trainable = False

    dense_input = BatchNormalization(axis=-1)(image_model.output)
    image_dense = Dense(units=self._embedding_size,
                        kernel_regularizer=self._regularizer,
                        kernel_initializer=self._initializer)(dense_input)
    # Add timestep dimension
    image_embedding = RepeatVector(1)(image_dense)

    image_input = image_model.input
    return image_input, image_embedding
Example 14: _build_sequence_model
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def _build_sequence_model(self, sequence_input):
    RNN = GRU if self._rnn_type == 'gru' else LSTM

    def rnn():
        rnn = RNN(units=self._rnn_output_size,
                  return_sequences=True,
                  dropout=self._dropout_rate,
                  recurrent_dropout=self._dropout_rate,
                  kernel_regularizer=self._regularizer,
                  kernel_initializer=self._initializer,
                  implementation=2)
        rnn = Bidirectional(rnn) if self._bidirectional_rnn else rnn
        return rnn

    input_ = sequence_input
    for _ in range(self._rnn_layers):
        input_ = BatchNormalization(axis=-1)(input_)
        rnn_out = rnn()(input_)
        input_ = rnn_out

    time_dist_dense = TimeDistributed(Dense(units=self._vocab_size))(rnn_out)
    return time_dist_dense
Example 15: transition_block
# Required import: from keras.layers import normalization [as alias]
# Or: from keras.layers.normalization import BatchNormalization [as alias]
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply 1x1 Conv2D, optional dropout, AveragePooling2D and BatchNorm
    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying conv, dropout, avg-pool, batch_norm
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
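This last example uses the Keras 1 API (Convolution2D, init=, border_mode=, W_regularizer=, BatchNormalization(mode=0)). For readers on Keras 2, a hedged translation of the same block:

# Keras 2 sketch of the block above: mode= was removed and Conv2D
# replaces Convolution2D with renamed keyword arguments.
def transition_block_v2(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_uniform', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x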