本文整理汇总了Python中keras.initializers.VarianceScaling方法的典型用法代码示例。如果您正苦于以下问题:Python initializers.VarianceScaling方法的具体用法?Python initializers.VarianceScaling怎么用?Python initializers.VarianceScaling使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块keras.initializers的用法示例。
在下文中一共展示了initializers.VarianceScaling方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: conv2d_bn
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import VarianceScaling [as 别名]
def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """Apply a 2D convolution followed by batch normalisation and ReLU.

    (Slightly modified from
    https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)

    Parameters
    ----------
    x: tensor
        Input tensor.
    nb_filter: int
        Number of convolution filters.
    num_row, num_col: int
        Height and width of the convolution kernel.
    padding: str, optional
        Convolution padding mode. Default: ``'same'``
    strides: tuple, optional
        Convolution strides. Default: ``(1, 1)``
    use_bias: bool, optional
        Whether the convolution layer uses a bias vector. Default: ``False``

    Returns
    -------
    tensor
        Output tensor after conv + BN + ReLU.
    """
    # BatchNormalization must normalise over the channel axis, whose
    # position depends on the backend's image data format.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    # He-style initialisation (scale=2.0, fan_in, normal) as used by
    # the reference Inception-v3 implementation.
    conv = Convolution2D(
        nb_filter, (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        kernel_regularizer=regularizers.l2(0.00004),
        kernel_initializer=initializers.VarianceScaling(
            scale=2.0, mode='fan_in', distribution='normal', seed=None))
    x = conv(x)
    x = BatchNormalization(axis=bn_axis, momentum=0.9997, scale=False)(x)
    return Activation('relu')(x)
示例2: conv2d_bn
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import VarianceScaling [as 别名]
def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """Convolution + batch-norm + ReLU building block.

    (Slightly modified from
    https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)

    Parameters
    ----------
    x: tensor
        Input tensor.
    nb_filter: int
        Number of output filters.
    num_row, num_col: int
        Kernel height and width.
    padding: str, optional
        Padding mode passed to the convolution. Default: ``'same'``
    strides: tuple, optional
        Convolution strides. Default: ``(1, 1)``
    use_bias: bool, optional
        Whether the convolution adds a bias term. Default: ``False``

    Returns
    -------
    tensor
        Activated, normalised convolution output.
    """
    # Channel axis sits first for 'channels_first' backends, last otherwise.
    if K.image_data_format() == 'channels_first':
        norm_axis = 1
    else:
        norm_axis = -1

    # He-normal style initialiser, matching the reference implementation.
    he_init = initializers.VarianceScaling(
        scale=2.0, mode='fan_in', distribution='normal', seed=None)

    x = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.00004),
                      kernel_initializer=he_init)(x)
    x = BatchNormalization(axis=norm_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x
示例3: res_block
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import VarianceScaling [as 别名]
def res_block(self, input_tensor, filters, kernel_size=3, padding="same", **kwargs):
    """ Residual block.

    Applies LeakyReLU -> conv -> LeakyReLU -> conv and adds the result back
    onto ``input_tensor`` (identity shortcut), followed by a final LeakyReLU.

    Parameters
    ----------
    input_tensor: tensor
        The input tensor to the layer
    filters: int
        The dimensionality of the output space (i.e. the number of output filters in the
        convolution)
    kernel_size: int, optional
        An integer or tuple/list of 2 integers, specifying the height and width of the 2D
        convolution window. Can be a single integer to specify the same value for all spatial
        dimensions. Default: 3
    padding: ["valid", "same"], optional
        The padding to use. Default: `"same"`
    kwargs: dict
        Any additional Keras standard layer keyword arguments

    Returns
    -------
    tensor
        The output tensor from the Upscale layer
    """
    logger.debug("input_tensor: %s, filters: %s, kernel_size: %s, kwargs: %s)",
                 input_tensor, filters, kernel_size, kwargs)
    # Layer names are derived from the input's spatial size (shape[1]);
    # assumes channels-last ordering — TODO(review): confirm.
    name = self._get_name("residual_{}".format(input_tensor.shape[1]))
    var_x = LeakyReLU(alpha=0.2, name="{}_leakyrelu_0".format(name))(input_tensor)
    if self.use_reflect_padding:
        # Explicit reflection padding replaces the conv's own padding, so the
        # convolution below must run with padding="valid" to avoid padding twice.
        var_x = ReflectionPadding2D(stride=1,
                                    kernel_size=kernel_size,
                                    name="{}_reflectionpadding2d_0".format(name))(var_x)
        padding = "valid"
    var_x = self.conv2d(var_x, filters,
                        kernel_size=kernel_size,
                        padding=padding,
                        name="{}_conv2d_0".format(name),
                        **kwargs)
    var_x = LeakyReLU(alpha=0.2, name="{}_leakyrelu_1".format(name))(var_x)
    if self.use_reflect_padding:
        # Same padding substitution for the second convolution.
        var_x = ReflectionPadding2D(stride=1,
                                    kernel_size=kernel_size,
                                    name="{}_reflectionpadding2d_1".format(name))(var_x)
        padding = "valid"
    if not self.use_convaware_init:
        # Temporarily swap the caller's kernel initializer for a small-scale
        # VarianceScaling on the final conv (skipped when convolution-aware
        # initialization is enabled). The original initializer is captured so
        # it can be restored below.
        original_init = self._switch_kernel_initializer(kwargs, VarianceScaling(
            scale=0.2,
            mode="fan_in",
            distribution="uniform"))
    var_x = self.conv2d(var_x, filters,
                        kernel_size=kernel_size,
                        padding=padding,
                        **kwargs)
    if not self.use_convaware_init:
        # Restore the caller's initializer: kwargs is mutated in place by
        # _switch_kernel_initializer, so this undoes the swap for any later use.
        self._switch_kernel_initializer(kwargs, original_init)
    # Identity shortcut: add the block input back onto the conv output.
    var_x = Add()([var_x, input_tensor])
    var_x = LeakyReLU(alpha=0.2, name="{}_leakyrelu_3".format(name))(var_x)
    return var_x
# <<< Unbalanced Model Blocks >>> #
示例4: main
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import VarianceScaling [as 别名]
def main():
    """Build, compile and train a two-task (income / marital) MMoE classifier."""
    # Load the data
    (train_data, train_label, validation_data, validation_label,
     test_data, test_label, output_info) = data_preparation()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Set up the input layer
    model_input = Input(shape=(num_features,))

    # Set up MMoE layer: one shared mixture-of-experts producing a
    # per-task output tensor.
    expert_outputs = MMoE(
        units=4,
        num_experts=8,
        num_tasks=2
    )(model_input)

    # Build one tower + softmax head per task on top of the MMoE outputs
    task_outputs = []
    for task_index, expert_output in enumerate(expert_outputs):
        hidden = Dense(
            units=8,
            activation='relu',
            kernel_initializer=VarianceScaling())(expert_output)
        head = Dense(
            units=output_info[task_index][0],
            name=output_info[task_index][1],
            activation='softmax',
            kernel_initializer=VarianceScaling())(hidden)
        task_outputs.append(head)

    # Compile model
    model = Model(inputs=[model_input], outputs=task_outputs)
    model.compile(
        loss={'income': 'binary_crossentropy', 'marital': 'binary_crossentropy'},
        optimizer=Adam(),
        metrics=['accuracy']
    )

    # Print out model architecture summary
    model.summary()

    # Train the model, tracking per-split ROC AUC via the callback.
    model.fit(
        x=train_data,
        y=train_label,
        validation_data=(validation_data, validation_label),
        callbacks=[
            ROCCallback(
                training_data=(train_data, train_label),
                validation_data=(validation_data, validation_label),
                test_data=(test_data, test_label)
            )
        ],
        epochs=100
    )
示例5: main
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import VarianceScaling [as 别名]
def main():
    """Build, compile and train a two-task (y0 / y1) MMoE regression model."""
    # Load the data
    (train_data, train_label, validation_data, validation_label,
     test_data, test_label) = data_preparation()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Set up the input layer
    net_input = Input(shape=(num_features,))

    # Set up MMoE layer: shared experts with one gated output per task.
    shared_outputs = MMoE(
        units=16,
        num_experts=8,
        num_tasks=2
    )(net_input)

    output_info = ['y0', 'y1']

    # One ReLU tower + single-unit linear head per task
    heads = []
    for task_idx, shared_output in enumerate(shared_outputs):
        tower = Dense(
            units=8,
            activation='relu',
            kernel_initializer=VarianceScaling())(shared_output)
        heads.append(Dense(
            units=1,
            name=output_info[task_idx],
            activation='linear',
            kernel_initializer=VarianceScaling())(tower))

    # Compile model
    model = Model(inputs=[net_input], outputs=heads)
    # Only the smallest learning rate is used here; the others are candidates.
    learning_rates = [1e-4, 1e-3, 1e-2]
    model.compile(
        loss={'y0': 'mean_squared_error', 'y1': 'mean_squared_error'},
        optimizer=Adam(lr=learning_rates[0]),
        metrics=[metrics.mae]
    )

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(
        x=train_data,
        y=train_label,
        validation_data=(validation_data, validation_label),
        epochs=100
    )