This article collects typical usage examples of the Python method keras.regularizers.l2. If you have been wondering what regularizers.l2 does, or how to use it in practice, the curated code samples below may help. You can also explore the parent module keras.regularizers for further usage examples.
The following 15 code examples of regularizers.l2 are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
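Before the project-specific examples, here is a minimal, self-contained sketch (not drawn from any project below) of what regularizers.l2 does: calling l2(factor) returns a regularizer object that adds factor * sum(w ** 2) to the model's loss for whatever weights it is attached to.

from keras import regularizers
from keras.layers import Dense
from keras.models import Sequential

# Attach an l2 penalty of 0.01 * sum(w ** 2) to the kernel and bias weights.
model = Sequential([
    Dense(64, activation='relu', input_shape=(20,),
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01)),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')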
Example 1: weather_l2
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def weather_l2(hidden_nums=100, l2=0.01):
    # The parameter named l2 shadows keras.regularizers.l2, which is why
    # the regularizer is referenced as regularizers.l2 below.
    input_img = Input(shape=(37,))
    hn = Dense(hidden_nums, activation='relu')(input_img)
    hn = Dense(hidden_nums, activation='relu',
               kernel_regularizer=regularizers.l2(l2))(hn)
    # Autoencoder head: reconstructs the 37 input features
    out_u = Dense(37, activation='sigmoid',
                  name='ae_part')(hn)
    # Prediction head: outputs the forecast values
    out_sig = Dense(37, activation='linear',
                    name='pred_part')(hn)
    out_both = concatenate([out_u, out_sig], axis=1, name='concatenate')
    #weather_model = Model(input_img, outputs=[out_ae, out_pred])
    mve_model = Model(input_img, outputs=[out_both])
    # mve_loss is a custom loss function defined elsewhere in the source project
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])
    return mve_model
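Note that compile references mve_loss, a custom loss defined elsewhere in the source project. A hedged sketch of instantiating the model, with a hypothetical stand-in loss (assumed to live in the same module as weather_l2 so the name resolves):

import keras.backend as K

# Placeholder for the project's custom mve_loss; plain MSE, for illustration only.
def mve_loss(y_true, y_pred):
    return K.mean(K.square(y_true - y_pred), axis=-1)

model = weather_l2(hidden_nums=100, l2=0.01)
model.summary()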
Example 2: cudnn_lstm_block
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def cudnn_lstm_block(unit_nr, return_sequences, bidirectional,
                     kernel_reg_l2, recurrent_reg_l2, bias_reg_l2,
                     use_batch_norm, batch_norm_first,
                     dropout, dropout_mode, use_prelu):
    def f(x):
        # Separate l2 penalties for the kernel, recurrent, and bias weights
        lstm_layer = CuDNNLSTM(units=unit_nr, return_sequences=return_sequences,
                               kernel_regularizer=regularizers.l2(kernel_reg_l2),
                               recurrent_regularizer=regularizers.l2(recurrent_reg_l2),
                               bias_regularizer=regularizers.l2(bias_reg_l2))
        if bidirectional:
            x = Bidirectional(lstm_layer)(x)
        else:
            x = lstm_layer(x)
        x = bn_relu_dropout_block(use_batch_norm=use_batch_norm, batch_norm_first=batch_norm_first,
                                  dropout=dropout, dropout_mode=dropout_mode,
                                  use_prelu=use_prelu)(x)
        return x
    return f
Example 3: cudnn_gru_block
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def cudnn_gru_block(unit_nr, return_sequences, bidirectional,
                    kernel_reg_l2, recurrent_reg_l2, bias_reg_l2,
                    use_batch_norm, batch_norm_first,
                    dropout, dropout_mode, use_prelu):
    def f(x):
        gru_layer = CuDNNGRU(units=unit_nr, return_sequences=return_sequences,
                             kernel_regularizer=regularizers.l2(kernel_reg_l2),
                             recurrent_regularizer=regularizers.l2(recurrent_reg_l2),
                             bias_regularizer=regularizers.l2(bias_reg_l2))
        if bidirectional:
            x = Bidirectional(gru_layer)(x)
        else:
            x = gru_layer(x)
        x = bn_relu_dropout_block(use_batch_norm=use_batch_norm, batch_norm_first=batch_norm_first,
                                  dropout=dropout, dropout_mode=dropout_mode,
                                  use_prelu=use_prelu)(x)
        return x
    return f
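Both builders above follow the same closure pattern: they return a function f that is applied to a Keras tensor, so they slot directly into the functional API. A hedged usage sketch for the GRU variant (bn_relu_dropout_block is a helper from the same source project, and the dropout_mode value here is an assumption):

from keras.layers import Input, Dense
from keras.models import Model

# Hypothetical wiring; sequence length and feature size are illustrative.
inputs = Input(shape=(100, 300))
x = cudnn_gru_block(unit_nr=128, return_sequences=False, bidirectional=True,
                    kernel_reg_l2=1e-4, recurrent_reg_l2=1e-4, bias_reg_l2=1e-4,
                    use_batch_norm=True, batch_norm_first=False,
                    dropout=0.3, dropout_mode='simple', use_prelu=False)(inputs)
outputs = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=outputs)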
Example 4: __init__
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def __init__(self, model_path=None):
    if model_path is not None:
        self.model = self.load_model(model_path)
    else:
        # VGG16 last conv features
        inputs = Input(shape=(7, 7, 512))
        x = Conv2D(128, (1, 1))(inputs)
        x = Flatten()(x)
        # Classification head
        h_cls = Dense(256, activation='relu', kernel_regularizer=l2(0.01))(x)
        h_cls = Dropout(0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)
        # Regression head
        h_reg = Dense(256, activation='relu', kernel_regularizer=l2(0.01))(x)
        h_reg = Dropout(0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)
        # Joint model with classification and box-regression outputs
        self.model = Model(inputs=inputs, outputs=[cls_head, reg_head])
Example 5: _initial_conv_block_inception
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu, for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for the initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return x
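A short usage sketch for the stem block above; the 224x224 RGB input and 64 stem filters are illustrative values, not taken from the source:

from keras.layers import Input

img = Input(shape=(224, 224, 3))
x = _initial_conv_block_inception(img, initial_conv_filters=64)
# Two stride-2 stages (conv, then max-pool) reduce 224x224 to 56x56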
Example 6: _bn_relu_conv_block
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Conv-BatchNorm-ReLU block for DPN (despite the name, the
    convolution is applied first)
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, kernel, padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
Example 7: __init__
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def __init__(self,
             num_hidden_units=256,
             regularizer_val=0.0001,
             activation='softsign',
             embed_size=64,
             embed_dropout=0):
    # Network parameters
    self._num_hidden_units = num_hidden_units
    self._regularizer_value = regularizer_val
    self._regularizer = regularizers.l2(regularizer_val)
    self._activation = activation
    self._embed_size = embed_size
    self._embed_dropout = embed_dropout
    # Model parameters
    self._observe_length = 15
    self._predict_length = 15
    self._encoder_feature_size = 4
    self._decoder_feature_size = 4
    self._prediction_size = 4
Example 8: __initial_conv_block_inception
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def __initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """ Adds an initial conv block, with batch norm and LeakyReLU, for the Inception-ResNeXt
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return x
Example 9: get_Shared_Model
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def get_Shared_Model(input_dim):
    # Shared sub-network: three dense blocks with light dropout
    sharedNet = Sequential()
    sharedNet.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    # sharedNet.add(Dropout(0.1))
    # sharedNet.add(Dense(3, activation='relu'))
    # Alternative convolutional variant, kept commented out in the source:
    # sharedNet = Sequential()
    # sharedNet.add(Dense(4096, activation="tanh", kernel_regularizer=l2(2e-3)))
    # sharedNet.add(Reshape(target_shape=(64, 64, 1)))
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=128, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(Flatten())
    # sharedNet.add(Dense(1024, activation="sigmoid", kernel_regularizer=l2(1e-3)))
    return sharedNet
Example 10: __transition_block
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, and a 1x1 Conv2D with optional compression, followed by
    AveragePooling2D and a global context block
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
            in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, avg-pool and
        global context
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    # global context block
    x = global_context_block(x)
    return x
Example 11: get_model
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    # init_normal is the project's own embedding initializer
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_initializer=init_normal, embeddings_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_initializer=init_normal, embeddings_regularizer=l2(regs[1]), input_length=1)
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    # Element-wise product of user and item embeddings
    predict_vector = multiply([user_latent, item_latent])
    # Final prediction layer
    #prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name='prediction')(predict_vector)
    model = Model(inputs=[user_input, item_input],
                  outputs=prediction)
    return model
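A hedged usage sketch for the matrix-factorization model above (init_normal is the source project's own initializer helper; the user/item counts below are made-up illustrative values):

model = get_model(num_users=6040, num_items=3706, latent_dim=8, regs=[1e-5, 1e-5])
model.compile(optimizer='adam', loss='binary_crossentropy')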
Example 12: conv_factory
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def conv_factory(x, concat_axis, nb_filter,
                 dropout_rate=None, weight_decay=1E-4):
    # BatchNorm with l2-regularized scale (gamma) and shift (beta) parameters
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    # 5x5 dilated convolution with l2 weight decay on the kernel
    x = Conv2D(nb_filter, (5, 5), dilation_rate=(2, 2),
               kernel_initializer="he_uniform",
               padding="same",
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x

# define dense block
Example 13: learnConcatRealImagBlock
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def learnConcatRealImagBlock(I, filter_size, featmaps, stage, block, convArgs, bnArgs, d):
    """Learn initial imaginary component for input."""
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    O = BatchNormalization(name=bn_name_base + '2a', **bnArgs)(I)
    O = Activation(d.act)(O)
    O = Convolution2D(featmaps[0], filter_size,
                      name=conv_name_base + '2a',
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False,
                      kernel_regularizer=l2(0.0001))(O)
    O = BatchNormalization(name=bn_name_base + '2b', **bnArgs)(O)
    O = Activation(d.act)(O)
    O = Convolution2D(featmaps[1], filter_size,
                      name=conv_name_base + '2b',
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False,
                      kernel_regularizer=l2(0.0001))(O)
    return O
Example 14: _transition_block
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def _transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    x = BatchNormalization(epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        # Halve both spatial dimensions
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        # Pad the width axis, then pool with stride (2, 1): height halves, width is preserved
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        # Halve height only, without padding
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example 15: vgg_fc
# Required import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l2 [as alias]
def vgg_fc(filters, weight_decay=0., block_name='block5'):
    """A fully convolutional block for encoding.
    :param filters: Integer, number of filters per fc layer
    >>> from keras_fcn.blocks import vgg_fc
    >>> x = vgg_fc(filters=4096)(x)
    """
    def f(x):
        # fc6 as a dilated 7x7 convolution
        fc6 = Conv2D(filters=filters, kernel_size=(7, 7),
                     activation='relu', padding='same',
                     dilation_rate=(2, 2),
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay),
                     name='{}_fc6'.format(block_name))(x)
        drop6 = Dropout(0.5)(fc6)
        # fc7 as a 1x1 convolution
        fc7 = Conv2D(filters=filters, kernel_size=(1, 1),
                     activation='relu', padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay),
                     name='{}_fc7'.format(block_name))(drop6)
        drop7 = Dropout(0.5)(fc7)
        return drop7
    return f
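Finally, a short hedged sketch of how this block composes onto a VGG-style feature map (the shapes are illustrative, not taken from the source project):

from keras.layers import Input
from keras.models import Model

# Hypothetical usage; a 7x7x512 feature map mimics VGG16's block5 output.
feat = Input(shape=(7, 7, 512))
out = vgg_fc(filters=4096, weight_decay=1e-4)(feat)
model = Model(inputs=feat, outputs=out)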