This article collects typical usage examples of the Python method tensorflow.python.keras.regularizers.l2. If you have been wondering what regularizers.l2 does and how to use it in practice, the curated code examples below may help. You can also read further about its containing module, tensorflow.python.keras.regularizers.
The following presents 15 code examples of regularizers.l2, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
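Before the examples, a minimal standalone sketch (not taken from them) of what regularizers.l2 actually produces may help: l2(factor) returns a regularizer object that maps a weight tensor to factor * sum(square(weights)), and Keras adds that penalty to the training loss. It assumes TF 2.x eager execution; the private tensorflow.python.keras path mirrors tf.keras.regularizers.

import tensorflow as tf
from tensorflow.python.keras.regularizers import l2  # mirrors tf.keras.regularizers.l2

reg = l2(0.01)                 # penalty factor 0.01
w = tf.ones((2, 2))
print(float(reg(w)))           # 0.01 * sum(w**2) = 0.04

# The usual way to attach it: pass the regularizer to a layer argument.
layer = tf.keras.layers.Dense(4, kernel_regularizer=l2(0.01))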
Example 1: build
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def build(self, input_shape):
    input_size = input_shape[-1]
    hidden_units = [int(input_size)] + list(self.hidden_size)
    self.kernels = [self.add_weight(name='kernel' + str(i),
                                    shape=(hidden_units[i], hidden_units[i + 1]),
                                    initializer=glorot_normal(seed=self.seed),
                                    regularizer=l2(self.l2_reg),
                                    trainable=True) for i in range(len(self.hidden_size))]
    self.bias = [self.add_weight(name='bias' + str(i),
                                 shape=(self.hidden_size[i],),
                                 initializer=Zeros(),
                                 trainable=True) for i in range(len(self.hidden_size))]
    super(MLP, self).build(input_shape)  # Be sure to call this somewhere!
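A hedged aside on why build() passes regularizer=l2(...) to add_weight: weights registered this way contribute a penalty tensor to the layer's losses collection, which Model.fit folds into the objective. The tiny layer below is illustrative, not the original MLP class.

import tensorflow as tf
from tensorflow.python.keras.regularizers import l2

class TinyLayer(tf.keras.layers.Layer):
    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(int(input_shape[-1]), 1),
                                      regularizer=l2(0.1),
                                      trainable=True)
        super(TinyLayer, self).build(input_shape)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)

layer = TinyLayer()
_ = layer(tf.ones((1, 3)))  # first call triggers build()
print(layer.losses)         # [0.1 * sum(kernel ** 2)]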
Example 2: call
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def call(self, inputs, training=None, **kwargs):
    deep_input = inputs

    for i in range(len(self.hidden_size)):
        fc = tf.nn.bias_add(tf.tensordot(
            deep_input, self.kernels[i], axes=(-1, 0)), self.bias[i])
        # fc = Dense(self.hidden_size[i], activation=None,
        #            kernel_initializer=glorot_normal(seed=self.seed),
        #            kernel_regularizer=l2(self.l2_reg))(deep_input)
        if self.use_bn:
            fc = tf.keras.layers.BatchNormalization()(fc)
        fc = activation_fun(self.activation, fc)
        # fc = tf.nn.dropout(fc, self.keep_prob)
        # Keras Dropout takes the fraction to drop, hence 1 - keep_prob; pass
        # `training` so dropout is disabled at inference time.
        fc = tf.keras.layers.Dropout(1 - self.keep_prob)(fc, training=training)
        deep_input = fc

    return deep_input
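One detail worth calling out, sketched below: the commented-out tf.nn.dropout call took a keep probability in TF 1.x, whereas tf.keras.layers.Dropout takes the fraction of units to drop, hence rate = 1 - self.keep_prob.

import tensorflow as tf

keep_prob = 0.8
drop = tf.keras.layers.Dropout(rate=1 - keep_prob)
print(drop.rate)  # 0.2, i.e. 20% of units are dropped during training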
Example 3: merge_dense_input
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def merge_dense_input(dense_input_, embed_list, embedding_size, l2_reg):
    dense_input = list(dense_input_.values())
    if len(dense_input) > 0:
        if embedding_size == "auto":
            if len(dense_input) == 1:
                continuous_embedding_list = dense_input[0]
            else:
                continuous_embedding_list = Concatenate()(dense_input)
            continuous_embedding_list = Reshape(
                [1, len(dense_input)])(continuous_embedding_list)
            embed_list.append(continuous_embedding_list)
        else:
            continuous_embedding_list = list(
                map(Dense(embedding_size, use_bias=False, kernel_regularizer=l2(l2_reg)),
                    dense_input))
            continuous_embedding_list = list(
                map(Reshape((1, embedding_size)), continuous_embedding_list))
            embed_list += continuous_embedding_list
    return embed_list
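A hedged usage sketch of merge_dense_input through the fixed embedding_size branch. The input names are made up, and Dense, Reshape, Concatenate and l2 are assumed to be in scope as in the example above.

from tensorflow.keras.layers import Input

dense_input_ = {'price': Input(shape=(1,)), 'age': Input(shape=(1,))}
embed_list = merge_dense_input(dense_input_, [], embedding_size=4, l2_reg=1e-5)
print([e.shape for e in embed_list])  # [(None, 1, 4), (None, 1, 4)]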
Example 4: get_linear_logit
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def get_linear_logit(linear_emb_list, dense_input_dict, l2_reg):
    if len(linear_emb_list) > 1:
        linear_term = add(linear_emb_list)
    elif len(linear_emb_list) == 1:
        linear_term = linear_emb_list[0]
    else:
        linear_term = None

    dense_input = list(dense_input_dict.values())
    if len(dense_input) > 0:
        dense_input__ = dense_input[0] if len(
            dense_input) == 1 else Concatenate()(dense_input)
        linear_dense_logit = Dense(
            1, activation=None, use_bias=False, kernel_regularizer=l2(l2_reg))(dense_input__)
        if linear_term is not None:
            linear_term = add([linear_dense_logit, linear_term])
        else:
            linear_term = linear_dense_logit

    return linear_term
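A hedged sketch of the dense-only path through get_linear_logit (empty embedding list). Input names are illustrative; Concatenate, Dense, add and l2 are assumed in scope as above.

from tensorflow.keras.layers import Input

dense_input_dict = {'price': Input(shape=(1,)), 'age': Input(shape=(1,))}
linear_logit = get_linear_logit([], dense_input_dict, l2_reg=1e-5)
print(linear_logit.shape)  # (None, 1): the two inputs are concatenated and projected to one logit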
Example 5: __transition_block
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU and a 1x1 Conv2D (with optional compression), followed by AveragePooling2D.
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
            in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
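A hedged shape check for __transition_block, assuming channels_last data and the names used above (K, BatchNormalization, Activation, Conv2D, AveragePooling2D, l2) in scope:

import tensorflow as tf

ip = tf.keras.Input(shape=(32, 32, 64))
x = __transition_block(ip, nb_filter=64, compression=0.5)
print(x.shape)  # (None, 16, 16, 32): the 1x1 conv halves the channels, pooling halves H and W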
Example 6: __transition_up_block
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''Upscaling block (factor = 2).
    Args:
        ip: keras tensor
        nb_filters: number of filters
        type: can be 'upsampling', 'subpixel' or 'deconv'. Determines the type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying the upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
Example 7: _conv_bn_relu
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu block.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(input):
        conv = Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(input)
        return _bn_relu(conv)

    return f
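A hedged usage sketch; _bn_relu is assumed to be defined in the same module (batch normalization followed by a ReLU activation), as in the original keras-resnet helpers:

import tensorflow as tf

inputs = tf.keras.Input(shape=(224, 224, 3))
block = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))
outputs = block(inputs)
print(outputs.shape)  # (None, 112, 112, 64)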
Example 8: _bn_relu_conv
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv block.
    This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(input):
        activation = _bn_relu(input)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(activation)

    return f
Example 9: _shortcut
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum".
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height).
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 x 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)

    return add([shortcut, residual])
Example 10: basic_block
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Basic 3 x 3 convolution blocks for use on resnets with layers <= 34.
    Follows the improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
                           strides=init_strides,
                           padding="same",
                           kernel_initializer="he_normal",
                           kernel_regularizer=l2(1e-4))(input)
        else:
            conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
                                  strides=init_strides)(input)

        residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
        return _shortcut(input, residual)

    return f
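A hedged sketch of wiring a basic_block, assuming the helpers above (_bn_relu_conv, _shortcut) live in the same module along with the axis constants it references; the channels_last values below are an assumption, matching the original keras-resnet convention:

import tensorflow as tf

ROW_AXIS, COL_AXIS, CHANNEL_AXIS = 1, 2, 3  # channels_last convention (assumed)
x = tf.keras.Input(shape=(56, 56, 64))
y = basic_block(filters=64)(x)
print(y.shape)  # (None, 56, 56, 64): same-shape residual, so the shortcut is the identity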
Example 11: bottleneck
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Bottleneck architecture for resnets with > 34 layers.
    Follows the improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    Returns:
        A final conv layer with filters * 4 output channels
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
                              strides=init_strides,
                              padding="same",
                              kernel_initializer="he_normal",
                              kernel_regularizer=l2(1e-4))(input)
        else:
            conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1),
                                     strides=init_strides)(input)

        conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
        residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
        return _shortcut(input, residual)

    return f
Example 12: build
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def build(self, input_shapes):
    if self.feature_less:
        input_dim = int(input_shapes[0][-1])
    else:
        assert len(input_shapes) == 2
        features_shape = input_shapes[0]
        input_dim = int(features_shape[-1])

    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=glorot_uniform(seed=self.seed),
                                  regularizer=l2(self.l2_reg),
                                  name='kernel')
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=Zeros(),
                                    name='bias')

    self.dropout = Dropout(self.dropout_rate, seed=self.seed)

    self.built = True
Example 13: build
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def build(self, input_shapes):
    self.dense_layers = [Dense(
        self.input_dim, activation='relu', use_bias=True, kernel_regularizer=l2(self.l2_reg))]

    self.neigh_weights = self.add_weight(
        shape=(self.input_dim * 2, self.output_dim),
        initializer=glorot_uniform(seed=self.seed),
        regularizer=l2(self.l2_reg),
        name="neigh_weights")

    if self.use_bias:
        self.bias = self.add_weight(shape=(self.output_dim,),
                                    initializer=Zeros(),
                                    name='bias_weight')

    self.built = True
Example 14: create_embedding_dict
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns, seed, l2_reg,
                          prefix='sparse_', seq_mask_zero=True):
    sparse_embedding = {}
    for feat in sparse_feature_columns:
        emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                        embeddings_initializer=feat.embeddings_initializer,
                        embeddings_regularizer=l2(l2_reg),
                        name=prefix + '_emb_' + feat.embedding_name)
        emb.trainable = feat.trainable
        sparse_embedding[feat.embedding_name] = emb

    if varlen_sparse_feature_columns and len(varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            # if feat.name not in sparse_embedding:
            emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                            embeddings_initializer=feat.embeddings_initializer,
                            embeddings_regularizer=l2(l2_reg),
                            name=prefix + '_seq_emb_' + feat.name,
                            mask_zero=seq_mask_zero)
            emb.trainable = feat.trainable
            sparse_embedding[feat.embedding_name] = emb
    return sparse_embedding
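A hedged usage sketch. SparseFeat below is a hypothetical stand-in for the DeepCTR-style feature-column namedtuple this function expects; its fields mirror the attributes accessed above, and Embedding and l2 are assumed in scope as in the example.

from collections import namedtuple

SparseFeat = namedtuple('SparseFeat', ['name', 'embedding_name', 'vocabulary_size',
                                       'embedding_dim', 'embeddings_initializer', 'trainable'])
feat = SparseFeat('user_id', 'user_id', 1000, 8, 'uniform', True)
emb_dict = create_embedding_dict([feat], [], seed=1024, l2_reg=1e-5)
print(emb_dict['user_id'])  # an Embedding layer with input_dim=1000, output_dim=8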
Example 15: build
# Required import: from tensorflow.python.keras import regularizers [as alias]
# Or: from tensorflow.python.keras.regularizers import l2 [as alias]
def build(self, input_shape):
    # if len(self.hidden_units) == 0:
    #     raise ValueError("hidden_units is empty")
    input_size = input_shape[-1]
    hidden_units = [int(input_size)] + list(self.hidden_units)
    self.kernels = [self.add_weight(name='kernel' + str(i),
                                    shape=(hidden_units[i], hidden_units[i + 1]),
                                    initializer=glorot_normal(seed=self.seed),
                                    regularizer=l2(self.l2_reg),
                                    trainable=True) for i in range(len(self.hidden_units))]
    self.bias = [self.add_weight(name='bias' + str(i),
                                 shape=(self.hidden_units[i],),
                                 initializer=Zeros(),
                                 trainable=True) for i in range(len(self.hidden_units))]
    if self.use_bn:
        self.bn_layers = [tf.keras.layers.BatchNormalization() for _ in range(len(self.hidden_units))]

    self.dropout_layers = [tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i) for i in
                           range(len(self.hidden_units))]

    self.activation_layers = [activation_layer(self.activation) for _ in range(len(self.hidden_units))]

    super(DNN, self).build(input_shape)  # Be sure to call this somewhere!
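Finally, a hedged sketch of how Keras drives this build(): the first call on a tensor triggers build(input_shape), which sizes the kernels from the last input dimension. The DNN constructor arguments below are assumed from the attributes referenced above (DeepCTR-style) and may differ in your version.

import tensorflow as tf

dnn = DNN(hidden_units=(128, 64), activation='relu', l2_reg=1e-5,
          dropout_rate=0.5, use_bn=False, seed=1024)
out = dnn(tf.ones((2, 16)))  # build() creates kernels of shape (16, 128) and (128, 64)
print(out.shape)             # (2, 64)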