This article collects typical usage examples of the Python method tensorflow.keras.regularizers.l2. If you are unsure what regularizers.l2 does or how to call it, the curated code samples below may help. You can also explore the containing module, tensorflow.keras.regularizers, for further usage.
The following presents 15 code examples of the regularizers.l2 method, sorted by popularity by default.
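Before the examples, a minimal self-contained sketch of the basic API (an illustration, not taken from any example below): l2(factor) returns a regularizer object that, once attached to a layer, adds factor * sum(square(weights)) to the model's total loss.

import tensorflow as tf
from tensorflow.keras import layers, regularizers

# Attaching l2(1e-4) contributes 1e-4 * sum(square(kernel)) to model.losses.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(10,)),
    layers.Dense(64, activation="relu",
                 kernel_regularizer=regularizers.l2(1e-4)),
])
print(model.losses)  # the pending L2 penalty tensor for the Dense kernel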
Example 1: __init__

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def __init__(self, out_features, **kwargs):
    super(_DenseLayer, self).__init__(**kwargs)
    # w_decay and weight_init are module-level settings in the original project
    k_reg = None if w_decay is None else l2(w_decay)
    self.layers = []
    self.layers.append(tf.keras.Sequential(
        [
            layers.ReLU(),
            layers.Conv2D(
                filters=out_features, kernel_size=(3, 3), strides=(1, 1), padding='same',
                use_bias=True, kernel_initializer=weight_init,
                kernel_regularizer=k_reg),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.Conv2D(
                filters=out_features, kernel_size=(3, 3), strides=(1, 1), padding='same',
                use_bias=True, kernel_initializer=weight_init,
                kernel_regularizer=k_reg),
            layers.BatchNormalization(),
        ]))  # the first ReLU may be unnecessary
Example 2: __initial_conv_block_inception

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def __initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """ Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example 3: create_model

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # divide by 2 since d/dweight learning_rate * weight^2 = 2 * learning_rate * weight
    # see https://arxiv.org/pdf/1711.05101.pdf
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight))  # in tf2.0: lambda: regularizer(weight)

    return model
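A hypothetical usage sketch for create_model above, assuming the example's own imports are in scope. IMAGE_SIZE, ALPHA, and WEIGHT_DECAY are module-level constants in the original project, so the values below are illustrative assumptions:

# Assumed values; the real constants are defined elsewhere in the source project.
IMAGE_SIZE = 224      # a valid MobileNetV2 input size
ALPHA = 1.0           # MobileNetV2 width multiplier
WEIGHT_DECAY = 5e-4   # yields l2(2.5e-4) on every trainable weight

model = create_model(trainable=False)
model.compile(optimizer="adam", loss="mse")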
Example 4: triplet_network

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def triplet_network(base_network, embedding_dims=2, embedding_l2=0.0):
    def output_shape(shapes):
        shape1, shape2, shape3 = shapes
        return (3, shape1[0],)

    input_a = Input(shape=base_network.input_shape[1:])
    input_p = Input(shape=base_network.input_shape[1:])
    input_n = Input(shape=base_network.input_shape[1:])

    embeddings = Dense(embedding_dims,
                       kernel_regularizer=l2(embedding_l2))(base_network.output)
    network = Model(base_network.input, embeddings)

    processed_a = network(input_a)
    processed_p = network(input_p)
    processed_n = network(input_n)

    triplet = Lambda(K.stack,
                     output_shape=output_shape,
                     name='stacked_triplets')([processed_a,
                                               processed_p,
                                               processed_n],)
    model = Model([input_a, input_p, input_n], triplet)

    return model, processed_a, processed_p, processed_n
Example 5: create_seq_modeling

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def create_seq_modeling(in_,
                        input_dims,
                        data_per_period,
                        n_periods,
                        n_classes,
                        transition_window,
                        name_prefix=""):
    cls = AveragePooling2D((data_per_period, 1),
                           name="{}average_pool".format(name_prefix))(in_)
    out = Conv2D(filters=n_classes,
                 kernel_size=(transition_window, 1),
                 activation="softmax",
                 kernel_regularizer=regularizers.l2(1e-5),
                 padding="same",
                 name="{}sequence_conv_out".format(name_prefix))(cls)
    s = [-1, n_periods, input_dims // data_per_period, n_classes]
    if s[2] == 1:
        s.pop(2)  # Squeeze the dim
    out = Lambda(lambda x: tf.reshape(x, s),
                 name="{}sequence_classification_reshaped".format(name_prefix))(out)
    return out
Example 6: log

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def log(self):
    self.logger("{} Model Summary\n"
                "-------------------".format(__class__.__name__))
    self.logger("N periods: {}".format(self.n_periods))
    self.logger("Input dims: {}".format(self.input_dims))
    self.logger("N channels: {}".format(self.n_channels))
    self.logger("N classes: {}".format(self.n_classes))
    self.logger("Kernel size: {}".format(self.kernel_size))
    self.logger("Dilation rate: {}".format(self.dilation))
    self.logger("CF factor: %.3f" % self.cf)
    self.logger("Init filters: {}".format(self.init_filters))
    self.logger("Depth: %i" % self.depth)
    self.logger("Poolings: {}".format(self.pools))
    self.logger("Transition window: {}".format(self.transition_window))
    self.logger("Dense activation: {}".format(self.dense_classifier_activation))
    self.logger("l2 reg: %s" % self.l2_reg)
    self.logger("Padding: %s" % self.padding)
    self.logger("Conv activation: %s" % self.activation)
    self.logger("Receptive field: %s" % self.receptive_field[0])
    self.logger("Seq length: {}".format(self.n_periods * self.input_dims))
    self.logger("N params: %i" % self.count_params())
    self.logger("Input: %s" % self.input)
    self.logger("Output: %s" % self.output)
Example 7: buildModel

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def buildModel(patchShape, numClasses):
    input = Input(shape=patchShape)
    n_base_filters = 32
    # _handle_data_format and CHANNEL_AXIS come from the original module
    _handle_data_format()
    conv = Conv3D(filters=n_base_filters, kernel_size=(7, 7, 7),
                  strides=(2, 2, 2), kernel_initializer="he_normal",
                  )(input)
    norm = BatchNormalization(axis=CHANNEL_AXIS)(conv)
    conv1 = Activation("relu")(norm)
    pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),
                         padding="same")(conv1)
    flatten1 = Flatten()(pool1)
    dense = Dense(units=numClasses,
                  kernel_initializer="he_normal",
                  activation="softmax",
                  kernel_regularizer=l2(1e-4))(flatten1)
    model = Model(inputs=input, outputs=dense)
    return model
Example 8: log

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def log(self):
    self.logger("Multi-Task UNet Model Summary\n"
                "-----------------------------")
    self.logger("N classes: %s" % list(self.n_classes))
    self.logger("CF factor: %.3f" % self.cf**2)
    self.logger("Depth: %i" % self.depth)
    self.logger("l2 reg: %s" % self.l2_reg)
    self.logger("Padding: %s" % self.padding)
    self.logger("Conv activation: %s" % self.activation)
    self.logger("Out activation: %s" % list(self.out_activation))
    self.logger("Receptive field: %s" % self.receptive_field)
    self.logger("N params: %i" % self.count_params())
    self.logger("N tasks: %i" % self.n_tasks)
    if self.n_tasks > 1:
        inputs = self.input
        outputs = self.output
    else:
        inputs = [self.input]
        outputs = [self.output]
    for i, (id_, in_, out) in enumerate(zip(self.task_IDs, inputs, outputs)):
        self.logger("\n--- Task %s ---" % id_)
        self.logger("In shape: %s" % in_.shape)
        self.logger("Out shape: %s\n" % out.shape)
Example 9: log

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def log(self):
    self.logger("UNet Model Summary\n------------------")
    self.logger("Image rows: %i" % self.img_shape[0])
    self.logger("Image cols: %i" % self.img_shape[1])
    self.logger("Image channels: %i" % self.img_shape[2])
    self.logger("N classes: %i" % self.n_classes)
    self.logger("CF factor: %.3f" % self.cf**2)
    self.logger("Depth: %i" % self.depth)
    self.logger("l2 reg: %s" % self.l2_reg)
    self.logger("Padding: %s" % self.padding)
    self.logger("Conv activation: %s" % self.activation)
    self.logger("Out activation: %s" % self.out_activation)
    self.logger("Receptive field: %s" % self.receptive_field)
    self.logger("N params: %i" % self.count_params())
    self.logger("Output: %s" % self.output)
    self.logger("Crop: %s" % (self.label_crop if np.sum(self.label_crop) != 0 else "None"))
Example 10: _initialize

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def _initialize(self):
    if isinstance(self.char_window_size, int):
        self.char_window_size = [self.char_window_size]
    if self.char_filters is None or isinstance(self.char_filters, int):
        self.char_filters = [self.char_filters] * len(self.char_window_size)
    if len(self.char_window_size) != len(self.char_filters):
        raise ValueError("There should be the same number of window sizes and filter sizes")
    if isinstance(self.word_lstm_units, int):
        self.word_lstm_units = [self.word_lstm_units] * self.word_lstm_layers
    if len(self.word_lstm_units) != self.word_lstm_layers:
        raise ValueError("There should be the same number of lstm layer units and lstm layers")
    if self.word_vectorizers is None:
        self.word_vectorizers = []
    if self.regularizer is not None:
        # Convert a float weight-decay hyperparameter into an l2 regularizer object
        self.regularizer = l2(self.regularizer)
    if self.verbose > 0:
        log.info("{} symbols, {} tags in CharacterTagger".format(len(self.symbols), len(self.tags)))
Example 11: _cnn_

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def _cnn_(cnn_input_shape, name=None):
    # tf.variable_scope / tf.AUTO_REUSE are TF1-style APIs
    # (tf.compat.v1.variable_scope / tf.compat.v1.AUTO_REUSE in TF2);
    # W_init, b_init_conv, and b_init_dense are initializers defined in the original module
    with tf.variable_scope(name or 'convnet', reuse=tf.AUTO_REUSE):
        convnet = Sequential()
        convnet.add(Conv1D(230, 3,
                           input_shape=cnn_input_shape,
                           kernel_initializer=W_init,
                           bias_initializer=b_init_conv,
                           kernel_regularizer=l2(2e-4)
                           ))
        convnet.add(MaxPooling1D(pool_size=cnn_input_shape[0] - 4))
        convnet.add(Activation('relu'))
        convnet.add(Flatten())
        convnet.add(Dense(cnn_input_shape[-1] * 230, activation='sigmoid',
                          kernel_initializer=W_init,
                          bias_initializer=b_init_dense,
                          kernel_regularizer=l2(1e-3)
                          ))
    return convnet
Example 12: transition_layer

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
    """
    Creates a transition layer between dense blocks, which does convolution and pooling.
    Works as downsampling.
    """
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels * compression), (1, 1), padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
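A hypothetical shape check for transition_layer above, assuming this example's imports plus tensorflow.keras.Input; with compression=0.5 the 1x1 convolution halves the channel count and the 2x2 average pooling halves each spatial dimension:

from tensorflow.keras import Input

inputs = Input(shape=(32, 32, 64))
outputs = transition_layer(inputs, nb_channels=64, dropout_rate=None, compression=0.5)
print(outputs.shape)  # expected: (None, 16, 16, 32)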
Example 13: depthwiseConv_bn

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def depthwiseConv_bn(x, depth_multiplier, kernel_size, strides=1):
    """ Depthwise convolution
    The DepthwiseConv2D is just the first step of the depthwise separable convolution (without the pointwise step).
    Depthwise separable convolutions consist of performing just the first step in a depthwise spatial convolution
    (which acts on each input channel separately).
    This function defines a 2D depthwise convolution operation with BN and relu6.
    # Arguments
        x: Tensor, input tensor of conv layer.
        depth_multiplier: Integer, the number of depthwise convolution output channels per input channel.
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
        Output tensor.
    """
    x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides, depth_multiplier=depth_multiplier,
                               padding='same', use_bias=False, kernel_regularizer=regularizers.l2(l=0.0003))(x)
    x = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = layers.ReLU(max_value=6)(x)
    return x
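The docstring notes that DepthwiseConv2D covers only the depthwise half of a depthwise separable convolution. A sketch of the missing pointwise step, mirroring the example's style, could look like the following; the function name and the reuse of the same regularization factor are assumptions, not part of the original source:

def pointwise_conv_bn(x, filters):
    """Hypothetical pointwise (1x1) step completing the separable convolution."""
    x = layers.Conv2D(filters=filters, kernel_size=(1, 1), strides=1,
                      padding='same', use_bias=False,
                      kernel_regularizer=regularizers.l2(l=0.0003))(x)
    x = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = layers.ReLU(max_value=6)(x)
    return x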
Example 14: build

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def build(self, input_shape, num_output, repetitions=3):
    input_x = Input(shape=input_shape)
    feature_maps = self.extract_multi_resolution_feature(repetitions=repetitions)(input_x)
    x = self.make_classification_head(feature_maps, self.filter_list)
    x = Conv2D(filters=x.get_shape().as_list()[-1] * 2, kernel_size=(1, 1), strides=(1, 1),
               padding='same', kernel_regularizer=l2(1e-4))(x)
    x = BatchNormalization(axis=-1)(x, training=self.training)
    x = Activation("relu")(x)
    x = GlobalAveragePooling2D()(x)
    x = Flatten()(x)  # no-op here: GlobalAveragePooling2D already yields a 2D tensor
    x = Dense(units=num_output,
              name='final_fully_connected',
              kernel_initializer="he_normal",
              kernel_regularizer=l2(1e-4),
              activation='softmax')(x)
    return Model(inputs=input_x, outputs=x)
Example 15: conv2d_unit

# Required import: from tensorflow.keras import regularizers [as alias]
# Or: from tensorflow.keras.regularizers import l2 [as alias]
def conv2d_unit(x, filters, kernels, strides=1):
    """Convolution Unit
    This function defines a 2D convolution operation with BN and LeakyReLU.
    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernels: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and
            height. Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
        Output tensor.
    """
    x = Conv2D(filters, kernels,
               padding='same',
               strides=strides,
               activation='linear',
               kernel_regularizer=l2(5e-4))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    return x