This page collects typical usage examples of the Python method tensorflow.contrib.slim.l2_regularizer. If you are wondering what slim.l2_regularizer does, how to call it, or what real uses look like, the curated examples below should help. You can also explore the containing module, tensorflow.contrib.slim, for more of its usage.
The following shows 15 code examples of slim.l2_regularizer, sorted by popularity by default.
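Before the examples, a minimal sketch of what slim.l2_regularizer itself does (TF 1.x; the variable names below are illustrative, not taken from any of the examples). It returns a callable that maps a weight tensor to the scalar penalty scale * tf.nn.l2_loss(weights); when passed to a slim layer, the penalty is registered in tf.GraphKeys.REGULARIZATION_LOSSES:

import tensorflow as tf
import tensorflow.contrib.slim as slim

regularizer = slim.l2_regularizer(scale=1e-4)    # returns a callable
weights = tf.get_variable('w', shape=[3, 3, 16, 32])
penalty = regularizer(weights)                   # 1e-4 * sum(w ** 2) / 2

# More commonly the callable is handed to a layer (or an arg_scope, as in
# the examples below), and the penalties are collected for the total loss:
net = slim.conv2d(tf.zeros([1, 32, 32, 16]), 32, [3, 3],
                  weights_regularizer=regularizer, scope='conv_demo')
reg_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))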
Example 1: _extra_conv_arg_scope_with_bn
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }
  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
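A hypothetical use of the scope returned above (the tensor `images` and the layer sizes are made up for illustration):

with slim.arg_scope(_extra_conv_arg_scope_with_bn(weight_decay=1e-5)):
  net = slim.conv2d(images, 64, [3, 3], scope='extra_conv1')  # picks up the regularizer, BN and ReLU
  net = slim.max_pool2d(net, [2, 2], scope='extra_pool1')     # picks up the 'SAME' padding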
Example 2: _extra_conv_arg_scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):
  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
Example 3: inference
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.995,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # force in-place updates of mean and variance estimates
      'updates_collections': None,
      # Moving averages end up in the trainable variables collection
      'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
  }
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_initializer=slim.initializers.xavier_initializer(),
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      normalizer_fn=slim.batch_norm,
                      normalizer_params=batch_norm_params):
    return inception_resnet_v2(images, is_training=phase_train,
                               dropout_keep_prob=keep_probability,
                               bottleneck_layer_size=bottleneck_layer_size,
                               reuse=reuse)
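A hypothetical call (tensor names and sizes are illustrative; unpacking two return values assumes inception_resnet_v2 follows the usual (net, end_points) convention, as Example 4 below mirrors):

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
prelogits, end_points = inference(images, keep_probability=0.8, phase_train=True,
                                  bottleneck_layer_size=128, weight_decay=5e-4)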
Example 4: inference
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def inference(images, keep_probability, phase_train=True,  # @UnusedVariable
              bottleneck_layer_size=128, bottleneck_layer_activation=None,  # @UnusedVariable
              weight_decay=0.0, reuse=None):
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.995,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # force in-place updates of mean and variance estimates
      'updates_collections': None,
      # Moving averages end up in the trainable variables collection
      'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
  }
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      normalizer_fn=slim.batch_norm,
                      normalizer_params=batch_norm_params):
    size = np.prod(images.get_shape()[1:].as_list())
    net = slim.fully_connected(tf.reshape(images, (-1, size)), bottleneck_layer_size,
                               activation_fn=None, scope='Bottleneck', reuse=False)
    return net, None
Example 5: inference
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.995,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # force in-place updates of mean and variance estimates
      'updates_collections': None,
      # Moving averages end up in the trainable variables collection
      'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
  }
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_initializer=slim.initializers.xavier_initializer(),
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      normalizer_fn=slim.batch_norm,
                      normalizer_params=batch_norm_params):
    return inception_resnet_v1(images, is_training=phase_train,
                               dropout_keep_prob=keep_probability,
                               bottleneck_layer_size=bottleneck_layer_size,
                               reuse=reuse)
Example 6: encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def encoder(self, images, is_training):
  activation_fn = leaky_relu  # tf.nn.relu
  weight_decay = 0.0
  with tf.variable_scope('encoder'):
    with slim.arg_scope([slim.batch_norm],
                        is_training=is_training):
      with slim.arg_scope([slim.conv2d, slim.fully_connected],
                          weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                          weights_regularizer=slim.l2_regularizer(weight_decay),
                          normalizer_fn=slim.batch_norm,
                          normalizer_params=self.batch_norm_params):
        net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
        net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
        net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
        net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
        net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
        net = slim.flatten(net)
        fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
        fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
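The two fully connected heads are typically read as the mean and log-variance of a Gaussian posterior; a minimal sketch of the reparameterization step that would usually consume them (an assumption about this codebase, not shown in the source):

mean, log_var = fc1, fc2                 # 'log-variance' reading of fc2 is an assumption
eps = tf.random_normal(tf.shape(mean))
z = mean + tf.exp(0.5 * log_var) * eps   # differentiable sample from N(mean, exp(log_var))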
Example 7: encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def encoder(self, images, is_training):
  activation_fn = leaky_relu  # tf.nn.relu
  weight_decay = 0.0
  with tf.variable_scope('encoder'):
    with slim.arg_scope([slim.batch_norm],
                        is_training=is_training):
      with slim.arg_scope([slim.conv2d, slim.fully_connected],
                          weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                          weights_regularizer=slim.l2_regularizer(weight_decay),
                          normalizer_fn=slim.batch_norm,
                          normalizer_params=self.batch_norm_params):
        net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
        net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
        net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
        net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
        net = slim.flatten(net)
        fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
        fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
Example 8: _build_network
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def _build_network(self):
  with slim.arg_scope([slim.conv2d],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(self.weight_decay),
                      weights_initializer=self.weights_initializer,
                      biases_initializer=self.biases_initializer):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        padding='SAME',
                        data_format=self.data_format):
      with tf.variable_scope(self.basenet_type):
        basenet, end_points = net_factory.get_basenet(self.basenet_type, self.inputs)
      with tf.variable_scope('extra_layers'):
        self.net, self.end_points = self._add_extra_layers(basenet, end_points)
      with tf.variable_scope('seglink_layers'):
        self._add_seglink_layers()
Example 9: resnet_arg_scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
      # Batch norm runs in inference mode with frozen parameters, regardless
      # of is_training; only the conv weights are trainable.
      'is_training': False,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'trainable': False,
      'updates_collections': tf.GraphKeys.UPDATE_OPS
  }
  with arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
Example 10: _arg_scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def _arg_scope(self, is_training, reuse=None):
  weight_decay = 0.0
  keep_probability = 1.0
  batch_norm_params = {
      'is_training': is_training,
      # Decay for the moving averages.
      'decay': 0.995,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001
  }
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      normalizer_fn=slim.batch_norm,
                      normalizer_params=batch_norm_params):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
      with slim.arg_scope([slim.batch_norm, slim.dropout],
                          is_training=is_training) as sc:
        return sc
Example 11: conv3d
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def conv3d(
    input,
    output_chn,
    kernel_size,
    stride,
    use_bias=False,
    name='conv'):
  return tf.layers.conv3d(
      inputs=input,
      filters=output_chn,
      kernel_size=kernel_size,
      strides=stride,
      padding='same',
      data_format='channels_last',
      kernel_initializer=tf.truncated_normal_initializer(0.0, 0.01),
      kernel_regularizer=slim.l2_regularizer(0.0005),
      use_bias=use_bias,
      name=name)
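A hypothetical call, assuming a single-channel volume in channels-last layout (the tensor name and sizes are illustrative):

volume = tf.placeholder(tf.float32, [1, 32, 32, 32, 1])  # NDHWC
features = conv3d(volume, output_chn=16, kernel_size=3, stride=1, name='conv1')
# 'same' padding with stride 1 preserves spatial size: [1, 32, 32, 32, 16]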
Example 12: Unsample
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def Unsample(input, output_chn, name):
  # A fully static input shape (batch included) is required here.
  batch, in_depth, in_height, in_width, in_channels = [
      int(d) for d in input.get_shape()]
  base = input.shape[-2]
  # Stride that upsamples the current spatial size to the hard-coded 96^3
  # output (e.g. 24 -> 4); floor division keeps it an int under Python 3.
  stride = 96 // int(base)
  print("upsampling stride", stride)
  filter = tf.get_variable(
      name + "/filter",
      shape=[4, 4, 4, output_chn, in_channels],
      dtype=tf.float32,
      initializer=tf.random_normal_initializer(0, 0.01),
      regularizer=slim.l2_regularizer(0.0005))
  conv = tf.nn.conv3d_transpose(
      value=input, filter=filter,
      output_shape=[batch, 96, 96, 96, output_chn],
      strides=[1, stride, stride, stride, 1],
      padding="SAME", name=name)
  return conv
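A hypothetical call; since the 96^3 output shape is hard-coded, the input's spatial size must divide 96 evenly and the batch size must be static (the sizes below are illustrative):

feat = tf.placeholder(tf.float32, [2, 24, 24, 24, 64])
up = Unsample(feat, output_chn=32, name='deconv1')  # stride 96 // 24 = 4, output [2, 96, 96, 96, 32]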
Example 13: resnet_arg_scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def resnet_arg_scope(freeze_norm, is_training=True, weight_decay=0.0001,
                     batch_norm_decay=0.9, batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
      # As in Example 9, batch norm is frozen in inference mode.
      'is_training': False, 'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale,
      'trainable': False,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
      'data_format': DATA_FORMAT
  }
  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
Example 14: create_model
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def create_model(self, model_input, vocab_size, l2_penalty=1e-8,
                 original_input=None, epsilon=1e-5, **unused_params):
  """Creates a non-unified matrix regression model.

  Args:
    model_input: 'batch' x 'num_features' x 'num_methods' matrix of input features.
    vocab_size: The number of classes in the dataset.

  Returns:
    A dictionary with a tensor containing the probability predictions of the
    model in the 'predictions' key. The dimensions of the tensor are
    batch_size x num_classes.
  """
  num_features = model_input.get_shape().as_list()[-2]
  num_methods = model_input.get_shape().as_list()[-1]
  log_model_input = tf.stop_gradient(
      tf.log((epsilon + model_input) / (1.0 + epsilon - model_input)))
  weight = tf.get_variable("ensemble_weight",
                           shape=[num_features, num_methods],
                           regularizer=slim.l2_regularizer(l2_penalty))
  weight = tf.nn.softmax(weight)
  output = tf.nn.sigmoid(tf.einsum("ijk,jk->ij", log_model_input, weight))
  return {"predictions": output}
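In effect, each base prediction is first mapped to a logit, log((epsilon + p) / (1.0 + epsilon - p)), and the model computes predictions[i, j] = sigmoid(sum_k weight[j, k] * logit(model_input[i, j, k])): a per-class, softmax-weighted average of the methods' logits, where only the mixing weights are trained because stop_gradient freezes the base predictions.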
Example 15: create_model
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import l2_regularizer [as alias]
def create_model(self,
                 model_input,
                 vocab_size,
                 num_mixtures=None,
                 l2_penalty=1e-8,
                 sub_scope="",
                 original_input=None,
                 **unused_params):
  num_methods = model_input.get_shape().as_list()[-1]
  num_features = model_input.get_shape().as_list()[-2]
  original_input = tf.nn.l2_normalize(original_input, dim=1)
  gate_activations = slim.fully_connected(
      original_input,
      num_methods,
      activation_fn=tf.nn.softmax,
      weights_regularizer=slim.l2_regularizer(l2_penalty),
      scope="gates" + sub_scope)
  output = tf.einsum("ijk,ik->ij", model_input, gate_activations)
  return {"predictions": output}
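Unlike Example 14, the mixture here is per example rather than per class: output[i, j] = sum_k model_input[i, j, k] * gate_activations[i, k], so each example softly selects among the candidate methods based on its L2-normalized original input.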