This article collects typical usage examples of the l2_regularizer function from tensorflow.contrib.slim in Python. If you have been wondering exactly what l2_regularizer does, how to call it, or where to find working examples, the curated code samples below may help.
Fifteen code examples of l2_regularizer are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
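Before the examples, a quick orientation: slim.l2_regularizer(scale) returns a callable that maps a weight tensor to the scalar penalty scale * sum(w**2) / 2. When that callable is passed to a slim layer as weights_regularizer, the layer registers the penalty in the tf.GraphKeys.REGULARIZATION_LOSSES collection. The following is a minimal sketch under TF 1.x with illustrative names; it is not taken from the examples below.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# The regularizer is just a callable: weight tensor -> scalar penalty.
regularizer = slim.l2_regularizer(1e-4)
weights = tf.get_variable('w', shape=[256, 10],
                          initializer=tf.truncated_normal_initializer(stddev=0.1))
penalty = regularizer(weights)  # equivalent to 1e-4 * tf.nn.l2_loss(weights)

# Passed as weights_regularizer, slim registers the penalty automatically.
inputs = tf.placeholder(tf.float32, [None, 256])
net = slim.fully_connected(inputs, 10,
                           weights_regularizer=regularizer, scope='fc')
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)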
Example 1: create_model
def create_model(self,
                 model_input,
                 vocab_size,
                 num_mixtures=None,
                 l2_penalty=1e-8,
                 **unused_params):
  """Creates a Mixture of (Logistic) Experts model.

  The model consists of a per-class softmax distribution over a
  configurable number of logistic classifiers. One of the classifiers in the
  mixture is not trained, and always predicts 0.

  Args:
    model_input: 'batch_size' x 'num_features' matrix of input features.
    vocab_size: The number of classes in the dataset.
    num_mixtures: The number of mixtures (excluding a dummy 'expert' that
      always predicts the non-existence of an entity).
    l2_penalty: How much to penalize the squared magnitudes of parameter
      values.

  Returns:
    A dictionary with a tensor containing the probability predictions of the
    model in the 'predictions' key. The dimensions of the tensor are
    batch_size x num_classes.
  """
  num_mixtures = num_mixtures or FLAGS.moe_num_mixtures

  gate_activations = slim.fully_connected(
      model_input,
      vocab_size * (num_mixtures + 1),
      activation_fn=None,
      biases_initializer=None,
      weights_regularizer=slim.l2_regularizer(l2_penalty),
      scope="gates")
  expert_activations = slim.fully_connected(
      model_input,
      vocab_size * num_mixtures,
      activation_fn=None,
      weights_regularizer=slim.l2_regularizer(l2_penalty),
      scope="experts")

  gating_distribution = tf.nn.softmax(tf.reshape(
      gate_activations,
      [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
  expert_distribution = tf.nn.sigmoid(tf.reshape(
      expert_activations,
      [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures

  final_probabilities_by_class_and_batch = tf.reduce_sum(
      gating_distribution[:, :num_mixtures] * expert_distribution, 1)
  final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                   [-1, vocab_size])
  return {"predictions": final_probabilities}
Example 2: create_model
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
  """Creates a model which uses a logistic classifier over the average of the
  frame-level features.

  This class is intended to be an example for implementors of frame level
  models. If you want to train a model over averaged features it is more
  efficient to average them beforehand rather than on the fly.

  Args:
    model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
      input features.
    vocab_size: The number of classes in the dataset.
    num_frames: A vector of length 'batch' which indicates the number of
      frames for each video (before padding).

  Returns:
    A dictionary with a tensor containing the probability predictions of the
    model in the 'predictions' key. The dimensions of the tensor are
    'batch_size' x 'num_classes'.
  """
  num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
  feature_size = model_input.get_shape().as_list()[2]

  denominators = tf.reshape(
      tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
  avg_pooled = tf.reduce_sum(model_input,
                             axis=[1]) / denominators

  output = slim.fully_connected(
      avg_pooled, vocab_size, activation_fn=tf.nn.sigmoid,
      weights_regularizer=slim.l2_regularizer(1e-8))
  return {"predictions": output}
Example 3: inference
def inference(image_batch, keep_probability,
              phase_train=True, bottleneck_layer_size=512,
              weight_decay=0.0):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'scale': True,
        'is_training': phase_train,
        'updates_collections': None,
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with tf.variable_scope('Resface'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            activation_fn=prelu,
                            normalizer_fn=slim.batch_norm,
                            # normalizer_fn=None,
                            normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.conv2d], kernel_size=3):
                return resface20(images=image_batch,
                                 keep_probability=keep_probability,
                                 phase_train=phase_train,
                                 bottleneck_layer_size=bottleneck_layer_size,
                                 reuse=None)
Example 4: encoder
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images
                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Example 5: inference
def inference(image_batch, keep_probability,
              phase_train=True, bottleneck_layer_size=512,
              weight_decay=0.0):
    with tf.variable_scope('LResnetE_IR'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            biases_initializer=None,  # default: no biases
                            activation_fn=None,
                            normalizer_fn=None):
            with slim.arg_scope([slim.conv2d], kernel_size=3):
                with slim.arg_scope([slim.batch_norm],
                                    decay=0.995,
                                    epsilon=1e-5,
                                    scale=True,
                                    is_training=phase_train,
                                    activation_fn=prelu,
                                    updates_collections=None,
                                    variables_collections=[tf.GraphKeys.TRAINABLE_VARIABLES]):
                    return LResnet50E_IR(images=image_batch,
                                         keep_probability=keep_probability,
                                         phase_train=phase_train,
                                         bottleneck_layer_size=bottleneck_layer_size,
                                         reuse=None)
Example 6: _extra_conv_arg_scope
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):
    with slim.arg_scope(
            [slim.conv2d, slim.conv2d_transpose],
            padding='SAME',
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn) as arg_sc:
        with slim.arg_scope(
                [slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(weight_decay),
                weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn) as arg_sc:
            return arg_sc
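The scope returned by _extra_conv_arg_scope is meant to be re-entered by the caller. A sketch of that usage, where images stands in for whatever tensor the caller has (hypothetical, not part of the original code):

arg_sc = _extra_conv_arg_scope(weight_decay=1e-5, activation_fn=tf.nn.relu)
with slim.arg_scope(arg_sc):
    # Every conv2d / conv2d_transpose / fully_connected created here inherits
    # the L2 weights_regularizer and the truncated-normal initializer.
    net = slim.conv2d(images, 64, [3, 3], scope='conv_extra')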
Example 7: decoder
def decoder(self, latent_var, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('decoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                net = tf.reshape(net, [-1, 4, 4, 256], name='Reshape')

                net = tf.image.resize_nearest_neighbor(net, size=(8, 8), name='Upsample_1')
                net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                net = tf.image.resize_nearest_neighbor(net, size=(16, 16), name='Upsample_2')
                net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                net = tf.image.resize_nearest_neighbor(net, size=(32, 32), name='Upsample_3')
                net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                net = tf.image.resize_nearest_neighbor(net, size=(64, 64), name='Upsample_4')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')
    return net
Example 8: build_feature_pyramid
def build_feature_pyramid(self):
    '''
    reference: https://github.com/CharlesShang/FastMaskRCNN
    build P2, P3, P4, P5, P6
    :return: multi-scale feature maps
    '''
    feature_pyramid = {}
    with tf.variable_scope('feature_pyramid'):
        with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(self.rpn_weight_decay)):
            feature_pyramid['P5'] = slim.conv2d(self.feature_maps_dict['C5'],
                                                num_outputs=256,
                                                kernel_size=[1, 1],
                                                stride=1,
                                                scope='build_P5')
            feature_pyramid['P6'] = slim.max_pool2d(feature_pyramid['P5'],
                                                    kernel_size=[2, 2], stride=2, scope='build_P6')
            # P6 is a downsample of P5
            for layer in range(4, 1, -1):
                p, c = feature_pyramid['P' + str(layer + 1)], self.feature_maps_dict['C' + str(layer)]
                up_sample_shape = tf.shape(c)
                up_sample = tf.image.resize_nearest_neighbor(p, [up_sample_shape[1], up_sample_shape[2]],
                                                             name='build_P%d/up_sample_nearest_neighbor' % layer)

                c = slim.conv2d(c, num_outputs=256, kernel_size=[1, 1], stride=1,
                                scope='build_P%d/reduce_dimension' % layer)
                p = up_sample + c
                p = slim.conv2d(p, 256, kernel_size=[3, 3], stride=1,
                                padding='SAME', scope='build_P%d/avoid_aliasing' % layer)
                feature_pyramid['P' + str(layer)] = p
    return feature_pyramid
Example 9: build_graph
def build_graph(self, image, label):
    image = tf.expand_dims(image, 3)
    image = image * 2 - 1

    is_training = get_current_tower_context().is_training
    with slim.arg_scope([slim.layers.fully_connected],
                        weights_regularizer=slim.l2_regularizer(1e-5)):
        l = slim.layers.conv2d(image, 32, [3, 3], scope='conv0')
        l = slim.layers.max_pool2d(l, [2, 2], scope='pool0')
        l = slim.layers.conv2d(l, 32, [3, 3], padding='SAME', scope='conv1')
        l = slim.layers.conv2d(l, 32, [3, 3], scope='conv2')
        l = slim.layers.max_pool2d(l, [2, 2], scope='pool1')
        l = slim.layers.conv2d(l, 32, [3, 3], scope='conv3')
        l = slim.layers.flatten(l, scope='flatten')
        l = slim.layers.fully_connected(l, 512, scope='fc0')
        l = slim.layers.dropout(l, is_training=is_training)
        logits = slim.layers.fully_connected(l, 10, activation_fn=None, scope='fc1')

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    acc = tf.to_float(tf.nn.in_top_k(logits, label, 1))
    acc = tf.reduce_mean(acc, name='accuracy')
    summary.add_moving_summary(acc)

    summary.add_moving_summary(cost)
    summary.add_param_summary(('.*/weights', ['histogram', 'rms']))  # slim uses different variable names
    return cost + regularize_cost_from_collection()
Example 10: _l2_regularized_embedding
def _l2_regularized_embedding(self, n_class, h_dim, scope_name, var_name='y_emb'):
    with tf.variable_scope(scope_name):
        embeddings = tf.get_variable(
            name=var_name,
            shape=[n_class, h_dim],
            regularizer=slim.l2_regularizer(1e-6))
    return embeddings
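Unlike the slim layers in the other examples, this one attaches the regularizer directly to tf.get_variable; TensorFlow still records the resulting penalty in the same collection, so it can be gathered in the usual way. A short sketch of that (assumed usage, not from the original code):

# tf.get_variable applies the regularizer and stores the penalty in
# tf.GraphKeys.REGULARIZATION_LOSSES, just like the slim layers do.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.add_n(reg_losses)  # single scalar, as in Example 13 below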
Example 11: inference
def inference(self):
    _x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
    # tf.image_summary(_x.op.name, _x, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])

    # Split out the color channels
    _, model_g, model_b = tf.split(_x, 3, 3, name='split_channels')
    # tf.image_summary(model_g.op.name, model_g, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])
    # tf.image_summary(model_b.op.name, model_b, max_images=10, collections=[digits.GraphKeys.SUMMARIES_TRAIN])

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        with tf.variable_scope("siamese") as scope:
            def make_tower(net):
                net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1')
                net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1')
                net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2')
                net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2')
                net = slim.flatten(net)
                net = slim.fully_connected(net, 500, scope='fc1')
                net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2')
                return net

            model_g = make_tower(model_g)
            model_g = tf.reshape(model_g, shape=[-1, 2])
            scope.reuse_variables()
            model_b = make_tower(model_b)
            model_b = tf.reshape(model_b, shape=[-1, 2])

            return [model_g, model_b]
Example 12: __init__
def __init__(self, net, labels_one_hot, model_params, method_params):
    """Stores arguments in member variables for further use.

    Args:
      net: A tensor with shape [batch_size, num_features, feature_size] which
        contains some extracted image features.
      labels_one_hot: An optional (can be None) ground truth labels for the
        input features. Is a tensor with shape
        [batch_size, seq_length, num_char_classes].
      model_params: A namedtuple with model parameters (model.ModelParams).
      method_params: A SequenceLayerParams instance.
    """
    self._params = model_params
    self._mparams = method_params
    self._net = net
    self._labels_one_hot = labels_one_hot
    self._batch_size = net.get_shape().dims[0].value

    # Initialize parameters for char logits which will be computed on the fly
    # inside an LSTM decoder.
    self._char_logits = {}
    regularizer = slim.l2_regularizer(self._mparams.weight_decay)
    self._softmax_w = slim.model_variable(
        'softmax_w',
        [self._mparams.num_lstm_units, self._params.num_char_classes],
        initializer=orthogonal_initializer,
        regularizer=regularizer)
    self._softmax_b = slim.model_variable(
        'softmax_b', [self._params.num_char_classes],
        initializer=tf.zeros_initializer(),
        regularizer=regularizer)
Example 13: __init__
def __init__(self, is_training):
    self.input_image = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 3], name='input_image')
    self.input_label = tf.placeholder(dtype=tf.float32, shape=[None, 100], name='input_label')
    self.input_nlcd = tf.placeholder(dtype=tf.float32, shape=[None, 15], name='input_nlcd')
    # logits, end_points = resnet_v2.resnet_v2_50(self.input_image, num_classes=100, is_training=True)
    # flatten_hist = tf.reshape(self.input_image, [-1, 96])
    self.keep_prob = tf.placeholder(tf.float32)

    weights_regularizer = slim.l2_regularizer(FLAGS.weight_decay)

    flatten_hist = tf.reshape(self.input_image, [-1, 3 * 64 * 64])
    flatten_hist = tf.concat([flatten_hist, self.input_nlcd], 1)

    x = slim.fully_connected(flatten_hist, 512, weights_regularizer=weights_regularizer, scope='decoder/fc_1')
    x = slim.fully_connected(x, 1024, weights_regularizer=weights_regularizer, scope='decoder/fc_2')
    flatten_hist = slim.fully_connected(x, 512, weights_regularizer=weights_regularizer, scope='decoder/fc_3')

    all_logits = []
    all_output = []
    for i in range(100):
        if i == 0:
            current_input_x = flatten_hist
        else:
            current_output = tf.concat(all_output, 1)
            current_input_x = tf.concat([flatten_hist, current_output], 1)
        x = slim.fully_connected(current_input_x, 256, weights_regularizer=weights_regularizer)
        x = slim.fully_connected(x, 100, weights_regularizer=weights_regularizer)
        # x = slim.fully_connected(x, 17, weights_regularizer=weights_regularizer)
        x = slim.dropout(x, keep_prob=self.keep_prob, is_training=is_training)
        all_logits.append(slim.fully_connected(x, 1, activation_fn=None, weights_regularizer=weights_regularizer))
        all_output.append(tf.sigmoid(all_logits[i]))

    final_logits = tf.concat(all_logits, 1)
    final_output = tf.sigmoid(final_logits)
    self.output = final_output

    self.ce_loss = tf.reduce_mean(tf.reduce_sum(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_label, logits=final_logits), 1))
    slim.losses.add_loss(self.ce_loss)
    tf.summary.scalar('ce_loss', self.ce_loss)

    # l2 loss
    self.l2_loss = tf.add_n(slim.losses.get_regularization_losses())
    tf.summary.scalar('l2_loss', self.l2_loss)

    # total loss
    self.total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('total_loss', self.total_loss)
    # self.output = tf.sigmoid(x)
Example 14: build_resnet50
def build_resnet50(inputs, get_pred, is_training, var_scope):
    batch_norm_params = {'is_training': is_training}
    with tf.variable_scope(var_scope) as sc:
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params,
                            weights_regularizer=slim.l2_regularizer(0.0001),
                            activation_fn=tf.nn.relu):
            conv1 = conv(inputs, 64, 7, 2)   # H/2  -   64D
            pool1 = maxpool(conv1, 3)        # H/4  -   64D
            conv2 = resblock(pool1, 64, 3)   # H/8  -  256D
            conv3 = resblock(conv2, 128, 4)  # H/16 -  512D
            conv4 = resblock(conv3, 256, 6)  # H/32 - 1024D
            conv5 = resblock(conv4, 512, 3)  # H/64 - 2048D

            skip1 = conv1
            skip2 = pool1
            skip3 = conv2
            skip4 = conv3
            skip5 = conv4

            # DECODING
            upconv6 = upconv(conv5, 512, 3, 2)  # H/32
            upconv6 = resize_like(upconv6, skip5)
            concat6 = tf.concat([upconv6, skip5], 3)
            iconv6 = conv(concat6, 512, 3, 1)

            upconv5 = upconv(iconv6, 256, 3, 2)  # H/16
            upconv5 = resize_like(upconv5, skip4)
            concat5 = tf.concat([upconv5, skip4], 3)
            iconv5 = conv(concat5, 256, 3, 1)

            upconv4 = upconv(iconv5, 128, 3, 2)  # H/8
            upconv4 = resize_like(upconv4, skip3)
            concat4 = tf.concat([upconv4, skip3], 3)
            iconv4 = conv(concat4, 128, 3, 1)
            pred4 = get_pred(iconv4)
            upred4 = upsample_nn(pred4, 2)

            upconv3 = upconv(iconv4, 64, 3, 2)  # H/4
            concat3 = tf.concat([upconv3, skip2, upred4], 3)
            iconv3 = conv(concat3, 64, 3, 1)
            pred3 = get_pred(iconv3)
            upred3 = upsample_nn(pred3, 2)

            upconv2 = upconv(iconv3, 32, 3, 2)  # H/2
            concat2 = tf.concat([upconv2, skip1, upred3], 3)
            iconv2 = conv(concat2, 32, 3, 1)
            pred2 = get_pred(iconv2)
            upred2 = upsample_nn(pred2, 2)

            upconv1 = upconv(iconv2, 16, 3, 2)  # H
            concat1 = tf.concat([upconv1, upred2], 3)
            iconv1 = conv(concat1, 16, 3, 1)
            pred1 = get_pred(iconv1)

            return [pred1, pred2, pred3, pred4]
Example 15: prediction_layer
def prediction_layer(cfg, input, name, num_outputs):
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], padding='SAME',
                        activation_fn=None, normalizer_fn=None,
                        weights_regularizer=slim.l2_regularizer(cfg.weight_decay)):
        with tf.variable_scope(name):
            pred = slim.conv2d_transpose(input, num_outputs,
                                         kernel_size=[3, 3], stride=2,
                                         scope='block4')
            return pred