This article collects typical usage examples of the Python method tensorflow.contrib.layers.l2_regularizer. If you are wondering what layers.l2_regularizer does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore other usage examples from the containing module, tensorflow.contrib.layers.
The following shows 15 code examples of the layers.l2_regularizer method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
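Before the examples, a minimal sketch of the basic pattern may help (the layer width and the 0.001 scale below are illustrative choices, not taken from any example): l2_regularizer(scale) returns a function that maps a weight tensor to scale times its L2 loss. When passed as weights_regularizer to a contrib layer, the penalty is recorded in the tf.GraphKeys.REGULARIZATION_LOSSES collection; it is not applied automatically and must be added to the training loss explicitly.

import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, shape=(None, 20))
# Attach an L2 penalty to this layer's weights.
net = layers.fully_connected(
    inputs, 30, weights_regularizer=layers.l2_regularizer(0.001))

# Stand-in for a real task loss.
data_loss = tf.reduce_mean(net)
# Sum of everything in tf.GraphKeys.REGULARIZATION_LOSSES.
reg_loss = tf.losses.get_regularization_loss()
total_loss = data_loss + reg_loss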
Example 1: darkconv
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def darkconv(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    onlyconv = kwargs.pop('onlyconv', False)
    with tf.variable_scope(scope):
        conv_kwargs = {
            'padding': 'SAME',
            'activation_fn': None,
            'weights_initializer': variance_scaling_initializer(1.53846),
            'weights_regularizer': l2(5e-4),
            'biases_initializer': None,
            'scope': 'conv'}
        if onlyconv:
            conv_kwargs.pop('biases_initializer')
        with arg_scope([conv2d], **conv_kwargs):
            x = conv2d(*args, **kwargs)
            if onlyconv:
                return x
            x = batch_norm(x, decay=0.99, center=False, scale=True,
                           epsilon=1e-5, scope='bn')
            x = bias_add(x, scope='bias')
            x = leaky_relu(x, alpha=0.1, name='lrelu')
            return x
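The snippet above relies on aliases bound at import time in its source project (conv2d, batch_norm, bias_add, arg_scope, variance_scaling_initializer, and l2 for l2_regularizer). A plausible reconstruction of those imports, stated here as an assumption since the snippet omits them:

import tensorflow as tf
from tensorflow.contrib.framework import arg_scope
from tensorflow.contrib.layers import (batch_norm, bias_add, conv2d,
                                       l2_regularizer as l2,
                                       variance_scaling_initializer)

def leaky_relu(x, alpha=0.1, name=None):
    # Thin wrapper fixing the slope that Darknet-style models use by default.
    return tf.nn.leaky_relu(x, alpha=alpha, name=name)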
Example 2: __init__
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def __init__(self,
             params,
             device_assigner=None,
             optimizer_class=adagrad.AdagradOptimizer,
             **kwargs):
    self.device_assigner = (
        device_assigner or framework_variables.VariableDeviceChooser())
    self.params = params
    self.optimizer = optimizer_class(self.params.learning_rate)
    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
        self.regularizer = layers.l1_regularizer(
            self.params.regularization_strength)
    elif params.regularization == "l2":
        self.regularizer = layers.l2_regularizer(
            self.params.regularization_strength)
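The regularizer built in this constructor is stored on self for later use. A typical way such a stored regularizer is applied to a list of weight variables (a sketch, not code from the surrounding project) is tf.contrib.layers.apply_regularization, which sums the penalty and records it in the regularization-loss collection:

import tensorflow as tf
from tensorflow.contrib import layers

# Hypothetical weight variables standing in for the model's parameters.
w1 = tf.get_variable('w1', shape=[10, 10])
w2 = tf.get_variable('w2', shape=[10, 5])

regularizer = layers.l2_regularizer(1e-4)
# Sums regularizer(w) over the list and adds the total to
# tf.GraphKeys.REGULARIZATION_LOSSES.
penalty = layers.apply_regularization(regularizer, weights_list=[w1, w2])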
Example 3: __init__
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def __init__(self,
             params,
             device_assigner=None,
             optimizer_class=adagrad.AdagradOptimizer,
             **kwargs):
    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())
    self.params = params
    self.optimizer = optimizer_class(self.params.learning_rate)
    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
        self.regularizer = layers.l1_regularizer(
            self.params.regularization_strength)
    elif params.regularization == "l2":
        self.regularizer = layers.l2_regularizer(
            self.params.regularization_strength)
Example 4: resnet_arg_scope
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc
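An arg_scope built this way is re-entered at model-construction time; a minimal usage sketch (the input placeholder and the single conv layer are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
with arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    # Every conv2d created here inherits the L2 regularizer, the
    # variance-scaling initializer, ReLU, and the batch-norm settings.
    net = layers.conv2d(images, 64, [7, 7], stride=2, scope='conv1')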
Example 5: embed_labels
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def embed_labels(inputs, num_classes, output_dim, sn,
                 weight_decay_rate=1e-5,
                 reuse=None, scope=None):
    # TODO: move regularizer definitions to the model.
    # (The regularizer is built but not attached in the excerpt shown here.)
    weights_regularizer = ly.l2_regularizer(weight_decay_rate)

    with tf.variable_scope(scope, 'embedding', [inputs], reuse=reuse) as sc:
        inputs = tf.convert_to_tensor(inputs)
        weights = tf.get_variable(name="weights",
                                  shape=(num_classes, output_dim),
                                  initializer=init_ops.random_normal_initializer)
        # Spectral normalization of the embedding matrix.
        if sn:
            weights = spectral_normed_weight(
                weights, num_iters=1,
                update_collection=Config.SPECTRAL_NORM_UPDATE_OPS)
        embed_out = tf.nn.embedding_lookup(weights, inputs)
    return embed_out
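spectral_normed_weight is a project-local helper. A simplified sketch of the standard power-iteration implementation it follows, given here as an assumption (it ignores the update_collection bookkeeping that the real helper uses to defer the update of u):

import tensorflow as tf

def spectral_normed_weight(weights, num_iters=1, update_collection=None):
    # Flatten to a 2-D matrix of shape [prod(leading dims), out_dim].
    w_shape = weights.shape.as_list()
    w_mat = tf.reshape(weights, [-1, w_shape[-1]])
    # Persistent estimate of the dominant singular vector.
    u = tf.get_variable('u', [1, w_shape[-1]],
                        initializer=tf.truncated_normal_initializer(),
                        trainable=False)
    u_hat = u
    for _ in range(num_iters):
        v_hat = tf.nn.l2_normalize(
            tf.matmul(u_hat, w_mat, transpose_b=True), axis=1)
        u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_mat), axis=1)
    # Largest singular value of w_mat, estimated by power iteration.
    sigma = tf.squeeze(
        tf.matmul(tf.matmul(v_hat, w_mat), u_hat, transpose_b=True))
    # Divide by the spectral norm so the returned weight has norm ~1.
    w_bar = tf.reshape(w_mat / sigma, w_shape)
    with tf.control_dependencies([u.assign(u_hat)]):
        w_bar = tf.identity(w_bar)
    return w_bar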
Example 6: conv2d_fixed_padding
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
                         weight_decay):
    """Strided 2-D convolution with explicit padding."""
    # The padding is consistent and based only on `kernel_size`, not on the
    # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)
    if weight_decay is not None:
        weight_decay = contrib_layers.l2_regularizer(weight_decay)
    return tf.layers.conv2d(
        inputs=inputs, filters=filters, kernel_size=kernel_size,
        strides=strides, padding=('SAME' if strides == 1 else 'VALID'),
        use_bias=False, kernel_initializer=tf.variance_scaling_initializer(),
        kernel_regularizer=weight_decay, data_format=data_format)
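fixed_padding is not shown in the snippet; the sketch below follows the well-known helper from the official TensorFlow ResNet model and is reconstructed here as an assumption:

import tensorflow as tf

def fixed_padding(inputs, kernel_size, data_format):
    # Pad the spatial dims by kernel_size - 1 in total so that a strided
    # 'VALID' convolution behaves like 'SAME' regardless of input size.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    if data_format == 'channels_first':
        return tf.pad(inputs, [[0, 0], [0, 0],
                               [pad_beg, pad_end], [pad_beg, pad_end]])
    return tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                           [pad_beg, pad_end], [0, 0]])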
Example 7: forward
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def forward(self, images, num_classes=None, is_training=True):
    assert num_classes is not None, 'num_classes must be given when is_training=True'
    # Backbone forward pass.
    features, _ = self.backbone(images, is_training=is_training)
    # Classifier head.
    with tf.variable_scope('classifier'):
        features_drop = layers.dropout(features, keep_prob=0.5,
                                       is_training=is_training)
        logit = layers.fully_connected(
            features_drop, num_classes, activation_fn=None,
            weights_initializer=tf.random_normal_initializer(stddev=0.001),
            weights_regularizer=layers.l2_regularizer(self.weight_decay),
            biases_initializer=None,
            scope='fc_classifier')
    return {'logits': logit}
Example 8: forward
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def forward(self, images, num_classes=None, is_training=True):
    if is_training:
        assert num_classes is not None, 'num_classes must be given when is_training=True'
        # Backbone forward pass.
        features = self.backbone(images, is_training=is_training)
        # Classifier head.
        with tf.variable_scope('classifier'):
            logit = layers.fully_connected(
                features, num_classes, activation_fn=None,
                weights_initializer=tf.random_normal_initializer(stddev=0.001),
                weights_regularizer=layers.l2_regularizer(self.weight_decay),
                biases_initializer=None,
                scope='fc_classifier')
        return {'logits': logit}
    else:
        # At test time, average the features of the image and its
        # horizontal flip.
        features = self.backbone(images, is_training=is_training)
        features_flipped = self.backbone(tf.reverse(images, axis=[2]),
                                         is_training=is_training, reuse=True)
        return (features + features_flipped) / 2
Example 9: testNoSummariesOnGPU
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def testNoSummariesOnGPU(self):
    with tf.Graph().as_default():
        deploy_config = model_deploy.DeploymentConfig(num_clones=2)

        # The clone function creates a fully_connected layer with a
        # regularizer loss.
        def ModelFn():
            inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
            reg = layers.l2_regularizer(0.001)
            layers.fully_connected(inputs, 30, weights_regularizer=reg)

        model = model_deploy.deploy(
            deploy_config, ModelFn,
            optimizer=tf.train.GradientDescentOptimizer(1.0))
        # The model summary op should have a few summary inputs, and all of
        # them should be on the CPU.
        self.assertTrue(model.summary_op.op.inputs)
        for inp in model.summary_op.op.inputs:
            self.assertEqual('/device:CPU:0', inp.device)
Example 10: testNoSummariesOnGPUForEvals
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
        deploy_config = model_deploy.DeploymentConfig(num_clones=2)

        # The clone function creates a fully_connected layer with a
        # regularizer loss.
        def ModelFn():
            inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
            reg = layers.l2_regularizer(0.001)
            layers.fully_connected(inputs, 30, weights_regularizer=reg)

        # No optimizer here: it's an eval.
        model = model_deploy.deploy(deploy_config, ModelFn)
        # The model summary op should have a few summary inputs, and all of
        # them should be on the CPU.
        self.assertTrue(model.summary_op.op.inputs)
        for inp in model.summary_op.op.inputs:
            self.assertEqual('/device:CPU:0', inp.device)
Example 11: _build_regularizer
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def _build_regularizer(regularizer):
    """Builds a regularizer from config.

    Args:
        regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

    Returns:
        regularizer.

    Raises:
        ValueError: On unknown regularizer.
    """
    regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
    if regularizer_oneof == 'l1_regularizer':
        return layers.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
    if regularizer_oneof == 'l2_regularizer':
        return layers.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
    raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
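This builder comes from the TensorFlow Object Detection API's hyperparameter machinery. A usage sketch, assuming the object_detection hyperparams.proto definitions are available on the path:

from google.protobuf import text_format
from object_detection.protos import hyperparams_pb2

proto = hyperparams_pb2.Hyperparams()
text_format.Merge('regularizer { l2_regularizer { weight: 0.0004 } }', proto)
# Returns layers.l2_regularizer(scale=0.0004).
regularizer = _build_regularizer(proto.regularizer)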
Example 12: forward
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False,
            trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if reuse:
            vs.reuse_variables()
        lrelu = VAE.lrelu

        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse,
                                          trainable=trainable,
                                          is_training=is_training)
        y = tf.concat([decoder_hidden, dec_in_enc], 1)

        h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)
        h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)
        return h0
Author: chaneyddtt. Project: Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics. File: humanEncoder.py
Example 13: forward
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def forward(self, dec_in, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if reuse:
            vs.reuse_variables()
        lrelu = VAE.lrelu

        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse,
                                          trainable=trainable,
                                          is_training=is_training)
        h0 = tcl.fully_connected(dec_in_enc, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)
        h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)
        return h0
Author: chaneyddtt. Project: Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics. File: humanEncoder_ablation.py
Example 14: forward
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False,
            trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if reuse:
            vs.reuse_variables()
        lrelu = VAE.lrelu

        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse,
                                          trainable=trainable,
                                          is_training=is_training)
        y = tf.concat([decoder_hidden, dec_in_enc], 1)

        h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)
        h0 = tcl.fully_connected(h0, 70, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)
        return h0
Author: chaneyddtt. Project: Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics. File: humanEncoder_cmu.py
Example 15: _project
# Required import: from tensorflow.contrib import layers [as an alias]
# Or: from tensorflow.contrib.layers import l2_regularizer [as an alias]
def _project(self, q, k, v, scope="Linearity", reuse=None):
    """Projects queries, keys, and values with a linear layer.

    Note: We project the inputs for q, k, v *before* splitting them up to
    prepare the inputs for each head. This differs from the order in
    "Attention Is All You Need," but is functionally equivalent.
    """
    def _project_one(x, d, inner_scope):
        return tf_layers.fully_connected(
            x, d, activation_fn=None, biases_initializer=None,
            weights_regularizer=tf_layers.l2_regularizer(scale=self.l2_lambda),
            scope=inner_scope, reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        q_projected = _project_one(q, self.d_model, "q")
        k_projected = _project_one(k, self.d_model, "k")
        v_projected = _project_one(v, self.d_model, "v")
    return q_projected, k_projected, v_projected