This article collects typical usage examples of the Python function tensorflow.python.ops.nn.l2_loss. If you have been wondering what exactly l2_loss does, or how to use it, the curated code examples below may help.
Seven code examples of the l2_loss function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
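Before the examples, a minimal sketch of what l2_loss computes, using the public tf.nn.l2_loss alias: the result is half the sum of squared entries, output = sum(t ** 2) / 2, returned as a scalar.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
loss = tf.nn.l2_loss(x)  # (1 + 4 + 9) / 2 = 7.0
print(float(loss))       # 7.0 (TF 2.x eager mode)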
Example 1: l2
def l2(weights, name=None):
  """Applies l2 regularization to weights."""
  # `scale` is captured from the enclosing regularizer factory (see the
  # sketch after Example 2); it is not defined in this snippet itself.
  with ops.op_scope([weights], name, 'l2_regularizer') as scope:
    my_scale = ops.convert_to_tensor(scale,
                                     dtype=weights.dtype.base_dtype,
                                     name='scale')
    return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)
Example 2: l2
def l2(weights):
  """Applies l2 regularization to weights."""
  # `scale` and `scope` are captured from the enclosing regularizer factory.
  with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
    my_scale = ops.convert_to_tensor(scale,
                                     dtype=weights.dtype.base_dtype,
                                     name='scale')
    return standard_ops.mul(my_scale, nn.l2_loss(weights), name=name)
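In the TensorFlow source these two snippets come from, l2 is a closure: scale (and, in Example 2, scope) are captured from an enclosing regularizer factory. A hedged, self-contained sketch of that pattern against the public TF 2.x API follows; the structure is inferred from the snippets, not the library's exact implementation.

import tensorflow as tf

def l2_regularizer(scale):
  """Returns a function computing scale * l2_loss(weights)."""
  def l2(weights, name=None):
    with tf.name_scope(name or 'l2_regularizer'):
      my_scale = tf.convert_to_tensor(scale,
                                      dtype=weights.dtype.base_dtype,
                                      name='scale')
      return tf.multiply(my_scale, tf.nn.l2_loss(weights))
  return l2

weights = tf.ones([3, 3])
penalty = l2_regularizer(0.01)(weights)  # 0.01 * (9 * 1.0 / 2) = 0.045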
Example 3: loop_fn
def loop_fn(i):
  # `g` is an enclosing GradientTape; x, scale, offset, mean, variance,
  # data_format and is_training are captured from the surrounding test.
  with g:
    x1 = array_ops.gather(x, i)
    outputs = nn.fused_batch_norm(
        x1,
        scale,
        offset,
        mean=mean,
        variance=variance,
        epsilon=0.01,
        data_format=data_format,
        is_training=is_training)
    outputs = list(outputs)
    # We only test the first value of outputs when is_training is False.
    # It looks like CPU and GPU have different outputs for batch_mean
    # and batch_variance for this case.
    if not is_training:
      outputs[1] = constant_op.constant(0.)
      outputs[2] = constant_op.constant(0.)
    loss = nn.l2_loss(outputs[0])
  if is_training:
    gradients = g.gradient(loss, [x1, scale, offset])
  else:
    gradients = [constant_op.constant(0.)] * 3
  return outputs + gradients
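A loop_fn with this shape (take an index i, gather the i-th slice, return outputs plus gradients) is the convention used by TensorFlow's parallel_for tests. A hedged sketch of how such a function is typically driven; the iteration count is an assumption standing in for the leading dimension of x:

from tensorflow.python.ops.parallel_for import control_flow_ops

iters = 4  # assumed: number of slices along axis 0 of x
vectorized = control_flow_ops.pfor(loop_fn, iters)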
Example 4: testGradient
def testGradient(self):
  x_shape = [20, 7, 3]
  np.random.seed(1)  # Make it reproducible.
  x_val = np.random.random_sample(x_shape).astype(np.float64)
  with self.test_session():
    x = constant_op.constant(x_val, name="x")
    output = nn.l2_loss(x)
    # `gc` is the gradient_checker module; this compares the analytic and
    # numeric gradients of the scalar output w.r.t. x.
    err = gc.ComputeGradientError(x, x_shape, output, [1])
  print("L2Loss gradient err = %g " % err)
  err_tolerance = 1e-11
  self.assertLess(err, err_tolerance)
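The same check can be expressed against the public TF 2.x API with tf.test.compute_gradient, which returns the theoretical and numeric Jacobians. A hedged sketch, with the tolerance taken from the test above:

import numpy as np
import tensorflow as tf

x_val = np.random.random_sample([20, 7, 3]).astype(np.float64)
theoretical, numeric = tf.test.compute_gradient(tf.nn.l2_loss, [x_val])
err = np.max(np.abs(theoretical[0] - numeric[0]))
assert err < 1e-11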
Example 5: model_fn
def model_fn(inps, init_state):
  # `cell` (an LSTM-style RNNCell) is captured from the enclosing scope;
  # state.c is the cell state after the final step.
  state = init_state
  for inp in inps:
    _, state = cell(inp, state)
  output = nn.l2_loss(state.c)
  return gradient_ops.gradients(output, variables.trainable_variables())
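A hedged usage sketch for this model_fn, in the graph-mode TF 1.x style the snippet implies. The cell type, batch size, and input shapes are all assumptions; the only requirement inferred from the snippet is an LSTM-style cell whose state exposes a .c field.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=8)
init_state = cell.zero_state(batch_size=2, dtype=tf.float32)
inps = [tf.random_normal([2, 4]) for _ in range(3)]
grads = model_fn(inps, init_state)  # gradients w.r.t. trainable variables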
Example 6: loop_fn
def loop_fn(i):
  # `g` is an enclosing GradientTape; `x` is captured from the
  # surrounding test.
  with g:
    x_i = array_ops.gather(x, i)
    y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
    loss = nn.l2_loss(y)
  return y, g.gradient(loss, x_i)
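Note why l2_loss is a convenient test loss here: its gradient is the input itself (the derivative of sum(t ** 2) / 2 with respect to t is t), so g.gradient(loss, x_i) effectively scatters y back into the shape of x_i, exercising the strided-slice gradient with a trivially checkable result.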
Example 7: testL2Loss
def testL2Loss(self):
  with self.test_session():
    x = constant_op.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x")
    l2loss = nn.l2_loss(x)
    value = l2loss.eval()
    self.assertAllClose(7.0, value)
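The expected value follows directly from the definition: l2_loss([1, 0, 3, 2]) = (1² + 0² + 3² + 2²) / 2 = 14 / 2 = 7.0.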