

Python nn_ops.l2_loss Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.nn_ops.l2_loss method in Python. If you are wondering what nn_ops.l2_loss does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.nn_ops.


The following shows 7 code examples of the nn_ops.l2_loss method, sorted by popularity by default.
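For context, l2_loss(t) computes half the sum of the squared entries of t and returns a scalar: sum(t ** 2) / 2. A minimal sketch of the semantics, assuming a TensorFlow 2.x eager environment and using the public alias tf.nn.l2_loss, which exposes the same op:

import tensorflow as tf

t = tf.constant([3.0, 4.0])
# l2_loss returns sum(t ** 2) / 2 = (9 + 16) / 2
print(tf.nn.l2_loss(t).numpy())  # 12.5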

Example 1: test_unary_ops

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def test_unary_ops(self):
    ops = [
        ('relu', nn_ops.relu, nn.relu),
        ('relu6', nn_ops.relu6, nn.relu6),
        ('crelu', nn_ops.crelu, nn.crelu),
        ('elu', nn_ops.elu, nn.elu),
        ('softplus', nn_ops.softplus, nn.softplus),
        ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
        ('softmax', nn_ops.softmax, nn.softmax),
        ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
    ]
    for op_name, tf_op, lt_op in ops:
      golden_tensor = tf_op(self.original_lt.tensor)
      golden_lt = core.LabeledTensor(golden_tensor, self.axes)
      actual_lt = lt_op(self.original_lt)
      self.assertIn(op_name, actual_lt.name)
      self.assertLabeledTensorsEqual(golden_lt, actual_lt) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 19, Source file: nn_test.py

Example 2: l2norm_squared

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def l2norm_squared(v):
  return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 4, Source file: util.py
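Since l2_loss(v) already carries a factor of 1/2, multiplying by a constant 2 built in v's base dtype yields the squared L2 norm, sum(v ** 2). A quick check of that identity with the public API, assuming TensorFlow 2.x:

import tensorflow as tf

v = tf.constant([3.0, 4.0])
# 2 * l2_loss(v) == sum(v ** 2), i.e. the squared L2 norm
print((2.0 * tf.nn.l2_loss(v)).numpy())      # 25.0
print(tf.reduce_sum(tf.square(v)).numpy())   # 25.0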

Example 3: testVariableWithRegularizer

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def testVariableWithRegularizer(self):
    with self.cached_session():
      with variable_scope.variable_scope('A'):
        a = variables_lib2.variable('a', [], regularizer=nn_ops.l2_loss)
      loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertDeviceEqual(loss.device, a.device) 
Developer: google-research, Project: tf-slim, Lines of code: 8, Source file: variables_test.py

Example 4: testVariableWithRegularizerColocate

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def testVariableWithRegularizerColocate(self):
    with self.cached_session():
      with variable_scope.variable_scope('A'):
        a = variables_lib2.variable(
            'a', [], device='gpu:0', regularizer=nn_ops.l2_loss)
      loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertDeviceEqual(loss.device, a.device) 
Developer: google-research, Project: tf-slim, Lines of code: 9, Source file: variables_test.py

Example 5: testCreateConvWithWD

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def testCreateConvWithWD(self):
    height, width = 7, 9
    weight_decay = 0.01
    with self.cached_session() as sess:
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      regularizer = regularizers.l2_regularizer(weight_decay)
      layers_lib.convolution2d(
          images, 32, [3, 3], weights_regularizer=regularizer)
      l2_loss = nn_ops.l2_loss(variables.get_variables_by_name('weights')[0])
      wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
      sess.run(variables_lib.global_variables_initializer())
      self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval()) 
Developer: google-research, Project: tf-slim, Lines of code: 15, Source file: layers_test.py
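The final assertion holds because regularizers.l2_regularizer(scale) returns a function that computes scale * l2_loss(weights), so the collected regularization loss equals weight_decay * l2_loss(weights), exactly what the test compares. A minimal sketch of that relationship outside the layers machinery, assuming TensorFlow 2.x (the variable name w is illustrative only):

import tensorflow as tf

weight_decay = 0.01
w = tf.random.uniform((3, 3, 3, 32), seed=1)
# A weight-decay term of the l2_regularizer form: scale * l2_loss(w)
wd_term = weight_decay * tf.nn.l2_loss(w)
print(wd_term.numpy())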

Example 6: _computeGradient

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def _computeGradient(self, np_input, bias, dtype, data_format):
    input_shape = output_shape = np_input.shape
    bias_shape = bias.shape
    input_tensor = constant_op.constant(
        np_input, shape=input_shape, dtype=dtype)
    bias_tensor = constant_op.constant(bias, shape=bias_shape, dtype=dtype)

    if context.executing_eagerly():

      def bias_add(input_tensor, bias_tensor):
        return nn_ops.bias_add(
            input_tensor, bias_tensor, data_format=data_format)

      # The following is a work-around for TF issue 33660. Instead of
      # calculating the analytical and numerical gradients for both
      # inputs in a single call to compute_gradient, compute_gradient
      # is called for each input separately.
      def bias_add_1(input_tensor):
        return bias_add(input_tensor, bias_tensor)

      def bias_add_2(bias_tensor):
        return bias_add(input_tensor, bias_tensor)

      input_jacob_a, input_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_1, [input_tensor])
      bias_jacob_a, bias_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_2, [bias_tensor])

      # Test gradient of BiasAddGrad
      def bias_add_grad_function(upstream_gradients):
        with backprop.GradientTape() as tape:
          tape.watch(bias_tensor)
          bias_add_output = bias_add(input_tensor, bias_tensor)
          gradient_injector_output = bias_add_output * upstream_gradients
          return tape.gradient(gradient_injector_output, bias_tensor)

      upstream_tensor = self._random_tensor(output_shape, dtype)
      grad_jacob_a, grad_jacob_n = gradient_checker_v2.compute_gradient(
          bias_add_grad_function, [upstream_tensor])
    else:
      output_tensor = nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)
      jacobians = gradient_checker.compute_gradient(
          [input_tensor, bias_tensor], [input_shape, bias_shape],
          output_tensor, output_shape)
      (input_jacob_a, input_jacob_n), (bias_jacob_a, bias_jacob_n) = jacobians
      # Test gradient of BiasAddGrad
      bias_add_grad = gradients_impl.gradients(
          nn_ops.l2_loss(output_tensor), bias_tensor)[0]
      grad_jacob_a, grad_jacob_n = gradient_checker.compute_gradient(
          output_tensor, output_shape, bias_add_grad, bias_shape)

    return ((input_jacob_a, bias_jacob_a, grad_jacob_a),
            (input_jacob_n, bias_jacob_n, grad_jacob_n)) 
Developer: NVIDIA, Project: framework-determinism, Lines of code: 56, Source file: test_patch_bias_add.py
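A note on why l2_loss appears in the graph-mode branch: the gradient of l2_loss(y) = sum(y ** 2) / 2 with respect to y is y itself, so differentiating l2_loss(output_tensor) with respect to bias_tensor effectively runs BiasAddGrad with the bias_add output as the upstream gradient. This gives the checker a well-defined bias_add_grad tensor whose Jacobian with respect to output_tensor can then be compared analytically and numerically.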

Example 7: global_norm

# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import l2_loss [as alias]
def global_norm(t_list, name=None):
  """Computes the global norm of multiple tensors.

  Given a tuple or list of tensors `t_list`, this operation returns the
  global norm of the elements in all tensors in `t_list`. The global norm is
  computed as:

  `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`

  Any entries in `t_list` that are of type None are ignored.

  Args:
    t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
    name: A name for the operation (optional).

  Returns:
    A 0-D (scalar) `Tensor` of type `float`.

  Raises:
    TypeError: If `t_list` is not a sequence.
  """
  if (not isinstance(t_list, collections.Sequence)
      or isinstance(t_list, six.string_types)):
    raise TypeError("t_list should be a sequence")
  t_list = list(t_list)
  with ops.name_scope(name, "global_norm", t_list) as name:
    values = [
        ops.convert_to_tensor(
            t.values if isinstance(t, ops.IndexedSlices) else t,
            name="t_%d" % i)
        if t is not None else t
        for i, t in enumerate(t_list)]
    half_squared_norms = []
    for v in values:
      if v is not None:
        with ops.colocate_with(v):
          half_squared_norms.append(nn_ops.l2_loss(v))

    half_squared_norm = math_ops.reduce_sum(array_ops.pack(half_squared_norms))

    norm = math_ops.sqrt(
        half_squared_norm *
        constant_op.constant(2.0, dtype=half_squared_norm.dtype),
        name="global_norm")

  return norm 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 48, Source file: clip_ops.py
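This snippet targets an older TensorFlow API (array_ops.pack was renamed stack in TF 1.0, and collections.Sequence now lives in collections.abc), but the computation matches the docstring formula: sqrt(sum_i 2 * l2_loss(t_i)) = sqrt(sum_i ||t_i||**2). A minimal sketch checking it against the public API, assuming TensorFlow 2.x where the same functionality is exposed as tf.linalg.global_norm:

import tensorflow as tf

t_list = [tf.constant([3.0, 4.0]), tf.constant([[1.0, 2.0], [2.0, 4.0]])]
# sqrt of the summed squared entries across all tensors
manual = tf.sqrt(tf.add_n([2.0 * tf.nn.l2_loss(t) for t in t_list]))
print(manual.numpy())                         # ~7.0711 (= sqrt(50))
print(tf.linalg.global_norm(t_list).numpy())  # same value via the public op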


Note: The tensorflow.python.ops.nn_ops.l2_loss examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.