

Python math_ops.reduce_mean method code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.reduce_mean. If you are unsure what math_ops.reduce_mean does or how to call it in practice, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.math_ops.


The following presents 15 code examples of the math_ops.reduce_mean method, ordered roughly by popularity.
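Before the project-specific examples, here is a minimal sketch of reduce_mean itself, assuming a TensorFlow 1.x environment (the same API generation the examples below target); the tensor values are made up for illustration.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[1., 2.], [3., 4.]])
mean_all = math_ops.reduce_mean(x)           # mean of all elements -> 2.5
mean_cols = math_ops.reduce_mean(x, axis=0)  # per-column mean -> [2., 3.]

with tf.Session() as sess:
    print(sess.run([mean_all, mean_cols]))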

Example 1: _covariance

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. In the case of
  diagonal matrix just the diagonal is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov 
Developer: ryfeus, Project: lambda-packs, Lines of code: 21, Source: gmm_ops.py
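As a usage sketch (hypothetical: it assumes a TensorFlow 1.x session and the _covariance helper defined in this example, with the array_ops and math_ops imports its body needs), the function can be applied to an examples-by-features matrix:

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

x = tf.random_normal([100, 3])         # 100 examples with 3 features each
full_cov = _covariance(x, diag=False)  # full covariance matrix, shape [3, 3]
diag_cov = _covariance(x, diag=True)   # per-feature variances only, shape [1, 3]

with tf.Session() as sess:
    print(sess.run([full_cov, diag_cov]))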

Example 2: var

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
  devs_squared = math_ops.square(x - m)
  return math_ops.reduce_mean(
      devs_squared, reduction_indices=axis, keep_dims=keepdims) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 23, Source: backend.py
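The same variance computation can be written directly against reduce_mean, without the Keras backend helpers used above; a minimal sketch (TensorFlow 1.x assumed, values made up):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
m = math_ops.reduce_mean(x, axis=1, keep_dims=True)             # per-row mean
row_var = math_ops.reduce_mean(math_ops.square(x - m), axis=1)  # per-row variance -> [2/3, 2/3]

with tf.Session() as sess:
    print(sess.run(row_var))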

Example 3: loss

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source: hybrid_model.py
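Because loss is a method of a hybrid-model class, it cannot run in isolation; the following standalone sketch reproduces its two branches on dummy tensors (TensorFlow 1.x assumed, all values hypothetical):

import tensorflow as tf
from tensorflow.python.ops import math_ops, nn_ops

# Classification branch: mean sparse softmax cross-entropy over the batch.
logits = tf.constant([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
labels = tf.constant([0, 1])
xent_loss = math_ops.reduce_mean(
    nn_ops.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

# Regression branch: root of the mean squared error between predictions and targets.
preds = tf.constant([1.0, 2.0, 3.0])
targets = tf.constant([1.5, 2.0, 2.0])
rmse = math_ops.sqrt(math_ops.reduce_mean(math_ops.square(preds - targets)))

with tf.Session() as sess:
    print(sess.run([xent_loss, rmse]))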

Example 4: testTrainEvalWithReuse

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 24, Source: overfeat_test.py
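The core trick in this test is averaging fully-convolutional logits over their spatial dimensions before taking the argmax; a stripped-down sketch of just that step (TensorFlow 1.x assumed, shapes made up to mirror the test):

import tensorflow as tf
from tensorflow.python.ops import math_ops

spatial_logits = tf.random_uniform([1, 2, 2, 1000])    # [batch, height, width, num_classes]
logits = math_ops.reduce_mean(spatial_logits, [1, 2])  # average over height and width -> [1, 1000]
predictions = math_ops.argmax(logits, 1)               # -> shape [1]

with tf.Session() as sess:
    print(sess.run(predictions).shape)

Examples 5 through 9 below repeat the same pattern for alexnet_v2, vgg_16, vgg_a, and the tf-slim copies of these tests.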

Example 5: testTrainEvalWithReuse

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 24, Source: alexnet_test.py

Example 6: testTrainEvalWithReuse

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 24, Source: vgg_test.py

Example 7: testTrainEvalWithReuse

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: google-research, Project: tf-slim, Lines of code: 24, Source: overfeat_test.py

Example 8: testTrainEvalWithReuse

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: google-research, Project: tf-slim, Lines of code: 24, Source: alexnet_test.py

Example 9: testTrainEvalWithReuse

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: google-research, Project: tf-slim, Lines of code: 24, Source: vgg_test.py

Example 10: loss

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 20, Source: hybrid_model.py

Example 11: _rescale_eval_loss

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def _rescale_eval_loss(loss, weights):
  """Rescales evaluation loss according to the given weights.

  The rescaling is needed because in the training loss weights are not
  considered in the denominator, whereas  for the evaluation loss we should
  divide by the sum of weights.

  The rescaling factor is:
    R = sum_{i} 1 / sum_{i} w_{i}

  Args:
    loss: the scalar weighted loss.
    weights: weight coefficients. Either a scalar, or a `Tensor` of shape
      [batch_size].

  Returns:
    The given loss multiplied by the rescaling factor.
  """
  rescaling_factor = math_ops.reduce_mean(weights)
  return math_ops.div(loss, rescaling_factor) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 22, Source: dnn.py
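A hypothetical call of the helper above, with names and values invented for illustration (TensorFlow 1.x assumed, and the math_ops import from this example in scope):

import tensorflow as tf

weighted_loss = tf.constant(1.2)        # scalar weighted loss from training
weights = tf.constant([0.5, 1.0, 1.5])  # per-example weights; reduce_mean(weights) = 1.0

rescaled = _rescale_eval_loss(weighted_loss, weights)  # divides by the mean weight -> 1.2

with tf.Session() as sess:
    print(sess.run(rescaled))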

Example 12: per_image_standardization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image 
Developer: ryfeus, Project: lambda-packs, Lines of code: 42, Source: image_ops_impl.py
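In TensorFlow 1.x this op is exposed publicly as tf.image.per_image_standardization; a short usage sketch with a random image (values made up):

import tensorflow as tf

image = tf.random_uniform([32, 32, 3], maxval=255.0)  # dummy [height, width, channels] image
standardized = tf.image.per_image_standardization(image)

with tf.Session() as sess:
    out = sess.run(standardized)
    print(out.mean(), out.std())  # roughly 0.0 and 1.0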

Example 13: zero_fraction

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def zero_fraction(value, name=None):
  """Returns the fraction of zeros in `value`.

  If `value` is empty, the result is `nan`.

  This is useful in summaries to measure and report sparsity.  For example,

  ```python
      z = tf.nn.relu(...)
      summ = tf.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    zero = constant_op.constant(0, dtype=value.dtype, name="zero")
    return math_ops.reduce_mean(
        math_ops.cast(math_ops.equal(value, zero), dtypes.float32))


# pylint: disable=redefined-builtin 
Developer: ryfeus, Project: lambda-packs, Lines of code: 29, Source: nn_impl.py
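A quick check through the public wrapper tf.nn.zero_fraction (TensorFlow 1.x assumed, values made up):

import tensorflow as tf

value = tf.constant([0., 1., 0., 2.])
frac = tf.nn.zero_fraction(value)  # reduce_mean over the is-zero mask -> 0.5

with tf.Session() as sess:
    print(sess.run(frac))  # 0.5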

Example 14: _do_layer_inference

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def _do_layer_inference(self, layer, data):

    # If this is a collection of layers, return the mean of their inference
    # results.
    if isinstance(layer, collections.Iterable):
      return math_ops.reduce_mean(
          array_ops.stack([l.inference_graph(data) for l in layer]), 0)
    # If this is a single layer, return its inference result.
    else:
      return layer.inference_graph(data) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 12, Source: hybrid_model.py

Example 15: average_size

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.variables.device_dummies[i].device):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))

  # pylint: disable=unused-argument 
Developer: ryfeus, Project: lambda-packs, Lines of code: 15, Source: tensor_forest.py
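The averaging step at the heart of average_size, reproduced with made-up per-tree sizes (TensorFlow 1.x assumed):

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

sizes = [tf.constant(10), tf.constant(14), tf.constant(12)]  # hypothetical per-tree node counts
avg_size = math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))

with tf.Session() as sess:
    print(sess.run(avg_size))  # 12.0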


Note: The tensorflow.python.ops.math_ops.reduce_mean examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors, and copyright of the source code remains with those authors; consult each project's License before distributing or reusing the code. Do not reproduce this article without permission.