

Python math_ops.reduce_mean Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.math_ops.reduce_mean method in Python. If you are wondering what exactly math_ops.reduce_mean does, how to use it, or what real examples of it look like, the curated code examples below may help. You can also browse further usage examples for the containing module, tensorflow.python.ops.math_ops.


Fifteen code examples of the math_ops.reduce_mean method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
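Before diving into the examples, here is a minimal sketch (assuming TensorFlow 2.x, where the public tf.reduce_mean maps to the same underlying op as math_ops.reduce_mean) illustrating the basic reduction behavior used throughout:

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])

print(tf.reduce_mean(x))                           # mean of all elements -> 3.5
print(tf.reduce_mean(x, axis=0))                   # column means -> [2.5, 3.5, 4.5]
print(tf.reduce_mean(x, axis=1, keepdims=True))    # row means, shape (2, 1)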

Example 1: _covariance

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. In the case of a diagonal
    covariance, just the diagonal is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov 
Developer: ryfeus, Project: lambda-packs, Lines: 21, Source: gmm_ops.py
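As a usage sketch of the covariance pattern above (assuming TF 2.x eager execution and public API names; _covariance itself is a private helper in gmm_ops.py, so the snippet only mirrors its math):

import tensorflow as tf

x = tf.random.normal([100, 3])                     # 100 examples, 3 features
n = tf.cast(tf.shape(x)[0], tf.float32)
x_centered = x - tf.reduce_mean(x, axis=0, keepdims=True)

full_cov = tf.matmul(x_centered, x_centered, transpose_a=True) / (n - 1)          # shape (3, 3)
diag_cov = tf.reduce_sum(tf.square(x_centered), axis=0, keepdims=True) / (n - 1)  # shape (1, 3)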

Example 2: var

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis along which to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
  devs_squared = math_ops.square(x - m)
  return math_ops.reduce_mean(
      devs_squared, reduction_indices=axis, keep_dims=keepdims) 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: backend.py
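A quick numeric check of the pattern above, i.e. variance as the reduce_mean of squared deviations (assuming TF 2.x; note this is the population variance, dividing by N rather than N - 1):

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
m = tf.reduce_mean(x, axis=0, keepdims=True)       # [[2., 3.]]
v = tf.reduce_mean(tf.square(x - m), axis=0)       # [1., 1.]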

Example 3: loss

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: hybrid_model.py
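The two branches of this loss reduce to the following standalone sketch with hypothetical tensors (TF 2.x public names; tf.nn.sparse_softmax_cross_entropy_with_logits plays the role of the nn_ops call above):

import tensorflow as tf

# Regression branch: root-mean-squared error via reduce_mean.
preds = tf.constant([2.0, 0.5, 1.0])
targets = tf.constant([1.5, 1.0, 1.0])
rmse = tf.sqrt(tf.reduce_mean(tf.square(preds - targets)))

# Classification branch: mean sparse softmax cross-entropy.
logits = tf.constant([[2.0, 0.5], [0.1, 1.5]])
labels = tf.constant([0, 1])
xent = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))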

Example 4: testTrainEvalWithReuse

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 24, Source: overfeat_test.py
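The reduce_mean over axes [1, 2] in this test performs global average pooling: the fully-convolutional evaluation output is averaged over its spatial dimensions before taking the argmax. The same pattern appears in the remaining testTrainEvalWithReuse examples below. A standalone sketch with hypothetical shapes (TF 2.x names):

import tensorflow as tf

logits = tf.random.normal([1, 2, 2, 1000])         # [batch, height, width, num_classes]
pooled = tf.reduce_mean(logits, axis=[1, 2])       # [batch, num_classes]
predictions = tf.argmax(pooled, axis=1)            # [batch]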

Example 5: testTrainEvalWithReuse

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 24, Source: alexnet_test.py

Example 6: testTrainEvalWithReuse

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 24, Source: vgg_test.py

Example 7: testTrainEvalWithReuse

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: google-research, Project: tf-slim, Lines: 24, Source: overfeat_test.py

Example 8: testTrainEvalWithReuse

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: google-research, Project: tf-slim, Lines: 24, Source: alexnet_test.py

Example 9: testTrainEvalWithReuse

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.cached_session():
      train_inputs = random_ops.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      variable_scope.get_variable_scope().reuse_variables()
      eval_inputs = random_ops.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(
          eval_inputs, is_training=False, spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = math_ops.reduce_mean(logits, [1, 2])
      predictions = math_ops.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size]) 
Developer: google-research, Project: tf-slim, Lines: 24, Source: vgg_test.py

Example 10: loss

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Developer: tobegit3hub, Project: deep_image_model, Lines: 20, Source: hybrid_model.py

Example 11: _rescale_eval_loss

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def _rescale_eval_loss(loss, weights):
  """Rescales evaluation loss according to the given weights.

  The rescaling is needed because the training loss does not include the
  weights in its denominator, whereas the evaluation loss should be divided
  by the sum of the weights.

  The rescaling factor is:
    R = (sum_{i} 1) / (sum_{i} w_{i})

  Args:
    loss: the scalar weighted loss.
    weights: weight coefficients. Either a scalar, or a `Tensor` of shape
      [batch_size].

  Returns:
    The given loss multiplied by the rescaling factor.
  """
  rescaling_factor = math_ops.reduce_mean(weights)
  return math_ops.div(loss, rescaling_factor) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 22, Source: dnn.py
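A worked numeric example (hypothetical numbers): with weights [1.0, 3.0], reduce_mean(weights) is 2.0, so dividing the weighted loss by it is equivalent to multiplying by N / sum(w) = 2 / 4, i.e. normalizing by the sum of the weights instead of the batch size:

import tensorflow as tf

weights = tf.constant([1.0, 3.0])
weighted_loss = tf.constant(8.0)                   # scalar weighted loss from training

rescaling_factor = tf.reduce_mean(weights)         # 2.0
eval_loss = weighted_loss / rescaling_factor       # 4.0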

Example 12: per_image_standardization

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image 
Developer: ryfeus, Project: lambda-packs, Lines: 42, Source: image_ops_impl.py
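A minimal sketch of the same computation with public TF 2.x names and a hypothetical image tensor; reduce_mean supplies both the mean and the E[x^2] term of the variance (TensorFlow also exposes this operation as tf.image.per_image_standardization):

import tensorflow as tf

image = tf.random.uniform([32, 32, 3])             # hypothetical [height, width, channels]
num_pixels = tf.cast(tf.size(image), tf.float32)

mean = tf.reduce_mean(image)
variance = tf.nn.relu(tf.reduce_mean(tf.square(image)) - tf.square(mean))
adjusted_stddev = tf.maximum(tf.sqrt(variance), tf.math.rsqrt(num_pixels))

standardized = (image - mean) / adjusted_stddev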

Example 13: zero_fraction

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def zero_fraction(value, name=None):
  """Returns the fraction of zeros in `value`.

  If `value` is empty, the result is `nan`.

  This is useful in summaries to measure and report sparsity.  For example,

  ```python
      z = tf.nn.relu(...)
      summ = tf.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    zero = constant_op.constant(0, dtype=value.dtype, name="zero")
    return math_ops.reduce_mean(
        math_ops.cast(math_ops.equal(value, zero), dtypes.float32))


# pylint: disable=redefined-builtin 
Developer: ryfeus, Project: lambda-packs, Lines: 29, Source: nn_impl.py
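A quick numeric check (TF 2.x eager): the equality mask is cast to float32, and reduce_mean then gives the fraction of zeros directly.

import tensorflow as tf

value = tf.constant([0.0, 1.0, 0.0, 2.0])
fraction = tf.reduce_mean(tf.cast(tf.equal(value, 0.0), tf.float32))   # 0.5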

Example 14: _do_layer_inference

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def _do_layer_inference(self, layer, data):

    # If this is a collection of layers, return the mean of their inference
    # results.
    if isinstance(layer, collections.Iterable):
      return math_ops.reduce_mean(
          array_ops.stack([l.inference_graph(data) for l in layer]), 0)
    # If this is a single layer, return its inference result.
    else:
      return layer.inference_graph(data) 
Developer: ryfeus, Project: lambda-packs, Lines: 12, Source: hybrid_model.py
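The ensemble-averaging pattern above, sketched with hypothetical per-layer outputs (TF 2.x names): stack the member outputs along a new leading axis and take reduce_mean over it.

import tensorflow as tf

layer_outputs = [tf.constant([1.0, 2.0]),
                 tf.constant([3.0, 4.0]),
                 tf.constant([5.0, 6.0])]

ensemble_mean = tf.reduce_mean(tf.stack(layer_outputs), axis=0)   # [3.0, 4.0]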

Example 15: average_size

# Required module import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_mean [as alias]
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.variables.device_dummies[i].device):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))

  # pylint: disable=unused-argument 
Developer: ryfeus, Project: lambda-packs, Lines: 15, Source: tensor_forest.py


Note: The tensorflow.python.ops.math_ops.reduce_mean examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their original authors, who retain copyright of the source code; for redistribution and use, please refer to each project's license. Do not republish without permission.