

Python sparse_ops.sparse_reduce_sum Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.sparse_ops.sparse_reduce_sum. If you are unsure what sparse_ops.sparse_reduce_sum does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the tensorflow.python.ops.sparse_ops module.


The following presents 7 code examples of sparse_ops.sparse_reduce_sum, sorted by popularity by default.
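Before the examples, here is a minimal usage sketch, not taken from the projects below; it assumes TensorFlow 1.x graph mode (in TF 2 this op became tf.sparse.reduce_sum). sparse_reduce_sum reduces a SparseTensor along the given axes and returns a dense Tensor:

import tensorflow as tf
from tensorflow.python.ops import sparse_ops

# A 2x3 SparseTensor with non-zeros at (0, 0) = 1.0 and (1, 2) = 2.0.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]],
                     values=[1.0, 2.0],
                     dense_shape=[2, 3])

with tf.Session() as sess:
  # Reduce over all axes: returns a dense scalar, 3.0.
  print(sess.run(sparse_ops.sparse_reduce_sum(sp)))
  # Reduce over axis 1 (columns): returns a dense Tensor [1.0, 2.0].
  print(sess.run(sparse_ops.sparse_reduce_sum(sp, 1)))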

Example 1: testGradient

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def testGradient(self):
    np.random.seed(8161)
    test_dims = [(11, 1, 5, 7, 1), (2, 2)]
    with self.test_session(use_gpu=False):
      for dims in test_dims:
        sp_t, nnz = _sparsify(np.random.randn(*dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)

          err = tf.test.compute_gradient_error(sp_t.values, (nnz,), reduced,
                                               reduced.eval().shape)
          self.assertLess(err, 1e-3)

        # Tests for negative axes.
        reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
        err = tf.test.compute_gradient_error(sp_t.values, (nnz,), reduced,
                                             reduced.eval().shape)
        self.assertLess(err, 1e-3) 
Developer ID: tobegit3hub, Project: deep_image_model, Code lines: 22, Source: sparse_ops_test.py
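A stripped-down version of the same check, for readers who want to run it outside the test harness (a sketch assuming TensorFlow 1.x graph mode; _sparsify above is a helper from the test file and is replaced here by an explicit SparseTensor):

import tensorflow as tf
from tensorflow.python.ops import sparse_ops

vals = tf.constant([1.0, 2.0, 3.0])
sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                     values=vals, dense_shape=[2, 3])
reduced = sparse_ops.sparse_reduce_sum(sp, 1)  # dense, shape (2,)

with tf.Session():
  # Compares the analytic Jacobian of `reduced` w.r.t. `vals` against a
  # numeric finite-difference estimate; the error should be ~0.
  err = tf.test.compute_gradient_error(vals, (3,), reduced, (2,))
  print(err)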

Example 2: _SparseSoftmaxGrad

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def _SparseSoftmaxGrad(op, grad):
  """Gradients for SparseSoftmax.

  The calculation is the same as SoftmaxGrad:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  where we now only operate on the non-zero values present in the SparseTensors.

  Args:
    op: the SparseSoftmax op.
    grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.

  Returns:
    Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
  """
  indices, shape = op.inputs[0], op.inputs[2]
  out_vals = op.outputs[0]
  sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)
  sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)
  sp_product = sparse_tensor.SparseTensor(
      indices, sp_output.values * sp_grad.values, shape)

  # [..., B, 1], dense.
  sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True)
  # sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.
  sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)

  grad_x = sp_sum.values * sp_output.values
  return [None, grad_x, None] 
Developer ID: ryfeus, Project: lambda-packs, Code lines: 32, Source: sparse_grad.py
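A quick numeric sanity check of this gradient (a sketch, assuming TensorFlow 1.x graph mode): the per-row softmax outputs sum to 1, so the gradient of that sum with respect to the inputs must vanish, which exercises _SparseSoftmaxGrad end to end:

import tensorflow as tf
from tensorflow.python.ops import sparse_ops

vals = tf.constant([1.0, 2.0, 3.0])
sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2]],
                     values=vals, dense_shape=[1, 3])
out = sparse_ops.sparse_softmax(sp)

# d(sum_j softmax_j)/dx_i = softmax_i - softmax_i * sum_j softmax_j = 0.
grad = tf.gradients(sparse_ops.sparse_reduce_sum(out, 1), vals)[0]

with tf.Session() as sess:
  print(sess.run(out.values))  # ~[0.090, 0.245, 0.665]
  print(sess.run(grad))        # ~[0.0, 0.0, 0.0]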

Example 3: _build_multilabel_adjacency

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def _build_multilabel_adjacency(sparse_labels):
  """Builds multilabel adjacency matrix.

  As of March 14th, 2017, there's no op for the dot product between
  two sparse tensors in TF. However, there is a `sparse_minimum` op, which is
  equivalent to an AND op between two sparse boolean tensors.
  This computes the dot product between two sparse boolean inputs.

  Args:
    sparse_labels: List of 1-D boolean sparse tensors.

  Returns:
    adjacency_matrix: 2-D dense `Tensor`.
  """
  num_pairs = len(sparse_labels)
  adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
  for i in range(num_pairs):
    for j in range(num_pairs):
      sparse_dot_product = math_ops.cast(
          sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
              sparse_labels[i], sparse_labels[j])),
          dtypes.float32)
      sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
      sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
      one_hot_matrix = array_ops.pad(sparse_dot_product,
                                     [[i, num_pairs-i-1],
                                      [j, num_pairs-j-1]], 'CONSTANT')
      adjacency_matrix += one_hot_matrix

  return adjacency_matrix 
Developer ID: google-research, Project: tf-slim, Code lines: 32, Source: metric_learning.py
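A toy invocation of the function above (a sketch; it assumes the function and its tensorflow.python.* imports from this example, plus TensorFlow 1.x). The labels are 0/1 indicator vectors stored sparsely, so sparse_minimum acts as a logical AND and the reduced sum counts shared labels:

import tensorflow as tf

# Two samples over 4 classes: sample 0 has labels {0, 2}, sample 1 has {2, 3}.
a = tf.SparseTensor(indices=[[0], [2]], values=[1.0, 1.0], dense_shape=[4])
b = tf.SparseTensor(indices=[[2], [3]], values=[1.0, 1.0], dense_shape=[4])

adj = _build_multilabel_adjacency([a, b])
with tf.Session() as sess:
  # Entry (i, j) counts labels shared by samples i and j.
  print(sess.run(adj))  # [[2. 1.] [1. 2.]]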

Example 4: _build_multilabel_adjacency

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def _build_multilabel_adjacency(sparse_labels):
  """Builds multilabel adjacency matrix.

  As of March 14th, 2017, there's no op for the dot product between
  two sparse tensors in TF. However, there is a `sparse_minimum` op, which is
  equivalent to an AND op between two sparse boolean tensors.
  This computes the dot product between two sparse boolean inputs.

  Args:
    sparse_labels: List of 1-D boolean sparse tensors.

  Returns:
    adjacency_matrix: 2-D dense `Tensor`.
  """
  num_pairs = len(sparse_labels)
  adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
  for i in range(num_pairs):
    for j in range(num_pairs):
      sparse_dot_product = math_ops.to_float(
          sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
              sparse_labels[i], sparse_labels[j])))
      sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
      sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
      one_hot_matrix = array_ops.pad(sparse_dot_product,
                                     [[i, num_pairs-i-1],
                                      [j, num_pairs-j-1]], 'CONSTANT')
      adjacency_matrix += one_hot_matrix

  return adjacency_matrix 
Developer ID: CongWeilin, Project: cluster-loss-tensorflow, Code lines: 31, Source: metric_loss_ops.py

Example 5: _compare

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def _compare(self, sp_t, reduction_axes, ndims, keep_dims):
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()

    np_ans = densified
    if reduction_axes is None:
      np_ans = np.sum(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)

    with self.test_session():
      tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
                                                  keep_dims)
      out_dense = tf_dense_ans.eval()

      tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t, reduction_axes,
                                                          keep_dims)
      # Convert to dense for comparison purposes.
      out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()

    self.assertAllClose(np_ans, out_dense)
    self.assertAllClose(np_ans, out_sparse) 
Developer ID: tobegit3hub, Project: deep_image_model, Code lines: 31, Source: sparse_ops_test.py
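A standalone check in the spirit of _compare above (a sketch, assuming TensorFlow 1.x): densify with NumPy, reduce, and compare against both the dense-output and the sparse-output reduction ops:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import sparse_ops

dense = np.array([[1.0, 0.0, 2.0],
                  [0.0, 3.0, 0.0]])
sp = tf.SparseTensor(indices=np.argwhere(dense),
                     values=dense[dense != 0],
                     dense_shape=dense.shape)

with tf.Session() as sess:
  out_dense = sess.run(sparse_ops.sparse_reduce_sum(sp, 1, keep_dims=True))
  out_sparse = sess.run(sparse_ops.sparse_tensor_to_dense(
      sparse_ops.sparse_reduce_sum_sparse(sp, 1, keep_dims=True)))
  np_ans = dense.sum(axis=1, keepdims=True)
  np.testing.assert_allclose(np_ans, out_dense)
  np.testing.assert_allclose(np_ans, out_sparse)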

Example 6: testInvalidAxes

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def testInvalidAxes(self):
    sp_t = tf.SparseTensor(self.ind, self.vals, self.shape)
    with self.test_session(use_gpu=False):
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        sparse_ops.sparse_reduce_sum(sp_t, 2).eval() 
Developer ID: tobegit3hub, Project: deep_image_model, Code lines: 9, Source: sparse_ops_test.py
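For the rank-2 SparseTensor used here, valid reduction axes lie in the half-open interval [-2, 2), so both -3 and 2 fall outside it and trigger the op-level errors that the test asserts.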

Example 7: calculate_loss

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_reduce_sum [as alias]
def calculate_loss(input_mat, row_factors, col_factors, regularization=None,
                   w0=1., row_weights=None, col_weights=None):
  """Calculates the loss of a given factorization.

  Uses a non-distributed method, different from the one implemented in the
  WALS model. The weight of an observed entry (i, j) (i.e., one where
  input_mat[i, j] is non-zero) is (w0 + row_weights[i] * col_weights[j]).

  Args:
    input_mat: The input matrix, a SparseTensor of rank 2.
    row_factors: The row factors, a dense Tensor of rank 2.
    col_factors: The col factors, a dense Tensor of rank 2.
    regularization: the regularization coefficient, a scalar.
    w0: the weight of unobserved entries. A scalar.
    row_weights: A dense tensor of rank 1.
    col_weights: A dense tensor of rank 1.

  Returns:
    The total loss.
  """
  wr = (array_ops.expand_dims(row_weights, 1) if row_weights is not None
        else constant_op.constant(1.))
  wc = (array_ops.expand_dims(col_weights, 0) if col_weights is not None
        else constant_op.constant(1.))
  reg = (regularization if regularization is not None
         else constant_op.constant(0.))

  row_indices, col_indices = array_ops.split(input_mat.indices,
                                             axis=1,
                                             num_or_size_splits=2)
  gathered_row_factors = array_ops.gather(row_factors, row_indices)
  gathered_col_factors = array_ops.gather(col_factors, col_indices)
  sp_approx_vals = array_ops.squeeze(math_ops.matmul(
      gathered_row_factors, gathered_col_factors, adjoint_b=True))
  sp_approx = sparse_tensor.SparseTensor(
      indices=input_mat.indices,
      values=sp_approx_vals,
      dense_shape=input_mat.dense_shape)

  sp_approx_sq = math_ops.square(sp_approx)
  row_norm = math_ops.reduce_sum(math_ops.square(row_factors))
  col_norm = math_ops.reduce_sum(math_ops.square(col_factors))
  row_col_norm = math_ops.reduce_sum(math_ops.square(math_ops.matmul(
      row_factors, col_factors, transpose_b=True)))

  resid = sparse_ops.sparse_add(input_mat, sp_approx * (-1))
  resid_sq = math_ops.square(resid)
  loss = w0 * (
      sparse_ops.sparse_reduce_sum(resid_sq) -
      sparse_ops.sparse_reduce_sum(sp_approx_sq)
      )
  loss += (sparse_ops.sparse_reduce_sum(wr * (resid_sq * wc)) +
           w0 * row_col_norm + reg * (row_norm + col_norm))
  return loss.eval() 
Developer ID: ryfeus, Project: lambda-packs, Code lines: 56, Source: factorization_ops_test_utils.py
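A toy invocation of calculate_loss above (a sketch; it assumes the function and its tensorflow.python.* imports from this example, plus TensorFlow 1.x, since the function calls loss.eval() and must run inside a session). With rank-1 all-ones factors approximating a 2x2 matrix whose observed entries are 1 and 2, the loss works out to 4.0:

import tensorflow as tf

input_mat = tf.SparseTensor(indices=[[0, 0], [1, 1]],
                            values=[1.0, 2.0],
                            dense_shape=[2, 2])
row_factors = tf.constant([[1.0], [1.0]])  # 2 rows, rank-1 factors
col_factors = tf.constant([[1.0], [1.0]])  # 2 cols, rank-1 factors

with tf.Session():
  # The approximation is all ones: observed residuals are 0 and 1 with
  # weight w0 + 1, and each unobserved entry contributes w0 * 1^2,
  # giving 2*0 + 2*1 + 1 + 1 = 4.0.
  print(calculate_loss(input_mat, row_factors, col_factors, w0=1.0))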


Note: The tensorflow.python.ops.sparse_ops.sparse_reduce_sum examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.