

Python sparse_ops.sparse_add method code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.sparse_ops.sparse_add. If you are wondering what sparse_ops.sparse_add does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples for the module it belongs to, tensorflow.python.ops.sparse_ops.


Five code examples of the sparse_ops.sparse_add method are shown below, sorted by popularity by default.
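
Before the examples, here is a minimal sketch of what sparse_ops.sparse_add does, written against the TF 1.x API these snippets target; the tensor values are made up for illustration:

import tensorflow as tf
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops

# Two 2x3 sparse tensors that overlap at index (0, 1).
a = sparse_tensor.SparseTensor(indices=[[0, 1], [1, 0]],
                               values=[1.0, 2.0], dense_shape=[2, 3])
b = sparse_tensor.SparseTensor(indices=[[0, 1], [1, 2]],
                               values=[3.0, 4.0], dense_shape=[2, 3])

# sparse + sparse -> SparseTensor; overlapping entries are summed.
sp_sum = sparse_ops.sparse_add(a, b)

# sparse + dense -> dense Tensor (only one operand needs to be sparse).
dense_sum = sparse_ops.sparse_add(tf.zeros([2, 3]), a)

with tf.Session() as sess:
  print(sess.run(sparse_ops.sparse_tensor_to_dense(sp_sum)))
  # [[0. 4. 0.]
  #  [2. 0. 4.]]
  print(sess.run(dense_sum))
  # [[0. 1. 0.]
  #  [2. 0. 0.]]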

Example 1: _SparseDenseCwiseMulOrDivGrad

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_add [as alias]
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 34, Source: sparse_grad.py
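
The comment in the gradient above points out that scaled_indices can contain duplicates after the integer division by scaling, which is why the code goes through sparse_add against a zeros tensor instead of sparse_to_dense. A small sketch of that accumulation behavior, with made-up indices and values (assuming the TF 1.x semantics where adding a SparseTensor to a dense tensor scatter-adds duplicate entries):

import tensorflow as tf
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops, sparse_ops

# Index (0, 0) appears twice, as can happen after x_indices // scaling.
dup_indices = [[0, 0], [0, 0], [1, 1]]
dup_values = [1.0, 2.0, 3.0]
y = array_ops.zeros([2, 2])

# The two contributions at (0, 0) are summed into the dense result.
dy = sparse_ops.sparse_add(
    array_ops.zeros_like(y),
    sparse_tensor.SparseTensor(dup_indices, dup_values, [2, 2]))

with tf.Session() as sess:
  print(sess.run(dy))
  # [[3. 0.]
  #  [0. 3.]]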

Example 2: _apply_transform

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_add [as alias]
def _apply_transform(self, input_tensors, **kwargs):
    pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
                     isinstance(input_tensors[1], sparse_tensor.SparseTensor))

    if pair_sparsity == (False, False):
      result = input_tensors[0] + input_tensors[1]
    # note tf.sparse_add accepts the mixed cases,
    # so long as at least one input is sparse.
    else:
      result = sparse_ops.sparse_add(input_tensors[0], input_tensors[1])

    # pylint: disable=not-callable
    return self.return_type(result) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 15, Source: sum.py

Example 3: _apply_transform

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_add [as alias]
def _apply_transform(self, input_tensors, **kwargs):
    pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
                     isinstance(input_tensors[1], sparse_tensor.SparseTensor))

    if pair_sparsity == (False, False):
      result = input_tensors[0] - input_tensors[1]
    # note tf.sparse_add accepts the mixed cases,
    # so long as at least one input is sparse.
    elif not pair_sparsity[1]:
      result = sparse_ops.sparse_add(input_tensors[0], - input_tensors[1])
    else:
      result = sparse_ops.sparse_add(input_tensors[0],
                                     _negate_sparse(input_tensors[1]))
    # pylint: disable=not-callable
    return self.return_type(result) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 17, Source: difference.py
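
Example 3 calls a helper _negate_sparse that is defined elsewhere in difference.py and is not shown in the snippet. A plausible minimal reconstruction, assuming it simply flips the sign of the values while keeping the indices and shape:

from tensorflow.python.framework import sparse_tensor

def _negate_sparse(st):
  # Hypothetical reconstruction: same indices and dense_shape, negated values.
  return sparse_tensor.SparseTensor(indices=st.indices,
                                    values=-st.values,
                                    dense_shape=st.dense_shape)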

Example 4: _SparseDenseCwiseMulOrDivGrad

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_add [as alias]
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(0, [array_ops.ones(num_added_dims,
                                                          ops.dtypes.int64),
                                           y_shape])

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat(0, [[0], num_added_dims]),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy) 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 35, Source: sparse_grad.py
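
Examples 1 and 4 are the same gradient function taken from two different TensorFlow vintages; the only difference is the argument order of array_ops.concat, which changed from concat(axis, values) to concat(values, axis) around TensorFlow 1.0. A minimal illustration of the newer call, with the older form noted in a comment:

import tensorflow as tf
from tensorflow.python.ops import array_ops

a = tf.constant([1, 2], dtype=tf.int64)
b = tf.constant([3, 4], dtype=tf.int64)

# TF 1.0+ signature, as used in Example 1: values first, then the axis.
merged = array_ops.concat([a, b], 0)

# The pre-1.0 call in Example 4, array_ops.concat(0, [a, b]),
# produced the same result on those older releases.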

Example 5: calculate_loss

# Required import: from tensorflow.python.ops import sparse_ops [as alias]
# Or: from tensorflow.python.ops.sparse_ops import sparse_add [as alias]
def calculate_loss(input_mat, row_factors, col_factors, regularization=None,
                   w0=1., row_weights=None, col_weights=None):
  """Calculates the loss of a given factorization.

  Using a non-distributed method, different from the one implemented in the
  WALS model. The weight of an observed entry (i, j) (i.e. such that
  input_mat[i, j] is non-zero) is (w0 + row_weights[i] * col_weights[j]).

  Args:
    input_mat: The input matrix, a SparseTensor of rank 2.
    row_factors: The row factors, a dense Tensor of rank 2.
    col_factors: The col factors, a dense Tensor of rank 2.
    regularization: the regularization coefficient, a scalar.
    w0: the weight of unobserved entries. A scalar.
    row_weights: A dense tensor of rank 1.
    col_weights: A dense tensor of rank 1.

  Returns:
    The total loss.
  """
  wr = (array_ops.expand_dims(row_weights, 1) if row_weights is not None
        else constant_op.constant(1.))
  wc = (array_ops.expand_dims(col_weights, 0) if col_weights is not None
        else constant_op.constant(1.))
  reg = (regularization if regularization is not None
         else constant_op.constant(0.))

  row_indices, col_indices = array_ops.split(input_mat.indices,
                                             axis=1,
                                             num_or_size_splits=2)
  gathered_row_factors = array_ops.gather(row_factors, row_indices)
  gathered_col_factors = array_ops.gather(col_factors, col_indices)
  sp_approx_vals = array_ops.squeeze(math_ops.matmul(
      gathered_row_factors, gathered_col_factors, adjoint_b=True))
  sp_approx = sparse_tensor.SparseTensor(
      indices=input_mat.indices,
      values=sp_approx_vals,
      dense_shape=input_mat.dense_shape)

  sp_approx_sq = math_ops.square(sp_approx)
  row_norm = math_ops.reduce_sum(math_ops.square(row_factors))
  col_norm = math_ops.reduce_sum(math_ops.square(col_factors))
  row_col_norm = math_ops.reduce_sum(math_ops.square(math_ops.matmul(
      row_factors, col_factors, transpose_b=True)))

  resid = sparse_ops.sparse_add(input_mat, sp_approx * (-1))
  resid_sq = math_ops.square(resid)
  loss = w0 * (
      sparse_ops.sparse_reduce_sum(resid_sq) -
      sparse_ops.sparse_reduce_sum(sp_approx_sq)
      )
  loss += (sparse_ops.sparse_reduce_sum(wr * (resid_sq * wc)) +
           w0 * row_col_norm + reg * (row_norm + col_norm))
  return loss.eval() 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 56, Source: factorization_ops_test_utils.py
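
A small usage sketch for calculate_loss, assuming a TF 1.x graph and an active default session (the loss.eval() at the end of the function requires one); the matrix and factor values are made up for illustration:

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor

# A 3x2 rating matrix with three observed entries.
input_mat = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 1], [2, 0]],
                                       values=[5.0, 3.0, 1.0],
                                       dense_shape=[3, 2])

# Rank-2 factorization: 3x2 row factors and 2x2 column factors.
row_factors = tf.constant(np.random.rand(3, 2), dtype=tf.float32)
col_factors = tf.constant(np.random.rand(2, 2), dtype=tf.float32)

with tf.Session():
  loss = calculate_loss(input_mat, row_factors, col_factors,
                        regularization=0.01, w0=0.5)
  print(loss)  # a Python float returned by loss.eval()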


Note: The tensorflow.python.ops.sparse_ops.sparse_add examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their authors, and the copyright of the source code belongs to the original authors; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.