

Python gen_array_ops._broadcast_gradient_args Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.gen_array_ops._broadcast_gradient_args. If you are wondering what exactly _broadcast_gradient_args does, how to call it, or what real-world uses look like, the curated examples below should help.


Fifteen code examples of the _broadcast_gradient_args function are shown below, sorted by popularity by default.
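Before the examples, here is a minimal sketch of what the op itself computes (assuming a TF 1.x-era environment, where this private generated wrapper lives in tensorflow.python.ops.gen_array_ops; the shapes below are purely illustrative). Given the shapes of two broadcastable tensors, it returns, for each input, the axes of the broadcast result along which that input was expanded; the gradient functions in the examples sum over exactly those axes:

import tensorflow as tf
from tensorflow.python.ops import gen_array_ops

sx = tf.constant([2, 3, 1])  # shape of x
sy = tf.constant([1, 3, 4])  # shape of y
# rx/ry are the axes where x/y were broadcast in the result shape [2, 3, 4].
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)  # pylint: disable=protected-access

with tf.Session() as sess:
  print(sess.run([rx, ry]))  # [array([2], dtype=int32), array([0], dtype=int32)]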

Example 1: _SubGrad

def _SubGrad(op, grad):
    """Gradient for Sub."""
    x = op.inputs[0]
    y = op.inputs[1]
    sx = array_ops.shape(x)
    sy = array_ops.shape(y)
    rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
    return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
            array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
Author: ChanningPing, Project: tensorflow, Lines: 7, Source: math_grad.py

Example 2: _BetaincGrad

def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # Two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  _, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (
      gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
      gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
                           (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (
      None,  # da
      None,  # db
      array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Author: neuroradiology, Project: tensorflow, Lines: 26, Source: math_grad.py
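For reference, the standard identity behind partial_x (background knowledge, not stated in the snippet itself): the regularized incomplete beta function satisfies

    \frac{\partial}{\partial x} I_x(a, b) = \frac{x^{a-1} (1 - x)^{b-1}}{B(a, b)}

which is exactly what partial_x computes; exponentiating a sum of logs avoids overflow in the large lgamma terms.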

Example 3: _reduce_and_reshape_grad

def _reduce_and_reshape_grad(g, t):
  """Returns the gradient, sum-reduced and reshaped to `t`'s shape."""
  shape = array_ops.shape(t)
  g_shape = array_ops.shape(g)
  # pylint: disable=protected-access
  bcast_dims, _ = gen_array_ops._broadcast_gradient_args(shape, g_shape)
  # pylint: enable=protected-access
  return array_ops.reshape(math_ops.reduce_sum(g, bcast_dims), shape)
Author: AbhinavJain13, Project: tensorflow, Lines: 8, Source: scaled_softplus.py
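A hypothetical call of this helper (the tensors g and t here are made up for illustration; TF 1.x assumed): if t of shape [4] was broadcast into a computation whose upstream gradient g has shape [2, 3, 4], the helper sums g over axes 0 and 1 and reshapes the result back to t's shape:

g = tf.ones([2, 3, 4])  # upstream gradient in the broadcast shape
t = tf.ones([4])        # the original, smaller tensor
reduced = _reduce_and_reshape_grad(g, t)  # shape [4]; every entry is 6.0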

Example 4: _DivGrad

def _DivGrad(op, grad):
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)  # pylint: disable=protected-access
  return (array_ops.reshape(math_ops.reduce_sum(grad / y, rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(grad * (-x / math_ops.square(y)), ry), sy))
Author: 0ruben, Project: tensorflow, Lines: 9, Source: math_grad.py

Example 5: _ComplexGrad

def _ComplexGrad(op, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
Author: neuroradiology, Project: tensorflow, Lines: 9, Source: math_grad.py

Example 6: _PowGrad

def _PowGrad(op, grad):
    """Returns grad * (y*x^(y-1), z*log(x))."""
    x = op.inputs[0]
    y = op.inputs[1]
    z = op.outputs[0]
    sx = array_ops.shape(x)
    sy = array_ops.shape(y)
    rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
    gx = array_ops.reshape(math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
    gy = array_ops.reshape(math_ops.reduce_sum(grad * z * math_ops.log(x), ry), sy)
    return gx, gy
Author: adeelzaman, Project: tensorflow, Lines: 11, Source: math_grad.py

Example 7: _MulGrad

def _MulGrad(op, grad):
  """The gradient of scalar multiplication."""
  x = op.inputs[0]
  y = op.inputs[1]
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
Author: Hwhitetooth, Project: tensorflow, Lines: 12, Source: math_grad.py
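A hedged end-to-end check (TF 1.x graph mode; the shapes are illustrative): tf.gradients dispatches to the registered Mul gradient above, and the broadcast reduction makes each returned gradient match its input's shape:

x = tf.ones([2, 3])
y = tf.ones([3])  # broadcast against x along axis 0
dx, dy = tf.gradients(tf.reduce_sum(x * y), [x, y])
with tf.Session() as sess:
  print(sess.run(dy))  # [2. 2. 2.]: the gradients were summed over axis 0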

Example 8: _PowGrad

def _PowGrad(op, grad):
    """Returns grad * (y*x^(y-1), z*log(x))."""
    x = op.inputs[0]
    y = op.inputs[1]
    z = op.outputs[0]
    sx = array_ops.shape(x)
    sy = array_ops.shape(y)
    rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
    gx = array_ops.reshape(math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
    # Avoid false singularity at x = 0
    log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))
    gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
    return gx, gy
Author: ChanningPing, Project: tensorflow, Lines: 13, Source: math_grad.py

Example 9: _PolygammaGrad

def _PolygammaGrad(op, grad):
    """Returns gradient of psi(n, x) with respect to n and x."""
    # TODO(tillahoffmann): Add derivative with respect to n
    n = op.inputs[0]
    x = op.inputs[1]
    # Broadcast gradients
    sn = array_ops.shape(n)
    sx = array_ops.shape(x)
    unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
    # Evaluate gradient
    with ops.control_dependencies([grad.op]):
        partial_x = math_ops.polygamma(n + 1, x)
        return (None, array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Author: ChanningPing, Project: tensorflow, Lines: 13, Source: math_grad.py

Example 10: _ZetaGrad

def _ZetaGrad(op, grad):
    """Returns gradient of zeta(x, q) with respect to x and q."""
    # TODO(tillahoffmann): Add derivative with respect to x
    x = op.inputs[0]
    q = op.inputs[1]
    # Broadcast gradients
    sx = array_ops.shape(x)
    sq = array_ops.shape(q)
    unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
    # Evaluate gradient
    with ops.control_dependencies([grad.op]):
        partial_q = -x * math_ops.zeta(x + 1, q)
        return (None, array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
Author: ChanningPing, Project: tensorflow, Lines: 13, Source: math_grad.py

Example 11: _IgammaGrad

def _IgammaGrad(op, grad):
    """Returns gradient of igamma(a, x) with respect to a and x."""
    # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
    a = op.inputs[0]
    x = op.inputs[1]
    sa = array_ops.shape(a)
    sx = array_ops.shape(x)
    unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)

    # Perform operations in log space before summing, because Gamma(a)
    # and Gamma'(a) can grow large.
    partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
    return (None, array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Author: ChanningPing, Project: tensorflow, Lines: 13, Source: math_grad.py
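Background identity (standard, not spelled out in the snippet): for the regularized lower incomplete gamma function P(a, x),

    \frac{\partial}{\partial x} P(a, x) = \frac{x^{a-1} e^{-x}}{\Gamma(a)}

which matches partial_x once the exp/log rewriting is undone; keeping the lgamma term inside the exp argument is what prevents overflow.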

Example 12: _MulGrad

def _MulGrad(op, grad):
  x = op.inputs[0]
  y = op.inputs[1]
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  if x.dtype.base_dtype == dtypes.complex64:
    return (array_ops.reshape(math_ops.reduce_sum(grad * math_ops.conj(y), rx), sx),
            array_ops.reshape(math_ops.reduce_sum(math_ops.conj(x) * grad, ry), sy))
  else:
    return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
            array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
Author: TeMedy, Project: tensorflow, Lines: 13, Source: math_grad.py

Example 13: _DivGrad

def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(
              grad * math_ops.div(-x, math_ops.square(y)), ry), sy))
Author: Hwhitetooth, Project: tensorflow, Lines: 14, Source: math_grad.py

Example 14: _SubGrad

def _SubGrad(op, grad):
  """Gradient for Sub."""
  x = op.inputs[0]
  y = op.inputs[1]
  if (isinstance(grad, ops.Tensor) and
      _ShapesFullySpecifiedAndEqual(x, y, grad)):
    return grad, -grad
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
          array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
Author: neuroradiology, Project: tensorflow, Lines: 14, Source: math_grad.py

Example 15: _FloorModGrad

def _FloorModGrad(op, grad):
  """Returns grad * (1, -floor(x/y))."""
  x = math_ops.conj(op.inputs[0])
  y = math_ops.conj(op.inputs[1])

  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  floor_xy = math_ops.floor_div(x, y)
  gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
  return gx, gy
Author: neuroradiology, Project: tensorflow, Lines: 15, Source: math_grad.py
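The docstring's formula follows from a standard identity (background, not stated in the snippet):

    x \bmod y = x - \lfloor x / y \rfloor \, y \;\Rightarrow\; \frac{\partial}{\partial x} = 1, \quad \frac{\partial}{\partial y} = -\lfloor x / y \rfloor

where the floor term is treated as locally constant, since its derivative is zero almost everywhere.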

