

Python gen_array_ops._broadcast_gradient_args Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.gen_array_ops._broadcast_gradient_args method in Python. If you are unsure what gen_array_ops._broadcast_gradient_args does or how to call it, the curated code examples below should help. You can also explore further usage examples from the tensorflow.python.ops.gen_array_ops module.


The following shows 15 code examples of the gen_array_ops._broadcast_gradient_args method, ordered by popularity by default.
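Before the collected examples, here is a minimal orientation sketch (not taken from the projects below; it assumes TensorFlow 1.x graph mode): _broadcast_gradient_args takes the shapes of two broadcast operands and returns the axes along which each operand's gradient must be summed before being reshaped back to that operand's original shape.

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops

x = tf.ones([2, 3])   # shape (2, 3)
y = tf.ones([3])      # shape (3,), broadcast against x along axis 0
sx = array_ops.shape(x)
sy = array_ops.shape(y)
# pylint: disable=protected-access
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
# pylint: enable=protected-access

with tf.Session() as sess:
  # Expected: rx == [] (x needs no reduction), ry == [0]
  # (y's gradient must be summed over axis 0 before reshaping to sy).
  print(sess.run([rx, ry]))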

Example 1: _BetaincGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  _, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b)
              - gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp(
      (b - 1) * math_ops.log(1 - x) + (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (None,  # da
          None,  # db
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 26, Source file: math_grad.py
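Note that _BetaincGrad is not called by user code directly; in TensorFlow's math_grad.py such functions are registered against their op type so that autodiff can find them when building the backward graph. A hedged sketch of that registration pattern (the decorator line is inferred from TensorFlow's standard gradient-registration convention and is not shown in the snippet above):

from tensorflow.python.framework import ops

@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
  # Body as in Example 1. tf.gradients() looks this function up by the op
  # type name "Betainc"; re-registering an already-registered op type
  # raises an error, so this is illustrative only.
  ...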

Example 2: _ZetaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 19, Source file: math_grad.py

Example 3: _PolygammaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    partial_x = math_ops.polygamma(n + 1, x)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 19, Source file: math_grad.py

Example 4: _RealDivGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _RealDivGrad(op, grad):
  """RealDiv op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(
      math_ops.reduce_sum(math_ops.realdiv(grad, y), rx),
      sx), array_ops.reshape(
          math_ops.reduce_sum(grad * math_ops.realdiv(math_ops.realdiv(-x, y), y),
                              ry), sy)) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 18, Source file: math_grad.py

Example 5: _PowGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy 
Developer: ryfeus, Project: lambda-packs, Lines of code: 25, Source file: math_grad.py

Example 6: _MaximumMinimumGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 18, Source file: math_grad.py

Example 7: _ZetaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 18, Source file: math_grad.py

Example 8: _PolygammaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    partial_x = math_ops.polygamma(n + 1, x)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 18, Source file: math_grad.py

Example 9: _SquaredDifferenceGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))


# Logical operations have no gradients. 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 21, Source file: math_grad.py
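The trailing comment ("Logical operations have no gradients.") refers to the part of math_grad.py that follows this snippet, where comparison and logical ops are explicitly marked as not differentiable. A hedged sketch of that pattern; the specific op names below are an assumption based on TensorFlow's convention, not copied from the project above:

from tensorflow.python.framework import ops

# Marking an op NotDifferentiable makes tf.gradients() return None for it
# instead of failing with a missing-gradient lookup error. In the real
# math_grad.py these registrations run once at import time.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")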

Example 10: _PowGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = math_ops.select(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 26, Source file: math_grad.py

Example 11: _MaximumMinimumGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = math_ops.select(xmask, grad, zeros)
  ygrad = math_ops.select(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 18, Source file: math_grad.py

Example 12: _IgammaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
  # TODO(b/36815900): Mark None return values as NotImplemented
  return (None,
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 19, Source file: math_grad.py

Example 13: _ZetaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  # pylint: disable=protected-access
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # pylint: enable=protected-access
  # Evaluate gradient
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 21, Source file: math_grad.py

Example 14: _PolygammaGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # pylint: enable=protected-access
  # Evaluate gradient
  with ops.control_dependencies([grad]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    partial_x = math_ops.polygamma(n + 1, x)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 21, Source file: math_grad.py

Example 15: _SquaredDifferenceGrad

# Required import: from tensorflow.python.ops import gen_array_ops [as alias]
# Or: from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args [as alias]
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  with ops.control_dependencies([grad]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))


# Logical operations have no gradients. 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 20, Source file: math_grad.py


Note: The tensorflow.python.ops.gen_array_ops._broadcast_gradient_args method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.