

Python math_ops.conj Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.conj. If you are wondering what math_ops.conj does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from its home module, tensorflow.python.ops.math_ops.


The following presents 15 code examples of the math_ops.conj method, sorted by popularity by default.
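
Before the gradient examples, a minimal sketch of what math_ops.conj itself computes may help (TF 1.x graph mode; the constants and session setup here are illustrative, not taken from the examples below):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
y = math_ops.conj(x)  # elementwise complex conjugate -> [1-2j, 3+4j]
# For real (and integer) tensors conj is the identity, which is why the
# gradient functions below can call it unconditionally.
with tf.Session() as sess:
  print(sess.run(y))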

Example 1: _ZetaGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: math_grad.py
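
For reference, the identity behind partial_q is the standard derivative of the Hurwitz zeta function with respect to its second argument:

\[ \frac{\partial}{\partial q}\,\zeta(x, q) = -x\,\zeta(x + 1, q) \]

The gradient with respect to x is returned as None because, as the TODO notes, no kernel for it existed at this point.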

Example 2: _RealDivGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _RealDivGrad(op, grad):
  """RealDiv op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(
      math_ops.reduce_sum(math_ops.realdiv(grad, y), rx),
      sx), array_ops.reshape(
          math_ops.reduce_sum(grad * math_ops.realdiv(math_ops.realdiv(-x, y), y),
                              ry), sy)) 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: math_grad.py
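
The two returned gradients are just the quotient rule for z = x / y:

\[ \frac{\partial z}{\partial x} = \frac{1}{y}, \qquad \frac{\partial z}{\partial y} = -\frac{x}{y^{2}} \]

The reduce_sum/reshape pairs then fold the broadcast dimensions back so each gradient matches the shape of its input.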

Example 3: _MatMulGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _MatMulGrad(op, grad):
  """Gradient for MatMul."""

  t_a = op.get_attr("transpose_a")
  t_b = op.get_attr("transpose_b")
  a = math_ops.conj(op.inputs[0])
  b = math_ops.conj(op.inputs[1])
  if not t_a and not t_b:
    grad_a = math_ops.matmul(grad, b, transpose_b=True)
    grad_b = math_ops.matmul(a, grad, transpose_a=True)
  elif not t_a and t_b:
    grad_a = math_ops.matmul(grad, b)
    grad_b = math_ops.matmul(grad, a, transpose_a=True)
  elif t_a and not t_b:
    grad_a = math_ops.matmul(b, grad, transpose_b=True)
    grad_b = math_ops.matmul(a, grad)
  elif t_a and t_b:
    grad_a = math_ops.matmul(b, grad, transpose_a=True, transpose_b=True)
    grad_b = math_ops.matmul(grad, a, transpose_a=True, transpose_b=True)
  return grad_a, grad_b 
Developer: ryfeus, Project: lambda-packs, Lines: 22, Source: math_grad.py
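
The first branch encodes the usual matrix-calculus identities for C = AB with upstream gradient G; the other three branches are the same identities with the transposes pushed through:

\[ \bar{A} = G B^{\mathsf{T}}, \qquad \bar{B} = A^{\mathsf{T}} G \]

Conjugating a and b first makes the same formulas valid for complex matrices, where the adjoint rather than the plain transpose appears.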

Example 4: _ZetaGrad (same implementation as Example 1, collected from a different project)

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 18, Source: math_grad.py

Example 5: _PolygammaGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    partial_x = math_ops.polygamma(n + 1, x)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 18, Source: math_grad.py
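
partial_x follows directly from the definition of the polygamma function: \psi^{(n)} is the n-th derivative of the digamma function \psi, so

\[ \frac{\partial}{\partial x}\,\psi^{(n)}(x) = \psi^{(n+1)}(x) \]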

Example 6: _Expm1Grad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _Expm1Grad(op, grad):
  """Returns grad * exp(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    y = math_ops.exp(x)
    return grad * y 
Developer: ryfeus, Project: lambda-packs, Lines: 9, Source: math_grad.py
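
The formula being applied is

\[ \frac{d}{dx}\left(e^{x} - 1\right) = e^{x} \]

Unlike _ExpGrad in Example 12, exp(x) must be recomputed here, because the op's cached output is e^x - 1 rather than e^x.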

Example 7: _InvGradGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _InvGradGrad(op, grad):
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad) 
Developer: ryfeus, Project: lambda-packs, Lines: 10, Source: math_grad.py
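
The comment names the function being differentiated: the first-order ReciprocalGrad computes y = -b * conj(a)^2. Treating conj(a) and b as the effective variables,

\[ \frac{\partial y}{\partial \bar{a}} = -2\,b\,\bar{a}, \qquad \frac{\partial y}{\partial b} = -\bar{a}^{2} \]

and, modulo TensorFlow's conjugation convention for complex gradients, the returned pair is the upstream grad times these partials; the second term, -grad * conj(a)^2, is conveniently just another _reciprocal_grad call.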

Example 8: _ReciprocalGradGrad (identical to Example 7; Reciprocal is the successor of the Inv op, so both gradients share one implementation)

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _ReciprocalGradGrad(op, grad):
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad) 
Developer: ryfeus, Project: lambda-packs, Lines: 10, Source: math_grad.py

Example 9: _SquareGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _SquareGrad(op, grad):
  x = op.inputs[0]
  # Added control dependencies to prevent 2*x from being computed too early.
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * (2.0 * x) 
Developer: ryfeus, Project: lambda-packs, Lines: 8, Source: math_grad.py
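
The identity is simply

\[ \frac{d}{dx}\,x^{2} = 2x \]

with conj(x) substituted so the same code is correct for complex inputs.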

Example 10: _SqrtGradGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _SqrtGradGrad(op, grad):
  a = op.inputs[0]
  y = op.outputs[0]  # y = 0.5 * b / conj(a)
  with ops.control_dependencies([grad.op]):
    ga = grad / a
    return -math_ops.conj(ga) * y, 0.5 * ga 
Developer: ryfeus, Project: lambda-packs, Lines: 8, Source: math_grad.py
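
Here a is the output of the forward sqrt and b the incoming gradient, so the op being differentiated is y = 0.5 * b / conj(a). Differentiating gives

\[ \frac{\partial y}{\partial a} = -\frac{y}{a}, \qquad \frac{\partial y}{\partial b} = \frac{0.5}{a} \]

which, up to conjugation for complex types, is what -math_ops.conj(ga) * y and 0.5 * ga implement, with ga = grad / a.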

Example 11: _RsqrtGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _RsqrtGrad(op, grad):
  """Returns -0.5 * grad * conj(y)^3."""
  y = op.outputs[0]  # y = x^(-1/2)
  return gen_math_ops._rsqrt_grad(y, grad) 
Developer: ryfeus, Project: lambda-packs, Lines: 6, Source: math_grad.py
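
With y = x^{-1/2}, the fused kernel can evaluate the docstring's formula directly from the cached output:

\[ \frac{d}{dx}\,x^{-1/2} = -\tfrac{1}{2}\,x^{-3/2} = -\tfrac{1}{2}\,y^{3} \]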

Example 12: _ExpGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _ExpGrad(op, grad):
  """Returns grad * exp(x)."""
  y = op.outputs[0]  # y = e^x
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    return grad * y 
Developer: ryfeus, Project: lambda-packs, Lines: 8, Source: math_grad.py
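
Because exp is its own derivative, the cached output is reused instead of recomputing it:

\[ \frac{d}{dx}\,e^{x} = e^{x} = y \]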

Example 13: _LogGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x) 
Developer: ryfeus, Project: lambda-packs, Lines: 8, Source: math_grad.py
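
The underlying identity, with math_ops.reciprocal(x) as the graph-level form of 1/x:

\[ \frac{d}{dx}\,\ln x = \frac{1}{x} \]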

Example 14: _Log1pGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x) 
Developer: ryfeus, Project: lambda-packs, Lines: 8, Source: math_grad.py
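
Shifting the previous identity by one:

\[ \frac{d}{dx}\,\ln(1 + x) = \frac{1}{1 + x} \]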

Example 15: _TanhGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import conj [as alias]
def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._tanh_grad(y, grad) 
Developer: ryfeus, Project: lambda-packs, Lines: 9, Source: math_grad.py
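
The fused _tanh_grad kernel evaluates the docstring's formula from the cached output y:

\[ \frac{d}{dx}\,\tanh x = 1 - \tanh^{2} x = 1 - y^{2} \]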


Note: The tensorflow.python.ops.math_ops.conj examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not republish without permission.