This page collects typical usage examples of the Python method tensorflow.python.ops.math_ops.zeta. If you are unsure what math_ops.zeta does or how to use it, the curated code examples below may help. You can also explore further usage examples of the module tensorflow.python.ops.math_ops, where this method is defined.
The sections below show 4 code examples of math_ops.zeta, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
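Before diving into the examples: math_ops.zeta computes the Hurwitz zeta function zeta(x, q) = sum over n >= 0 of (q + n)^(-x). Here is a minimal sketch of a direct call, assuming an eager TensorFlow 2.x runtime (tf.math.zeta is the public alias of the same op):

import tensorflow as tf

x = tf.constant([2.0, 3.0])
q = tf.constant([1.0, 1.0])
# zeta(2, 1) = pi^2 / 6 ~= 1.6449; zeta(3, 1) ~= 1.2021
result = tf.math.zeta(x, q)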
Example 1: _ZetaGrad
# Required imports: from tensorflow.python.ops import math_ops [as alias]
# or: from tensorflow.python.ops.math_ops import zeta [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
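The closed form used above, d/dq zeta(x, q) = -x * zeta(x + 1, q), can be checked against autodiff. A hedged sketch, assuming an eager TensorFlow 2.x runtime and the public tf.math.zeta API:

import tensorflow as tf

x = tf.constant(3.0)
q = tf.Variable(2.0)
with tf.GradientTape() as tape:
    y = tf.math.zeta(x, q)
grad_q = tape.gradient(y, q)              # gradient produced by _ZetaGrad
analytic = -x * tf.math.zeta(x + 1.0, q)  # the closed form used above
# grad_q and analytic should agree to float32 precision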
Example 2: _ZetaGrad
# Required imports: from tensorflow.python.ops import math_ops [as alias]
# or: from tensorflow.python.ops.math_ops import zeta [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
Example 3: setUp
# Required imports: from tensorflow.python.ops import math_ops [as alias]
# or: from tensorflow.python.ops.math_ops import zeta [as alias]
def setUp(self):
  super(FloatBinaryOpsTest, self).setUp()
  self.ops = [
      ('igamma', None, math_ops.igamma, core.igamma),
      ('igammac', None, math_ops.igammac, core.igammac),
      ('zeta', None, math_ops.zeta, core.zeta),
      ('polygamma', None, math_ops.polygamma, core.polygamma),
      ('maximum', None, math_ops.maximum, core.maximum),
      ('minimum', None, math_ops.minimum, core.minimum),
      ('squared_difference', None, math_ops.squared_difference,
       core.squared_difference),
  ]
  total_size = np.prod([v.size for v in self.original_lt.axes.values()])
  test_lt = core.LabeledTensor(
      math_ops.cast(self.original_lt, dtypes.float32) / total_size,
      self.original_lt.axes)
  self.test_lt_1 = test_lt
  self.test_lt_2 = 1.0 - test_lt
  self.test_lt_1_broadcast = self.test_lt_1.tensor
  self.test_lt_2_broadcast = self.test_lt_2.tensor
  self.broadcast_axes = self.test_lt_1.axes
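Example 3 registers zeta alongside the other two-argument special functions as (name, _, raw_op, labeled_op) tuples. A hypothetical sketch of a test method that could consume such a list (the method name and assertion are illustrative, not part of the original suite):

def test_ops_match(self):
    for name, _, tf_op, lt_op in self.ops:
        golden = tf_op(self.test_lt_1_broadcast, self.test_lt_2_broadcast)
        actual = lt_op(self.test_lt_1, self.test_lt_2)
        # the LabeledTensor wrapper should match the raw op element-wise
        self.assertAllClose(golden, actual.tensor)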
Example 4: _ZetaGrad
# Required imports: from tensorflow.python.ops import math_ops [as alias]
# or: from tensorflow.python.ops.math_ops import zeta [as alias]
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  # pylint: disable=protected-access
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # pylint: enable=protected-access
  # Evaluate gradient
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 21, Source file: math_grad.py
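For context, _ZetaGrad is not called directly; in math_grad.py it is registered as the gradient of the Zeta op so that automatic differentiation picks it up. A hedged sketch of the registration mechanism (the decorator usage below illustrates how TensorFlow wires gradients, not a verbatim copy of the file):

from tensorflow.python.framework import ops

@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
    ...  # body as in the examples above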