This article collects typical usage examples of the Python method tensorflow.python.ops.functional_ops._symbolic_gradient. If you are wondering what functional_ops._symbolic_gradient does, how to call it, or what working examples look like, the curated samples below may help. You can also browse further usage examples from the containing module, tensorflow.python.ops.functional_ops.
The following presents 3 code examples of the functional_ops._symbolic_gradient method, sorted by popularity by default.
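Background for the examples below (all target the TensorFlow 1.x graph API): _symbolic_gradient is the generated Python wrapper for the SymbolicGradient op. For a function f: (x1, ..., xN) -> (y1, ..., yM) registered in the graph's function library, the op takes the N original inputs followed by the M output gradients and returns the N input gradients, instantiating f's gradient function (either one registered explicitly or one derived automatically from f's body).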
Example 1: testGradientFunc
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import _symbolic_gradient [as alias]
# This snippet additionally assumes: import tensorflow as tf
# and: from tensorflow.python.framework import function
def testGradientFunc(self):

  @function.Defun(tf.float32, func_name="XSquarePlusOneFn")
  def XSquarePlusOne(x):
    return x * x + 1.0

  @function.Defun(tf.float32, tf.float32)
  def XSquarePlusOneGrad(x, dy):
    dx = functional_ops._symbolic_gradient(
        input=[x, dy], Tout=[tf.float32], f="XSquarePlusOneFn", name="dx")
    return dx

  g = tf.Graph()
  with g.as_default():
    call_f = XSquarePlusOne([2.0])
    call_g = XSquarePlusOneGrad([2.0], [0.1])

    with tf.Session() as sess:
      self.assertAllClose([5.0], sess.run(call_f))
      self.assertAllClose([0.4], sess.run(call_g))
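The expected values follow directly: XSquarePlusOne(2.0) = 2.0**2 + 1 = 5.0, and since d/dx (x**2 + 1) = 2x, the symbolic gradient at x = 2.0 with upstream gradient dy = 0.1 is 2 * 2.0 * 0.1 = 0.4. The test wires the gradient call up by hand; to have tf.gradients use a hand-written gradient automatically, the forward Defun can name it via grad_func. A minimal sketch, assuming the TF 1.x function.Defun API (SquarePlusOne and SquarePlusOneGrad are hypothetical names):

import tensorflow as tf
from tensorflow.python.framework import function

@function.Defun(tf.float32, tf.float32)
def SquarePlusOneGrad(x, dy):
  return 2.0 * x * dy            # d/dx (x**2 + 1) = 2x

@function.Defun(tf.float32, grad_func=SquarePlusOneGrad)
def SquarePlusOne(x):
  return x * x + 1.0

with tf.Graph().as_default():
  x = tf.constant([2.0])
  y = SquarePlusOne(x)
  dx, = tf.gradients(y, x)       # backprop emits a SymbolicGradient node
  with tf.Session() as sess:
    print(sess.run(dx))          # [4.0]: 2 * 2.0 * 1.0 (default upstream grad is 1)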
Example 2: _SymGrad
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import _symbolic_gradient [as alias]
# This snippet additionally assumes: from tensorflow.core.framework import attr_value_pb2
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
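_SymGrad is the hook TensorFlow 1.x's gradients machinery uses to backprop through a function-call node: it packages the call op's attrs into a NameAttrList and emits a SymbolicGradient node rather than inlining the gradient graph. A minimal sketch of a path that reaches it, assuming TF 1.x (Cube is a hypothetical function name):

import tensorflow as tf
from tensorflow.python.framework import function

@function.Defun(tf.float32)
def Cube(x):
  return x * x * x

with tf.Graph().as_default():
  x = tf.constant(2.0)
  y = Cube(x)               # a function-call node; op.type is the function name
  dx, = tf.gradients(y, x)  # backprop through the call goes via _SymGrad,
                            # which emits a SymbolicGradient node
  with tf.Session() as sess:
    print(sess.run(dx))     # 12.0 == 3 * 2.0**2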
Example 3: testSymGradShape
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import _symbolic_gradient [as alias]
# This snippet additionally assumes: import tensorflow as tf
def testSymGradShape(self):
  g = tf.Graph()
  with g.as_default():
    x = tf.placeholder(tf.float32, [25, 4])
    y = tf.placeholder(tf.float32, [200, 100])
    dz = tf.placeholder(tf.float32, [1])
    # We assume Foo is a function of (x, y) -> (z). Then, Foo's
    # gradient function is (x, y, dz) -> (dx, dy). dx's shape
    # should be the same as x's, and dy's shape should be the same
    # as y's.
    dx, dy = functional_ops._symbolic_gradient(
        input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
    self.assertEqual(x.get_shape(), dx.get_shape())
    self.assertEqual(y.get_shape(), dy.get_shape())
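Note that the assertions pass even though Foo is never defined in the test: shape inference for SymbolicGradient does not need the function body, since each returned gradient simply takes the shape of the corresponding original input (dx gets x's [25, 4], dy gets y's [200, 100]).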