This article collects typical usage examples of the tensorflow.python.ops.functional_ops._symbolic_gradient function in Python. If you have been wondering what exactly _symbolic_gradient does and how it is used, the curated code examples below may help.
Five code examples of _symbolic_gradient are shown below, ordered by popularity by default.
Example 1: XSquarePlusOneGrad
import tensorflow as tf
from tensorflow.python.ops import functional_ops

def XSquarePlusOneGrad(x, dy):
  dx = functional_ops._symbolic_gradient(
      input=[x, dy],
      Tout=[tf.float32],
      # "XSquarePlusOneFn" is the name under which the forward function
      # was registered (via define_function/Defun).
      f="XSquarePlusOneFn",
      name="dx")
  return dx
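For context, f="XSquarePlusOneFn" only resolves if a forward function has been registered in the graph under that name. Below is a minimal sketch of what that registration might look like, assuming the TF 1.x-era function.Defun decorator and its func_name keyword; it is not part of the original example.

import tensorflow as tf
from tensorflow.python.framework import function

# Hypothetical forward definition; func_name must match the string passed
# as f= to _symbolic_gradient above.
@function.Defun(tf.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
  return x * x + 1.0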
Example 2: _SymGrad
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.ops import functional_ops

def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
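_SymGrad expects op to be a function-call node, i.e. an op produced by invoking a graph function; the NameAttrList it builds simply names that function and copies over its attrs. The following is a small sketch of the kind of op it operates on, assuming TF 1.x graph mode and function.Defun; it is illustrative only.

import tensorflow as tf
from tensorflow.python.framework import function

@function.Defun(tf.float32)
def Square(x):
  return x * x

g = tf.Graph()
with g.as_default():
  x = tf.constant(3.0)
  y = Square(x)          # y.op is the function-call node
  dy = tf.constant(1.0)
  # _SymGrad(y.op, [dy]) would then emit a SymbolicGradient op whose inputs
  # are y.op's inputs plus dy and whose outputs are gradients w.r.t. those inputs.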
Example 3: testSymGradShape
def testSymGradShape(self):
  g = tf.Graph()
  with g.as_default():
    x = tf.placeholder(tf.float32, [25, 4])
    y = tf.placeholder(tf.float32, [200, 100])
    dz = tf.placeholder(tf.float32, [1])
    # We assume Foo is a function of (x, y) -> (z). Then, Foo's
    # gradient function is (x, y, dz) -> (dx, dy). dx's shape
    # should be the same as x's; and dy's shape should be the same
    # as y's.
    dx, dy = functional_ops._symbolic_gradient(
        input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
    self.assertEqual(x.get_shape(), dx.get_shape())
    self.assertEqual(y.get_shape(), dy.get_shape())
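The test never runs the graph, so it only relies on SymbolicGradient's shape function: each output gradient takes the shape of the corresponding input. A hedged sketch of a Foo matching the assumed (x, y) -> z contract is shown below; the name and body are purely illustrative and not part of the original test.

import tensorflow as tf
from tensorflow.python.framework import function

# Hypothetical Foo; any (x, y) -> z mapping satisfies the shape contract
# described in the comment above, since the test only builds the graph.
@function.Defun(tf.float32, tf.float32, func_name="Foo")
def Foo(x, y):
  return tf.reduce_sum(x) + tf.reduce_sum(y)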
Example 4: XSquarePlusOneGrad
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import functional_ops

def XSquarePlusOneGrad(x, dy):
  dx = functional_ops._symbolic_gradient(
      input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
  return dx
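Example 4 is the same call as Example 1 with Tout spelled via dtypes. One way the pair might be wired together so the gradient function is actually used is sketched below, assuming the grad_func and func_name keywords of the TF 1.x-era function.Defun; this wiring is not shown in the original snippet.

import tensorflow as tf
from tensorflow.python.framework import dtypes, function
from tensorflow.python.ops import functional_ops

@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
  dx = functional_ops._symbolic_gradient(
      input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
  return dx

# The forward function is registered under "XSquarePlusOneFn" and told to
# use the Defun'd gradient above when gradients are taken through it.
@function.Defun(dtypes.float32, grad_func=XSquarePlusOneGrad,
                func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
  return x * x + 1.0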
Example 5: gradients
#......... part of the code is omitted here .........
  stop_ops = _StopOps(from_ops, pending_count)
  while queue:
    # generate gradient subgraph for op.
    op = queue.popleft()
    with _maybe_colocate_with(op, colocate_gradients_with_ops):
      if loop_state:
        loop_state.EnterGradWhileContext(op, before=True)
      out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
      if loop_state:
        loop_state.ExitGradWhileContext(op, before=True)

      grad_fn = None
      # pylint: disable=protected-access
      is_func_call = ops.get_default_graph()._is_function(op.type)
      if not is_func_call and any(
          isinstance(g, ops.Tensor) or g for g in out_grads) and (
              op._id not in stop_ops):
        # pylint: enable=protected-access
        # A grad_fn must be defined, either as a function or as None
        # for ops that do not have gradients.
        try:
          grad_fn = ops.get_gradient_function(op)
        except LookupError:
          raise LookupError(
              "No gradient defined for operation '%s' (op type: %s)" %
              (op.name, op.type))

      if loop_state:
        loop_state.EnterGradWhileContext(op, before=False)
      if (grad_fn or is_func_call) and any(
          isinstance(g, ops.Tensor) or g for g in out_grads):
        # NOTE: If _AggregatedGrads didn't compute a value for the i'th
        # output, it means that the cost does not depend on output[i],
        # therefore dC/doutput[i] is 0.
        for i, out_grad in enumerate(out_grads):
          if (not isinstance(out_grad, ops.Tensor)
              and not out_grad) and _IsFloat(op.outputs[i]):
            # Only floating-point outputs get a zero gradient. Gradient
            # functions should ignore the gradient for other outputs.
            if loop_state:
              out_grads[i] = loop_state.ZerosLike(op, i)
            else:
              out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
        with ops.name_scope(op.name + "_grad"):
          # pylint: disable=protected-access
          with ops.get_default_graph()._original_op(op):
            # pylint: enable=protected-access
            if is_func_call:
              # For function call ops, we add a 'SymbolicGradient'
              # node to the graph to compute gradients.
              f_in = [x for x in op.inputs] + out_grads
              f_types = [x.dtype for x in op.inputs]
              # pylint: disable=protected-access
              in_grads = _AsList(functional_ops._symbolic_gradient(
                  f_in, f_types, op.type))
              # pylint: enable=protected-access
            else:
              in_grads = _AsList(grad_fn(op, *out_grads))
            _VerifyGeneratedGradients(in_grads, op)
            if gate_gradients and len(
                [x for x in in_grads if x is not None]) > 1:
              in_grads = control_flow_ops.tuple(in_grads)
        logging.vlog(1, "Gradient for '" + op.name + "'")

        def _FilterGrad(x):
          if x is None:
            return False
          if isinstance(x, (list, tuple)):
            return bool(x)
          else:
            return True

        logging.vlog(1, " in --> %s",
                     ", ".join([x.name for x in out_grads if _FilterGrad(x)]))
        logging.vlog(1, " out --> %s",
                     ", ".join([x.name for x in in_grads if _FilterGrad(x)]))
      else:
        # If no grad_fn is defined or none of out_grads is available,
        # just propagates a list of None backwards.
        in_grads = [None] * len(op.inputs)
      for t_in, in_grad in zip(op.inputs, in_grads):
        if in_grad is not None:
          _SetGrad(grads, t_in, in_grad)
      if loop_state:
        loop_state.ExitGradWhileContext(op, before=False)

    # update pending count for the inputs of op.
    # pylint: disable=protected-access
    for x in op.inputs:
      pending_count[x.op._id] -= 1
      ready = (pending_count[x.op._id] == 0)
      if loop_state and not ready:
        ready = (pending_count[x.op._id] > 0 and
                 control_flow_ops.IsLoopSwitch(x.op))
      if ready:
        queue.append(x.op)
    for x in op.control_inputs:
      pending_count[x._id] -= 1
      if pending_count[x._id] == 0:
        queue.append(x)
    # pylint: enable=protected-access

  return [_GetGrad(grads, x) for x in xs]
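The is_func_call branch above is the path tf.gradients takes when it reaches a call to a graph function: instead of chaining per-op gradient functions, it emits a single SymbolicGradient node. Below is a minimal sketch of triggering that path, assuming TF 1.x graph mode and function.Defun; it is not part of the original source.

import tensorflow as tf
from tensorflow.python.framework import function

@function.Defun(tf.float32)
def Cube(x):
  return x * x * x

g = tf.Graph()
with g.as_default():
  x = tf.placeholder(tf.float32)
  y = Cube(x)
  dx, = tf.gradients(y, x)
  # The graph is now expected to contain a 'SymbolicGradient' op added by
  # the function-call branch shown above.
  print([op.type for op in g.get_operations()])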