This article collects typical usage examples of the prevent_gradient function from tensorflow.python.ops.array_ops in Python. If you are unsure what prevent_gradient does or how to use it, the curated examples below should help.
Four code examples of the prevent_gradient function are shown, sorted by popularity by default.
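Before the examples, here is a minimal sketch of what prevent_gradient itself does (assuming TF 1.x graph-mode semantics via the tf.compat.v1 shim; the tensor values and message text are illustrative): the op is an identity in the forward pass, but any attempt to backpropagate through it raises a LookupError that carries the optional message.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import array_ops

tf.disable_eager_execution()

x = tf.constant([1.0, 2.0, 3.0])
# Forward pass: y is simply x (identity op).
y = array_ops.prevent_gradient(x, message="second derivative not supported")
# Backward pass: uncommenting the next line raises a LookupError whose
# text includes the message passed above.
# tf.gradients(y, x)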
Example 1: _SoftmaxCrossEntropyWithLogitsGrad
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient function for SoftmaxCrossEntropyWithLogits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1]).
  # There is no gradient for the labels.
  #
  # Currently there is no way to take the second derivative of this op
  # due to the fused implementation's interaction with tf.gradients(),
  # so we make sure we prevent silently incorrect results by raising
  # an error if the second derivative is requested via prevent_gradient.
  softmax_grad_without_gradient = array_ops.prevent_gradient(op.outputs[1])
  return _BroadcastMul(grad_0, softmax_grad_without_gradient), None
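Examples 1, 2, and 4 all lean on _BroadcastMul, a private helper defined alongside these gradient functions in TensorFlow's nn_grad.py. For context, a sketch of what it does (the exact body may differ across TF versions): it broadcasts the per-example loss gradient vector across the saved gradient matrix.

def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match the dimensions of mat.

  vec is a 1-D tensor of shape [D0]; mat is 2-D of shape [D0, D1].
  """
  # Reshape vec to [D0, 1] so it broadcasts across mat's columns.
  vec = array_ops.expand_dims(vec, -1)
  return vec * mat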
Example 2: _CTCLossGrad
def _CTCLossGrad(op, grad_loss, _):
  """The derivative provided by CTC Loss.

  Args:
    op: the CTCLoss op.
    grad_loss: The backprop for cost.

  Returns:
    The CTC Loss gradient.
  """
  # Outputs are: loss, grad
  #
  # Currently there is no way to take the second derivative of this op
  # due to the fused implementation's interaction with tf.gradients(),
  # so we make sure we prevent silently incorrect results by raising
  # an error if the second derivative is requested via prevent_gradient.
  grad_without_gradient = array_ops.prevent_gradient(op.outputs[1])
  # Return gradient for inputs and None for
  # labels_indices, labels_values and sequence_length.
  return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
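Gradient functions like _CTCLossGrad only take effect once they are registered against the raw op name; in TensorFlow itself this is done with an ops.RegisterGradient decorator placed directly above the definition. A sketch of the pattern, using a hypothetical op name (re-registering the real "CTCLoss" would fail, since TensorFlow has already claimed that entry):

from tensorflow.python.framework import ops

@ops.RegisterGradient("MyFancyLoss")  # hypothetical op name
def _my_fancy_loss_grad(op, grad_loss, _):
  # tf.gradients() calls this with one incoming gradient per op output
  # and expects one gradient (or None) per op input, in input order.
  return [grad_loss, None, None, None]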
Example 3: testPreventGradient
def testPreventGradient(self):
  with ops.Graph().as_default():
    inp = constant(1.0, shape=[100, 32], name="in")
    out = array_ops.prevent_gradient(inp)
    with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
      _ = gradients.gradients(out, inp)
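A natural variant of this test (sketched here; not part of the original suite) checks that the optional message argument surfaces in the raised error:

def testPreventGradientWithMessage(self):
  with ops.Graph().as_default():
    inp = constant(1.0, shape=[100, 32], name="in")
    out = array_ops.prevent_gradient(inp, message="no second derivative")
    with self.assertRaisesRegexp(LookupError, "no second derivative"):
      _ = gradients.gradients(out, inp)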
Example 4: _FuzzyCTCLossGrad
def _FuzzyCTCLossGrad(op, grad_loss, _):
  grad_without_gradient = array_ops.prevent_gradient(
      op.outputs[1], message="Currently there is no way to take the second "
      "derivative of ctc_loss due to the fused implementation's interaction "
      "with tf.gradients()")
  # Expand grad_loss so it broadcasts against the saved gradient tensor.
  return [_BroadcastMul(tf.expand_dims(grad_loss, -1), grad_without_gradient),
          None, None, None]