This article collects typical usage examples of the Python method tensorflow.RegisterGradient. If you are wondering what tensorflow.RegisterGradient does or how to use it, the curated examples below may help; you can also explore further usage examples from the tensorflow module.
The following 15 code examples of tensorflow.RegisterGradient are drawn from open-source projects and sorted by popularity by default.
Example 1: _py_func_with_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def _py_func_with_gradient(func, inp, Tout, stateful=True, name=None,
                           grad_func=None):
    """
    PyFunc defined as given by TensorFlow
    :param func: Custom function
    :param inp: Function inputs
    :param Tout: Output type of the custom function
    :param stateful: Calculate gradients when stateful is True
    :param name: Name of the PyFunc
    :param grad_func: Custom gradient function
    :return: Output of tf.py_func, with grad_func attached as its gradient
    """
    # Generate a random name to avoid conflicts with built-in names
    rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)
    # Register the TensorFlow gradient under that name
    tf.RegisterGradient(rnd_name)(grad_func)
    # Get the current graph
    g = tf.get_default_graph()
    # Route PyFunc ops created below through the registered gradient
    with g.gradient_override_map(
            {"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
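A minimal usage sketch (assuming TensorFlow 1.x graph mode and `from random import getrandbits` for the helper above; the NumPy square function and its gradient are hypothetical stand-ins, not part of the original snippet):

import numpy as np
import tensorflow as tf

def _np_square(x):
    return np.square(x)

def _square_grad(op, grad):
    return grad * 2.0 * op.inputs[0]  # d(x^2)/dx = 2x, chained with upstream grad

x = tf.constant([1.0, 2.0, 3.0])
y = _py_func_with_gradient(_np_square, [x], tf.float32,
                           grad_func=_square_grad)
dy_dx, = tf.gradients(y, [x])  # evaluates to [2., 4., 6.] in a session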
Example 2: _create_op_handle
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def _create_op_handle(compiled_op):
    op = compiled_op.op
    handle = int(compiled_op.lib.create_new_op(
        _to_c_array(op.inputs), len(op.inputs),
        _to_c_array(op.outputs), len(op.outputs)))

    # Forward
    def op_functor(*args, **kwargs):
        return compiled_op.op_func(*args, op_handle_ptr=handle, **kwargs)

    # Backward
    def op_grad_functor(tfop, *args, **kwargs):
        return compiled_op.op_grad_func(
            *(args + tuple(tfop.inputs) + tuple(tfop.outputs)),
            op_handle_ptr=handle, **kwargs)

    try:
        tf.RegisterGradient('TfOp' + op.name)(op_grad_functor)
    except KeyError:
        # RegisterGradient raises KeyError if the name is already taken
        print("Warning: Gradient already registered to another handle")
    return op_functor, compiled_op.lib, handle
Example 3: reverse_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def reverse_gradient(X, hp_lambda):
    '''Flips the sign of the incoming gradient during training.'''
    try:
        reverse_gradient.num_calls += 1
    except AttributeError:
        reverse_gradient.num_calls = 1

    grad_name = "GradientReversal%d" % reverse_gradient.num_calls

    @tf.RegisterGradient(grad_name)
    def _flip_gradients(op, grad):
        return [tf.negative(grad) * hp_lambda]

    g = K.get_session().graph
    with g.gradient_override_map({'Identity': grad_name}):
        y = tf.identity(X)
    return y
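A usage sketch (assuming the snippet's TF 1.x setting, with the Keras backend imported as K): the forward pass is the identity, while the backward pass flips and scales the gradient by hp_lambda, the trick behind domain-adversarial training:

x = tf.constant([1.0, 2.0])
y = reverse_gradient(x, hp_lambda=0.5)
dy_dx, = tf.gradients(y, [x])  # evaluates to [-0.5, -0.5]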
Example 4: clip_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def clip_gradient(value, clip_value):
    if not hasattr(clip_gradient, 'added_gradients'):
        clip_gradient.added_gradients = set()

    session = tensorflow_session()
    graph = session.graph
    operation_name = "ClipGradient-" + str(clip_value)

    if operation_name not in clip_gradient.added_gradients:
        # Make sure that we won't create the same operation twice.
        # Otherwise TensorFlow will trigger an exception.
        @tf.RegisterGradient(operation_name)
        def clip_gradient_grad(op, grad):
            return tf.clip_by_value(grad, -clip_value, clip_value)

        clip_gradient.added_gradients.add(operation_name)

    with graph.gradient_override_map({"Identity": operation_name}):
        return tf.identity(value)
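A usage sketch (hypothetical; assumes the snippet's tensorflow_session() helper returns the active tf.Session). The upstream gradient of 10 exceeds clip_value and is clipped:

x = tf.constant([3.0])
y = 10.0 * clip_gradient(x, clip_value=1.0)
dy_dx, = tf.gradients(y, [x])  # the incoming gradient 10.0 is clipped to [1.0]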
Example 5: _py_func_with_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def _py_func_with_gradient(func, inp, Tout, stateful=True, name=None,
                           grad_func=None):
    """
    PyFunc defined as given by TensorFlow
    :param func: Custom function
    :param inp: Function inputs
    :param Tout: Output type of the custom function
    :param stateful: Calculate gradients when stateful is True
    :param name: Name of the PyFunc
    :param grad_func: Custom gradient function
    :return: Output of tf.py_func, with grad_func attached as its gradient
    """
    # Generate a random name to avoid conflicts with built-in names
    rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)
    # Register the TensorFlow gradient under that name
    tf.RegisterGradient(rnd_name)(grad_func)
    # Get the current graph
    g = tf.get_default_graph()
    # Route PyFunc ops created below through the registered gradient
    with g.gradient_override_map({"PyFunc": rnd_name,
                                  "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example 6: guided_relu
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
@contextmanager   # requires: from contextlib import contextmanager
def guided_relu():
    """
    Returns:
        A context where the gradient of :meth:`tf.nn.relu` is replaced by
        guided back-propagation, as described in the paper:
        `Striving for Simplicity: The All Convolutional Net
        <https://arxiv.org/abs/1412.6806>`_
    """
    from tensorflow.python.ops import gen_nn_ops   # noqa

    @tf.RegisterGradient("GuidedReLU")
    def GuidedReluGrad(op, grad):
        return tf.where(0. < grad,
                        gen_nn_ops.relu_grad(grad, op.outputs[0]),
                        tf.zeros(grad.get_shape()))

    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': 'GuidedReLU'}):
        yield
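A usage sketch (hypothetical; build_model and image are stand-ins for a real network and its input). gradient_override_map only affects ops created while it is active, so the model must be built inside the context:

with guided_relu():
    logits = build_model(image)          # hypothetical model-building call
    saliency = tf.gradients(tf.reduce_max(logits), image)[0]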
Example 7: py_func_grad
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None):
    """Custom py_func with gradient support
    """
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({
            "PyFunc": rnd_name,
            "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example 8: py_func
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    global _py_func_id  # module-level counter, assumed initialized to 0
    rnd_name = 'PyFuncGrad' + '%08d' % _py_func_id
    _py_func_id += 1
    tf.RegisterGradient(rnd_name)(grad)
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
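Why every helper above invents a fresh name: tf.RegisterGradient raises a KeyError when a name is registered twice, so a fixed name would break the second call. A minimal demonstration:

tf.RegisterGradient('MyUniqueGrad')(lambda op, grad: grad)
# Registering the same name again raises KeyError:
# tf.RegisterGradient('MyUniqueGrad')(lambda op, grad: grad)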
Example 9: register_custom_svd_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def register_custom_svd_gradient():
    tf.RegisterGradient('CustomSvd')(custom_gradient_svd)
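A usage sketch (hypothetical; custom_gradient_svd is assumed to be defined elsewhere with a signature like (op, grad_s, grad_u, grad_v), matching the three outputs of tf.svd):

register_custom_svd_gradient()
g = tf.get_default_graph()
with g.gradient_override_map({'Svd': 'CustomSvd'}):
    s, u, v = tf.svd(matrix)  # `matrix` is a stand-in for any float 2-D tensor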
Example 10: py_func
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example 11: py_func
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example 12: py_func
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def py_func(func, inp, Tout, stateful=True, name=None, grad_func=None):
    rand_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rand_name)(grad_func)
    g = tf.get_default_graph()
    with g.gradient_override_map({'PyFunc': rand_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Example 13: cholesky
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def cholesky(self, A, lower=True, warn=True, correct=False):
    assert lower is True
    # Gradient through py_func adapted from
    # https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342

    def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
        tf.RegisterGradient(rnd_name)(grad)
        g = tf.get_default_graph()
        with g.gradient_override_map({'PyFunc': rnd_name,
                                      'PyFuncStateless': rnd_name}):
            return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

    def correction(A):
        # Add exponentially growing diagonal jitter until A is positive
        # definite (i.e. until NumPy's Cholesky succeeds).
        A_new, del_ = A.copy(), 1e-4
        while True:
            try:
                np.linalg.cholesky(A_new)
                break
            except np.linalg.LinAlgError:
                if warn:
                    logging.warn('[Cholesky] singular matrix, adding diagonal {}'.format(del_))
                A_new = A + del_ * np.eye(A.shape[-1]).astype(self.floatx())
                del_ *= 2
        return A_new

    def _correction_grad(op, grad):
        # Straight-through gradient: backprop ignores the added jitter.
        return grad

    if correct:
        shape = A.get_shape()
        A = py_func(correction, [A], A.dtype, grad=_correction_grad)
        A.set_shape(shape)
    return tf.cholesky(A)
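For intuition, the correction loop can be exercised on its own; a standalone sketch (hypothetical input) showing the jitter doubling until NumPy's Cholesky succeeds:

import numpy as np

A = np.zeros((2, 2), dtype=np.float32)  # singular, so Cholesky fails
A_new, del_ = A.copy(), 1e-4
while True:
    try:
        np.linalg.cholesky(A_new)
        break  # A_new is now positive definite
    except np.linalg.LinAlgError:
        A_new = A + del_ * np.eye(A.shape[-1], dtype=np.float32)
        del_ *= 2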
Example 14: _register_guided_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def _register_guided_gradient(name):
    # `ops` refers to tensorflow.python.framework.ops; the private registry
    # check avoids re-registering an existing gradient name.
    if name not in ops._gradient_registry._registry:
        @tf.RegisterGradient(name)
        def _guided_backprop(op, grad):
            dtype = op.outputs[0].dtype
            # Gate on both a positive upstream gradient and a positive output
            gate_g = tf.cast(grad > 0., dtype)
            gate_y = tf.cast(op.outputs[0] > 0., dtype)
            return gate_y * gate_g * grad
Example 15: _register_rectified_gradient
# Required import: import tensorflow [as alias]
# Or: from tensorflow import RegisterGradient [as alias]
def _register_rectified_gradient(name):
    if name not in ops._gradient_registry._registry:
        @tf.RegisterGradient(name)
        def _relu_backprop(op, grad):
            dtype = op.outputs[0].dtype
            # Gate only on a positive upstream gradient ("deconvnet" rule)
            gate_g = tf.cast(grad > 0., dtype)
            return gate_g * grad
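The two registrations differ only in their gate: guided backprop (Example 14) zeroes the gradient wherever either the upstream gradient or the op output is non-positive, while the rectified variant (Example 15) gates on the upstream gradient alone. Either is meant to be paired with a gradient override map; a hypothetical sketch:

_register_guided_gradient('GuidedBackProp')
g = tf.get_default_graph()
with g.gradient_override_map({'Relu': 'GuidedBackProp'}):
    pass  # ops built here back-propagate through ReLU with the guided rule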