This article collects typical usage examples of the Python function tensorflow.python.eager.backprop.implicit_grad. If you are wondering what implicit_grad does, how to call it, or what real-world uses look like, the curated code examples below may help.
A total of 15 code examples of the implicit_grad function are shown, sorted by popularity by default.
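All of the examples share the same pattern: backprop.implicit_grad(f) wraps a Python function that reads TensorFlow variables, and calling the wrapped function returns a list of (gradient, variable) pairs for every variable f touched. The sketch below illustrates that pattern in isolation; it is not taken from the TensorFlow test suite. The variable v and the loss function are made up for illustration, and the sketch assumes eager execution is enabled and that the internal tensorflow.python modules are importable (implicit_grad is not part of the public tf.* API).

import tensorflow as tf
from tensorflow.python.eager import backprop
from tensorflow.python.ops import resource_variable_ops

tf.compat.v1.enable_eager_execution()  # needed on TF 1.x; harmless at startup on TF 2.x

v = resource_variable_ops.ResourceVariable(3.0, name='v')

def loss():
  # Any variable read inside the function is tracked implicitly; no explicit
  # tape or variable list is needed.
  return v * v

# implicit_grad(loss) returns a callable; invoking it runs loss() and returns
# a list of (gradient, variable) pairs for every variable loss() used.
grads_and_vars = backprop.implicit_grad(loss)()
print(grads_and_vars[0][0].numpy())  # 6.0, i.e. d(v*v)/dv at v = 3.0
print(grads_and_vars[0][1] is v)     # True: the variable itself is returned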
Example 1: testDefunCanBeDifferentiatedTwice
def testDefunCanBeDifferentiatedTwice(self):
  v = resource_variable_ops.ResourceVariable(1.0)

  @function.defun
  def f():
    return v * v

  self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
  # Ensure that v is watched again.
  self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
Example 2: testDefunDifferentiable
def testDefunDifferentiable(self):
  v = resource_variable_ops.ResourceVariable(1.0)

  @function.defun
  def f():
    return v * v

  self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
Example 3: testGradientOfGatherWithDefun
def testGradientOfGatherWithDefun(self):
  v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])

  def sum_gather():
    return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))

  grad_fn = backprop.implicit_grad(sum_gather)
  gradient = grad_fn()
  defun_grad_fn = backprop.implicit_grad(function.defun(sum_gather))
  defun_gradient = defun_grad_fn()
  self.assertEqual(len(gradient), len(defun_gradient))

  gradient = gradient[0][0]
  defun_gradient = defun_gradient[0][0]
  self.assertAllEqual(gradient.values, defun_gradient.values)
  self.assertAllEqual(gradient.indices, defun_gradient.indices)
  self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
Example 4: testUnconnectedNone
def testUnconnectedNone(self):
  v = resource_variable_ops.ResourceVariable(
      1.0, name='testUnconnectedNone')

  def f():
    v.read_value()
    return constant_op.constant(1.0)

  self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
Example 5: _test_minimize_loss_graph
def _test_minimize_loss_graph(self, d, soft_placement=False,
                              learning_rate=0.2):
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = soft_placement
  config.gpu_options.per_process_gpu_memory_fraction = 0.3
  with context.graph_mode(), \
       ops.Graph().as_default(), \
       self.test_session(config=config) as sess, \
       d.scope():
    l = core.Dense(1, use_bias=False)

    def loss(x):
      # TODO(josh11b): What if this constant was instead a captured
      # value? Would it need to be a value that has been passed
      # through d.broadcast()?
      y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
      return y * y

    grad_fn = backprop.implicit_grad(loss)

    def update(v, g):
      return v.assign_sub(learning_rate * g)

    one = d.broadcast(constant_op.constant([[1.]]))

    def step():
      """Perform one optimization step."""
      # Run forward & backward to get gradients, variables list.
      g_v = d.call_for_each_tower(grad_fn, one)
      # Update the variables using the gradients and the update() function.
      before_list = []
      after_list = []
      for g, v in g_v:
        fetched = d.read_var(v)
        before_list.append(fetched)
        with ops.control_dependencies([fetched]):
          g = d.reduce(
              variable_scope.VariableAggregation.SUM, g, destinations=v)
          with ops.control_dependencies(d.update(
              v, update, g, grouped=False)):
            after_list.append(d.read_var(v))
      return before_list, after_list

    before_out, after_out = step()
    variables.global_variables_initializer().run()
    for i in range(10):
      b, a = sess.run((before_out, after_out))
      if i == 0:
        before, = b
      after, = a

    error_before = abs(before - 1)
    error_after = abs(after - 1)
    # Error should go down
    self.assertLess(error_after, error_before)
Example 6: testImplicitGradWithResourceVariable
def testImplicitGradWithResourceVariable(self):
  x = resource_variable_ops.ResourceVariable(initial_value=tensor.Tensor(1.0),
                                             name='x')

  def fn():
    tape.watch(x.handle)
    b = tensor.Tensor(2.0)
    c = math_ops.add(x.value(), b)
    return math_ops.add(c, tensor.Tensor(3.0))

  grad = backprop.implicit_grad(fn)()[0][1]
  self.assertEqual(grad.numpy(), 1.0)
Example 7: testGradients
def testGradients(self):
  @graph_callable.graph_callable([])
  def my_function():
    v = variable_scope.get_variable(
        "v", initializer=init_ops.constant_initializer(3.), shape=())
    return v * v

  grad_fn = backprop.implicit_grad(my_function)
  grads_and_vars = list(zip(*grad_fn()))
  self.assertAllEqual(6., grads_and_vars[0][0])
Example 8: testVariableGradient
def testVariableGradient(self):
  with self.test_scope():
    v0 = resource_variable_ops.ResourceVariable(1.0)

    def f():
      x = v0 * v0
      return x

    grads = backprop.implicit_grad(f)()
    self.assertEqual(2., grads[0][0].numpy())
Example 9: _test_minimize_loss_graph
def _test_minimize_loss_graph(self,
                              d,
                              soft_placement=False,
                              learning_rate=0.2):
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = soft_placement
  config.gpu_options.per_process_gpu_memory_fraction = 0.3
  with context.graph_mode(), \
       ops.Graph().as_default(), \
       self.cached_session(config=config) as sess, \
       d.scope():
    l = core.Dense(1, use_bias=False)

    def loss(x):
      y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
      return y * y

    grad_fn = backprop.implicit_grad(loss)

    def update(v, g):
      return v.assign_sub(learning_rate * g)

    one = constant_op.constant([[1.]])

    def step():
      """Perform one optimization step."""
      # Run forward & backward to get gradients, variables list.
      g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))
      # Update the variables using the gradients and the update() function.
      before_list = []
      after_list = []
      for g, v in g_v:
        fetched = d.extended.read_var(v)
        before_list.append(fetched)
        with ops.control_dependencies([fetched]):
          g = d.extended.reduce_to(
              reduce_util.ReduceOp.SUM, g, destinations=v)
          with ops.control_dependencies(
              d.extended.update(v, update, args=(g,), group=False)):
            after_list.append(d.extended.read_var(v))
      return before_list, after_list

    before_out, after_out = step()
    variables.global_variables_initializer().run()
    for i in range(10):
      b, a = sess.run((before_out, after_out))
      if i == 0:
        before, = b
      after, = a

    error_before = abs(before - 1)
    error_after = abs(after - 1)
    # Error should go down
    self.assertLess(error_after, error_before)
Example 10: testGPUImplicitGrad
def testGPUImplicitGrad(self):
  with context.device('gpu:0'):
    v = resource_variable_ops.ResourceVariable(
        constant_op.constant(1.0), name='v')

  def f():
    with context.device('gpu:0'):
      return v.read_value()

  self.assertEqual(
      backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
Example 11: testReturningNonTensorRaisesError
def testReturningNonTensorRaisesError(self):
  optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
  optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
  v = resource_variable_ops.ResourceVariable(1.0)
  grad = backprop.implicit_grad(lambda v: v**2)(v)

  with self.assertRaisesRegexp(TypeError,
                               '.*must return zero or more Tensors.*'):
    # TODO(akshayka): We might want to allow defun-ing Python functions
    # that return operations (and just execute the op instead of running it).
    optimizer.apply_gradients(grad)
Example 12: testEarlyGradAggregation
def testEarlyGradAggregation(self):
  # Needs to be a list so mutations by the callback affect this function.
  add_n = []

  def callback(op_type, unused_1, unused_2, unused_3, unused_4):
    if compat.as_bytes(op_type) == compat.as_bytes('AddN'):
      add_n.append(1)

  context.context().add_post_execution_callback(callback)

  v = resource_variable_ops.ResourceVariable(constant_op.constant(2.0),
                                             name='v')

  def fn():
    outputs = []
    for _ in range(20):
      outputs.append(v * constant_op.constant(2.0))
    return math_ops.add_n(outputs)

  # By default the aggregation count is 2.
  _ = backprop.implicit_grad(fn)()[0][1]
  self.assertEqual(len(add_n), 2)
  del add_n[:]

  # Reduce the aggregation limit, cause the backprop to do some
  # early aggregation.
  # pylint: disable=protected-access
  old_cnt = imperative_grad._MIN_AGGREGATE_COUNT
  old_bytes = imperative_grad._MIN_AGGREGATE_BYTES
  imperative_grad._MIN_AGGREGATE_COUNT = 10
  imperative_grad._MIN_AGGREGATE_BYTES = 1
  _ = backprop.implicit_grad(fn)()
  self.assertEqual(len(add_n), 6)
  del add_n[:]

  # Aggregation is also limited by the memory.
  imperative_grad._MIN_AGGREGATE_BYTES = 10000
  _ = backprop.implicit_grad(fn)()
  self.assertEqual(len(add_n), 2)

  imperative_grad._MIN_AGGREGATE_COUNT = old_cnt
  imperative_grad._MIN_AGGREGATE_BYTES = old_bytes
  # pylint: enable=protected-access
  context.context().clear_post_execution_callbacks()
Example 13: testImplicitGradWithResourceVariable
def testImplicitGradWithResourceVariable(self):
  x = resource_variable_ops.ResourceVariable(
      initial_value=constant_op.constant(1.0), name='x')

  def fn():
    b = constant_op.constant(2.0)
    c = math_ops.add(x.value(), b)
    return math_ops.add(c, constant_op.constant(3.0))

  grads_and_vars = backprop.implicit_grad(fn)()
  self.assertAllEqual(grads_and_vars[0][0], 1.0)
  self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
Example 14: testMultiValueConvertToTensor
def testMultiValueConvertToTensor(self):
  x = resource_variable_ops.ResourceVariable(
      initial_value=array_ops.constant([1.0]), name='x')

  def fn():
    a = math_ops.add(x.value(), 1.0)
    # Make sure convert_to_tensor works correctly with list of TensorNodes.
    b = array_ops.stack([a, a], axis=0)
    return math_ops.reduce_mean(b)

  grad = backprop.implicit_grad(fn)()[0][0]
  self.assertAllEqual([1.0], grad)
Example 15: testDifferentShapesEager
def testDifferentShapesEager(self):
  # Checks that kernel caching does not cause sharing of temporary storage
  # across different input shapes when executing eagerly.
  with context.eager_mode():
    with ops.device("gpu:0"):
      first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
          array_ops.zeros([28, 100, 28]))
      second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
          array_ops.zeros([28, 100, 100]))
      self.assertAllEqual([28, 100, 100], first_output.shape)
      self.assertAllEqual([28, 100, 100], second_output.shape)

      def _LossFunc():
        first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
            array_ops.zeros([28, 100, 28]))
        second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
            array_ops.zeros([28, 100, 100]))
        return (math_ops.reduce_sum(first_output) +
                math_ops.reduce_sum(second_output))

      backprop.implicit_grad(_LossFunc)()