This page collects typical usage examples of the Python method tensorflow.python.debug.stepper.NodeStepper.dirty_variables. If you are wondering what exactly NodeStepper.dirty_variables does and how to use it, the curated code examples below may help. You can also look further into the containing class, tensorflow.python.debug.stepper.NodeStepper.
Six code examples of the NodeStepper.dirty_variables method are shown below, sorted by popularity by default.
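All six examples run against the same test fixture: a forward graph computing f = a * b * b * c through the intermediates d = a * b and e = b * c, minimized by a GradientDescentOptimizer with learning rate 0.01 under the name "optim". The fixture code itself is not shown on this page; the following is a minimal reconstruction, assumed from the tensor names and numeric values asserted in the tests (TF 1.x API, hypothetical class name):

import tensorflow as tf

class NodeStepperBackwardRunTest(tf.test.TestCase):  # hypothetical class name

  def setUp(self):
    # Assumed fixture: initial values chosen so that d:0 == 2.0 and
    # e:0 == 8.0, matching the assertions in the examples below.
    self.a = tf.Variable(1.0, name="a")
    self.b = tf.Variable(2.0, name="b")
    self.c = tf.Variable(4.0, name="c")
    self.d = tf.multiply(self.a, self.b, name="d")  # d = a * b = 2.0
    self.e = tf.multiply(self.b, self.c, name="e")  # e = b * c = 8.0
    self.f = tf.multiply(self.d, self.e, name="f")  # f = a * b * b * c = 16.0

    # Backward graph; "optim" is the target node handed to NodeStepper.
    tf.train.GradientDescentOptimizer(0.01).minimize(self.f, name="optim")

    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())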
Example 1: testContToUpdateA
# Required import: from tensorflow.python.debug.stepper import NodeStepper [as alias]
# Or: from tensorflow.python.debug.stepper.NodeStepper import dirty_variables [as alias]
def testContToUpdateA(self):
  stepper = NodeStepper(self.sess, "optim")

  result = stepper.cont("a:0")
  self.assertAllClose(1.0, result)
  self.assertEqual({}, stepper.last_feed_types())

  result = stepper.cont("optim/learning_rate:0")
  self.assertAllClose(0.01, result)
  self.assertEqual({}, stepper.last_feed_types())

  # Before any cont() calls on ApplyGradientDescent, there should be no
  # "dirty" variables.
  self.assertEqual(set(), stepper.dirty_variables())

  # Run the first of the two update ops that serve as control inputs to optim.
  result = stepper.cont("optim/update_a/ApplyGradientDescent")

  # Now Variable a should have been marked as dirty due to the update
  # by optim/update_a/ApplyGradientDescent.
  self.assertEqual({"a:0"}, stepper.dirty_variables())

  self.assertIsNone(result)
  self.assertEqual({
      "optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
  }, stepper.last_feed_types())

  # Check that Variable a has been updated properly, while b and c
  # remain the same.
  # For backprop on Variable a:
  # Because f = a * b * b * c, df / da = b * b * c.
  #   1.0 - learning_rate * b * b * c
  #     = 1.0 - 0.01 * 2.0 * 2.0 * 4.0 = 0.84.
  self.assertAllClose(0.84, self.sess.run(self.a))
  self.assertAllClose(2.0, self.sess.run(self.b))
  self.assertAllClose(4.0, self.sess.run(self.c))
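As a quick sanity check of the arithmetic in the comments above, the gradient-descent update to a can be reproduced in plain Python (values taken from the assumed fixture):

# Plain-Python check of the update to a; not part of the test itself.
learning_rate = 0.01
a, b, c = 1.0, 2.0, 4.0
df_da = b * b * c                 # f = a * b * b * c  =>  df/da = b * b * c
print(a - learning_rate * df_da)  # 1.0 - 0.01 * 16.0 = 0.84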
Example 2: testSelectiveHandleUsageDependingOnTransitiveCleanliness
# Required import: from tensorflow.python.debug.stepper import NodeStepper [as alias]
# Or: from tensorflow.python.debug.stepper.NodeStepper import dirty_variables [as alias]
def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
  """Test that tensor handles are used only when the transitive closure is clean.

  "Clean" means no Variables have been updated by preceding cont() calls.
  """
  stepper = NodeStepper(self.sess, "optim")

  # First, call cont() on the two tensors on the intermediate level: d and e.
  result = stepper.cont("d:0")
  self.assertAllClose(2.0, result)
  self.assertEqual({}, stepper.last_feed_types())
  self.assertEqual(set(), stepper.dirty_variables())

  # Likewise, cont() on e:0 should use no feeds and leave all variables clean.
  result = stepper.cont("e:0")
  self.assertAllClose(8.0, result)
  self.assertEqual({}, stepper.last_feed_types())
  self.assertEqual(set(), stepper.dirty_variables())

  # Now run update_a, so that Variable a becomes dirty.
  result = stepper.cont("optim/update_a/ApplyGradientDescent",
                        restore_variable_values=True)
  self.assertIsNone(result)
  self.assertEqual({"a:0"}, stepper.dirty_variables())

  # Now, run update_b.
  result = stepper.cont("optim/update_b/ApplyGradientDescent",
                        restore_variable_values=True)
  self.assertIsNone(result)

  # The last cont() call should have used the handle of tensor e, but not the
  # handle of tensor d, because the transitive closure of e is clean, whereas
  # that of d is dirty due to the update to a in the previous cont() call.
  self.assertEqual({
      "e:0": NodeStepper.FEED_TYPE_HANDLE
  }, stepper.last_feed_types())

  # The result of update_b should be identical to what it would be if no
  # other update_* cont() calls had occurred before.
  self.assertAllClose(1.0, self.sess.run(self.a))
  self.assertAllClose(1.84, self.sess.run(self.b))
  self.assertAllClose(4.0, self.sess.run(self.c))
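The same probe-then-decide pattern can be used interactively: check dirty_variables() between cont() calls to know whether cached tensor handles are still trustworthy. A hypothetical sketch using only the API surface exercised above (sess and the "optim" target are assumed to exist as in the fixture):

# Hypothetical interactive use; not from the test suite.
stepper = NodeStepper(sess, "optim")
stepper.cont("d:0")  # caches a reusable handle to d:0
stepper.cont("optim/update_a/ApplyGradientDescent")
if "a:0" in stepper.dirty_variables():
  # d:0 depends on a, so its cached handle is now stale; inspect what the
  # last cont() actually fed via last_feed_types().
  print(stepper.last_feed_types())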
Example 3: testUpdateTwiceRestoreVariable
# Required import: from tensorflow.python.debug.stepper import NodeStepper [as alias]
# Or: from tensorflow.python.debug.stepper.NodeStepper import dirty_variables [as alias]
def testUpdateTwiceRestoreVariable(self):
  stepper = NodeStepper(self.sess, "optim")

  result = stepper.cont("optim/update_a/ApplyGradientDescent",
                        restore_variable_values=True)
  self.assertIsNone(result)
  self.assertEqual({"a:0"}, stepper.dirty_variables())

  result = stepper.cont("optim/update_b/ApplyGradientDescent",
                        restore_variable_values=True)
  self.assertIsNone(result)

  # Variable a should have been restored and hence is no longer dirty;
  # Variable b should now be marked as dirty.
  self.assertEqual({"b:0"}, stepper.dirty_variables())

  # The result of the update should be identical to what it would be if only
  # update_b had been run.
  self.assertAllClose(1.0, self.sess.run(self.a))
  self.assertAllClose(1.84, self.sess.run(self.b))
  self.assertAllClose(4.0, self.sess.run(self.c))
Example 4: testContToUpdateB
# Required import: from tensorflow.python.debug.stepper import NodeStepper [as alias]
# Or: from tensorflow.python.debug.stepper.NodeStepper import dirty_variables [as alias]
def testContToUpdateB(self):
  stepper = NodeStepper(self.sess, "optim")

  result = stepper.cont("optim/update_b/ApplyGradientDescent")
  self.assertIsNone(result)
  self.assertEqual(set(["b:0"]), stepper.dirty_variables())

  # For backprop on Variable b:
  # Because f = a * b * b * c, df / db = 2 * a * b * c.
  #   2.0 - learning_rate * 2 * a * b * c
  #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
  self.assertAllClose(1.0, self.sess.run(self.a))
  self.assertAllClose(1.84, self.sess.run(self.b))
  self.assertAllClose(4.0, self.sess.run(self.c))
Example 5: testContAfterUpdateWithoutRestoringVariableValue
# Required import: from tensorflow.python.debug.stepper import NodeStepper [as alias]
# Or: from tensorflow.python.debug.stepper.NodeStepper import dirty_variables [as alias]
def testContAfterUpdateWithoutRestoringVariableValue(self):
  stepper = NodeStepper(self.sess, "optim")

  # First, update Variable a from 1.0 to 0.84.
  result = stepper.cont("optim/update_a/ApplyGradientDescent",
                        restore_variable_values=True)
  self.assertIsNone(result)
  self.assertEqual(set(["a:0"]), stepper.dirty_variables())
  self.assertAllClose(0.84, self.sess.run(self.a))
  self.assertAllClose(2.0, self.sess.run(self.b))
  self.assertAllClose(4.0, self.sess.run(self.c))

  # Second, update Variable b, this time with restore_variable_values=False,
  # so the dirty value of a is not restored first.
  result = stepper.cont(
      "optim/update_b/ApplyGradientDescent", restore_variable_values=False)
  self.assertIsNone(result)

  # For the backprop on Variable b under the updated value of a:
  #   2.0 - learning_rate * 2 * a' * b * c
  #     = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
  self.assertAllClose(0.84, self.sess.run(self.a))
  self.assertAllClose(1.8656, self.sess.run(self.b))
  self.assertAllClose(4.0, self.sess.run(self.c))
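The contrast between this result (1.8656) and Example 3's (1.84) comes down to which value of a enters df/db. A plain-Python restatement (assumed fixture values):

# Why skipping the restore changes b's update; not part of the test itself.
learning_rate = 0.01
b, c = 2.0, 4.0
for a in (1.0, 0.84):  # pristine a vs. the dirty a left behind by update_a
  print(b - learning_rate * 2 * a * b * c)  # prints 1.84, then 1.8656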
Example 6: testOverrideThenContToUpdate
# Required import: from tensorflow.python.debug.stepper import NodeStepper [as alias]
# Or: from tensorflow.python.debug.stepper.NodeStepper import dirty_variables [as alias]
def testOverrideThenContToUpdate(self):
  """Test cont() to update nodes after overriding tensor values."""
  stepper = NodeStepper(self.sess, "optim")

  result = stepper.cont("d:0")
  self.assertAllClose(2.0, result)
  self.assertEqual({}, stepper.last_feed_types())
  self.assertEqual(set(), stepper.dirty_variables())
  self.assertEqual(["d:0"], stepper.handle_names())

  # Override the value of a/read:0 from 1.0 to 10.0.
  stepper.override_tensor("a/read:0", 10.0)
  self.assertEqual(["a/read:0"], stepper.override_names())

  result = stepper.cont("optim/update_c/ApplyGradientDescent",
                        restore_variable_values=True)
  self.assertIsNone(result)

  # The last cont() call should not have used the tensor handle to d:0,
  # because the transitive closure of d:0 contains an overridden tensor.
  self.assertEqual({
      "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
  }, stepper.last_feed_types())

  # The tensor handle to d:0 should have been removed due to the dirty
  # transitive closure.
  self.assertEqual([], stepper.handle_names())

  # For this backprop on c, the overriding value of a/read:0 should have been
  # used:
  #   4.0 - learning_rate * a * b * b
  #     = 4.0 - 0.01 * 10.0 * 2.0 * 2.0 = 3.6.
  self.assertAllClose(3.6, self.sess.run(self.c))

  # Now remove the overriding value of a/read:0.
  stepper.remove_override("a/read:0")
  self.assertEqual([], stepper.override_names())

  # Obtain the tensor handle to d:0 again.
  result = stepper.cont("d:0")
  self.assertAllClose(2.0, result)
  self.assertEqual(["d:0"], stepper.handle_names())

  # Then call update_c again, without restoring c.
  result = stepper.cont(
      "optim/update_c/ApplyGradientDescent", restore_variable_values=False)
  self.assertIsNone(result)

  # This time, the d:0 tensor handle should have been used, because its
  # transitive closure is clean.
  self.assertEqual({
      "d:0": NodeStepper.FEED_TYPE_HANDLE
  }, stepper.last_feed_types())

  # For this backprop on c, the original value of a/read:0 (1.0) should have
  # been used, since the override was removed:
  #   3.6 - learning_rate * a * b * b
  #     = 3.6 - 0.01 * 1.0 * 2.0 * 2.0 = 3.56.
  self.assertAllClose(3.56, self.sess.run(self.c))
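Finally, the two successive updates to c can be verified directly; the only difference between them is whether the overridden value of a enters df/dc (assumed fixture values):

# Plain-Python check of the two updates to c; not part of the test itself.
learning_rate = 0.01
b = 2.0
c = 4.0
c -= learning_rate * 10.0 * b * b  # with override a/read:0 = 10.0: 4.0 -> 3.6
c -= learning_rate * 1.0 * b * b   # override removed, a = 1.0: 3.6 -> 3.56
print(c)                           # 3.56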