本文整理汇总了Python中tensorflow.python.eager.context.eager_mode函数的典型用法代码示例。如果您正苦于以下问题:Python eager_mode函数的具体用法?Python eager_mode怎么用?Python eager_mode使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了eager_mode函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_graph_mode_isolation
def test_graph_mode_isolation(self):
  """IsolateTest must isolate eager state even when entered from graph mode."""
  with context.graph_mode():
    # Even though the isolation scope is (accidentally) created in graph
    # mode, the eager state built inside it must not leak out.
    with test_util.IsolateTest():
      with context.eager_mode():
        isolated_variable = resource_variable_ops.ResourceVariable(
            initial_value=1, name="first_container_variable")
  # Outside the isolation scope the variable's backing resource no longer
  # exists, so reading it should fail.
  with context.eager_mode():
    with self.assertRaises(ValueError):
      isolated_variable.read_value()
示例2: testIteratorResourceCleanup
def testIteratorResourceCleanup(self):
  """Dropping eager iterator references should close partially-read files."""
  filename = os.path.join(self.get_temp_dir(), "text.txt")
  with open(filename, "wt") as f:
    for i in range(3):
      f.write("%d\n" % (i,))
  with context.eager_mode():
    iterator_a = iter(readers.TextLineDataset(filename))
    self.assertEqual(b"0", next(iterator_a).numpy())
    iterator_b = iter(readers.TextLineDataset(filename))
    self.assertEqual(b"0", next(iterator_b).numpy())
    # Eager kernel caching is based on op attributes, which includes the
    # Dataset's output shape. Create a different kernel to test that they
    # don't create resources with the same names.
    batched_iterator = iter(
        readers.TextLineDataset(filename).repeat().batch(16))
    self.assertEqual([16], next(batched_iterator).shape)
    # Remove our references to the Python Iterator objects, which (assuming
    # no reference cycles) is enough to trigger DestroyResourceOp and close
    # the partially-read files.
    del iterator_a, iterator_b, batched_iterator
    if not psutil_import_succeeded:
      self.skipTest(
          "psutil is required to check that we've closed our files.")
    open_files = psutil.Process().open_files()
    self.assertNotIn(filename, [open_file.path for open_file in open_files])
示例3: testAnonymousVarsInInit
def testAnonymousVarsInInit(self):
  """Checkpointing should track variables created anonymously in __init__."""

  class Model(training.Model):

    def __init__(self):
      super(Model, self).__init__()
      self.w = resource_variable_ops.ResourceVariable(0.0)
      self.b = resource_variable_ops.ResourceVariable(0.0)
      self.vars = [self.w, self.b]

    def call(self, x):
      return x * self.w + self.b

  with context.eager_mode():
    model = Model()
    optimizer = adam.AdamOptimizer(learning_rate=0.05)
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    checkpoint = util.Checkpoint(model=model, optimizer=optimizer)
    # Save, then take one optimization step; repeat so the second save sees
    # optimizer slot variables as well.
    for _ in range(2):
      checkpoint.save(prefix)
      with backprop.GradientTape() as tape:
        loss = (constant_op.constant(1.)
                - model(constant_op.constant(1.))) ** 2
      grads = tape.gradient(loss, model.vars)
      optimizer.apply_gradients(list(zip(grads, model.vars)))
示例4: test_callable_evaluate
def test_callable_evaluate(self):
  """self.evaluate should accept a zero-argument callable in eager mode."""

  def model():
    variable = resource_variable_ops.ResourceVariable(
        initial_value=1, name="same_name")
    return variable + 1

  with context.eager_mode():
    self.assertEqual(2, self.evaluate(model))
示例5: test_flatten
def test_flatten(self):
  """Test invoking Flatten in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      # Renamed from `input`, which shadowed the builtin of the same name.
      batch = np.random.rand(5, 10, 4).astype(np.float32)
      result = layers.Flatten()(batch)
      # Flatten collapses the non-batch dimensions: 10 * 4 == 40.
      assert result.shape == (5, 40)
示例6: test_vina_free_energy
def test_vina_free_energy(self):
  """Test invoking VinaFreeEnergy in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      n_atoms, m_nbrs, ndim = 5, 1, 3
      nbr_cutoff, start, stop = 1, 0, 4
      X = np.random.rand(n_atoms, ndim).astype(np.float32)
      Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
      layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,
                                    start, stop)
      result = layer(X, Z)
      assert len(layer.variables) == 6
      # The layer reduces its inputs to a scalar.
      assert result.shape == tuple()
      # A second layer has freshly initialized random weights, so its output
      # should differ from the first layer's.
      layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,
                                     start, stop)
      result2 = layer2(X, Z)
      assert not np.allclose(result, result2)
      # Re-invoking the first layer must reproduce its earlier output.
      result3 = layer(X, Z)
      assert np.allclose(result, result3)
示例7: test_max_pool_1d
def test_max_pool_1d(self):
  """Test invoking MaxPool1D in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      # Renamed from `input`, which shadowed the builtin of the same name.
      features = np.random.rand(4, 6, 8).astype(np.float32)
      result = layers.MaxPool1D(strides=2)(features)
      # Stride-2 pooling halves the middle axis: (4, 6, 8) -> (4, 3, 8).
      assert result.shape == (4, 3, 8)
示例8: test_constant
def test_constant(self):
  """Test invoking Constant in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      expected = np.random.rand(5, 4).astype(np.float32)
      # Constant takes no call-time inputs; it simply emits its value.
      output = layers.Constant(expected)()
      assert np.array_equal(output, expected)
示例9: from_saved_model
def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
  """Creates a TFLiteConverter object from a SavedModel directory.

  Args:
    saved_model_dir: SavedModel directory to convert.
    signature_keys: List of keys identifying SignatureDef containing inputs
      and outputs. Elements should not be duplicated. By default the
      `signatures` attribute of the MetaGraphdef is used. (default
      saved_model.signatures)
    tags: Set of tags identifying the MetaGraphDef within the SavedModel to
      analyze. All tags in the tag set must be present. (default set(SERVING))

  Returns:
    TFLiteConverter object.

  Raises:
    ValueError: Invalid signature keys.
  """
  # Ensures any graphs created in Eager mode are able to run. This is required
  # in order to create a tf.estimator.Exporter that exports a TFLite model.
  with context.eager_mode():
    saved_model = _load(saved_model_dir, tags)
  if not signature_keys:
    signature_keys = saved_model.signatures

  # Reject unknown keys up front, then collect the concrete functions.
  unknown = [k for k in signature_keys if k not in saved_model.signatures]
  if unknown:
    raise ValueError("Invalid signature key '{}' found. Valid keys are "
                     "'{}'.".format(unknown[0],
                                    ",".join(saved_model.signatures)))
  funcs = [saved_model.signatures[key] for key in signature_keys]
  return cls(funcs, saved_model)
示例10: compute_output_shape
def compute_output_shape(self, input_shape):
  """Computes the Lambda layer's output shape from `input_shape`."""
  if self._output_shape is None:
    # Make use of existing autocomputation but provide a Lambda-specific
    # error message. Entering eager mode is always safe here because Lambda
    # layers don't have side effects such as `add_loss`.
    with context.eager_mode():
      try:
        return super(Lambda, self).compute_output_shape(input_shape)
      except NotImplementedError:
        raise NotImplementedError(
            'We could not automatically infer the shape of the Lambda\'s '
            'output. Please specify `output_shape` for this Lambda.')

  if callable(self._output_shape):
    computed = self._output_shape(input_shape)
    return tf_utils.convert_shapes(computed, to_tuples=False)

  # `self._output_shape` was passed directly and excludes the batch
  # dimension; recover the batch size from the input and prepend it.
  shapes_in = tf_utils.convert_shapes(input_shape, to_tuples=False)
  batch = nest.flatten(shapes_in)[0][0] if input_shape else None

  def _with_batch(shape):
    return tensor_shape.TensorShape([batch] + shape.as_list())

  declared = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
  return nest.map_structure(_with_batch, declared)
示例11: testDatasetEagerIteration
def testDatasetEagerIteration(self, execution_mode):
  """Eager iteration over a range dataset yields its elements in order."""
  with context.eager_mode(), context.execution_mode(execution_mode):
    dataset = dataset_ops.Dataset.range(10)
    for expected, element in enumerate(dataset):
      self.assertEqual(expected, element.numpy())
示例12: decorated
def decorated(self, **kwargs):
  """Runs the wrapped test method in graph mode, then again in eager mode."""
  with context.graph_mode():
    with self.test_session(graph, config, use_gpu, force_gpu):
      f(self, **kwargs)

  if reset_test:
    # This decorator runs the wrapped test twice.
    # Reset the test environment between runs.
    self.tearDown()
    self.setUp()

  def run_eager_mode(self, **kwargs):
    if force_gpu:
      gpu_name = gpu_device_name()
      if not gpu_name:
        gpu_name = "/device:GPU:0"
      with context.device(gpu_name):
        # Fix: forward **kwargs like the other branches; the original
        # called f(self) here, silently dropping keyword arguments on the
        # force_gpu path.
        f(self, **kwargs)
    elif use_gpu:
      # TODO(xpan): Support softplacement and gpu by default when available.
      f(self, **kwargs)
    else:
      with context.device("/device:CPU:0"):
        f(self, **kwargs)

  if assert_no_eager_garbage:
    # Wrap the eager run with tensor/garbage-leak checks when requested.
    run_eager_mode = assert_no_new_tensors(
        assert_no_garbage_created(run_eager_mode))

  with context.eager_mode():
    with IsolateTest():
      run_eager_mode(self, **kwargs)
示例13: testAssignDifferentShapesEager
def testAssignDifferentShapesEager(self):
  """Eager variables accept assignment of a value with a different shape."""
  with context.eager_mode():
    with variable_scope.variable_scope("foo"):
      var = variable_scope.get_variable(
          "x", shape=[1, 1], dtype=dtypes.float32)
      # Assigning a [2, 2] value to a [1, 1] variable should succeed eagerly.
      self.evaluate(var.assign(np.zeros(shape=[2, 2])))
示例14: testSlotsUniqueEager
def testSlotsUniqueEager(self):
  """Each RMSprop configuration creates the expected unique variables."""
  with context.eager_mode():
    v1 = variables.Variable(1.)
    v2 = variables.Variable(1.)
    # (momentum, centered, expected unique optimizer variables): the count
    # is the iterations variable plus the slot variables for v1 and v2 —
    # one slot kind at momentum=0, two with momentum, three when centered.
    cases = [
        (0., False, 3),
        (0.2, False, 5),
        (0.2, True, 7),
    ]
    for momentum, centered, expected_count in cases:
      opt = rmsprop.RMSprop(
          learning_rate=1., momentum=momentum, centered=centered)
      opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      self.assertEqual(expected_count, len(set(opt.variables())))
      self.assertEqual(
          self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
示例15: test_lstm
def test_lstm(self):
  """Test invoking LSTM in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      batch_size, n_hidden = 10, 7
      in_channels, n_steps = 4, 6
      sequence = np.random.rand(batch_size, n_steps, in_channels).astype(
          np.float32)
      layer = layers.LSTM(n_hidden, batch_size)
      result, state = layer(sequence)
      assert result.shape == (batch_size, n_steps, n_hidden)
      assert len(layer.variables) == 2
      # A second layer has freshly initialized random weights, so its
      # output should differ from the first layer's.
      layer2 = layers.LSTM(n_hidden, batch_size)
      result2, _ = layer2(sequence)
      assert not np.allclose(result, result2)
      # Re-invoking the first layer reproduces its earlier output.
      result3, state3 = layer(sequence)
      assert np.allclose(result, result3)
      # But a different starting state should change the output.
      result4, _ = layer(sequence, initial_state=state3)
      assert not np.allclose(result, result4)