This article collects typical usage examples of the in_eager_mode function from the Python module tensorflow.python.eager.context. If you have been wondering what in_eager_mode is for, how to call it, and what real code that uses it looks like, the curated examples below should help.
Fifteen code examples of in_eager_mode are shown below, sorted by popularity by default.
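Before the examples, here is a minimal sketch of the branching pattern that almost all of them use. It assumes a TF 1.x build whose internal tensorflow.python.eager.context module still exposes in_eager_mode (later releases renamed it executing_eagerly), so treat the exact import as version-dependent:

import tensorflow as tf
from tensorflow.python.eager import context

x = tf.constant([1.0, 2.0, 3.0])
total = tf.reduce_sum(x)

if context.in_eager_mode():
  # Eager execution: tensors carry concrete values immediately.
  print(total.numpy())
else:
  # Graph mode: values must be fetched through a session.
  with tf.Session() as sess:
    print(sess.run(total))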
Example 1: add_variable
def add_variable(self, name, shape=None, dtype=None, initializer=None):
  """***Only for use by descendants of Metric***."""
  if self._built:
    raise RuntimeError("Can't call add_variable() except in build().")
  if context.in_eager_mode():
    collections = None
  else:
    if self._use_global_variables:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    else:
      collections = [ops.GraphKeys.LOCAL_VARIABLES]
    collections += [ops.GraphKeys.METRIC_VARIABLES]
  # Variables are Checkpointable dependencies of Metrics regardless of the
  # global/local distinction. Users can avoid saving variables by not adding a
  # dependency on the Metric.
  v = self._add_variable_with_custom_getter(
      name=name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      trainable=False,
      collections=collections,
      use_resource=True,
      getter=variable_scope.get_variable,
      # Raise duplicate variable exceptions from get_variable rather than
      # Checkpointable.
      overwrite=True)
  self._vars.append(v)
  if context.in_eager_mode():
    self._initial_values[v] = v.value()
  return v
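For context, a hedged sketch of the calling side: descendants of Metric create their state from build() via add_variable. The class below is modeled on the tf.contrib.eager metrics of that era; the module path and Metric hook names are assumptions, not the exact contrib source.

import tensorflow as tf
from tensorflow.contrib.eager.python import metrics  # assumed TF 1.x path

class ToyMean(metrics.Metric):
  """Toy mean metric; build() is the only place add_variable may be called."""

  def build(self, *args, **kwargs):
    self.numer = self.add_variable(
        "numer", shape=(), dtype=tf.float64,
        initializer=tf.zeros_initializer())
    self.denom = self.add_variable(
        "denom", shape=(), dtype=tf.float64,
        initializer=tf.zeros_initializer())

  def call(self, values):
    self.numer.assign_add(tf.reduce_sum(tf.to_double(values)))
    self.denom.assign_add(tf.to_double(tf.size(values)))

  def result(self):
    return self.numer / self.denom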
Example 2: infer_step
def infer_step(recent_output, recent_logits, unused_loss):
  """Inference step."""
  if not context.in_eager_mode():
    recent_output.set_shape([None, None, None, 1])
  padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
  features["targets"] = padded
  # This is inefficient in that it generates samples at all timesteps,
  # not just the last one, except if target_modality is pointwise.
  samples, logits, losses = self.sample(features)
  # Concatenate the already-generated recent_output with last timestep
  # of the newly-generated samples.
  if target_modality.top_is_pointwise:
    cur_sample = samples[:, -1, :, :]
  else:
    cur_sample = samples[:,
                         common_layers.shape_list(recent_output)[1], :, :]
  cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
  samples = tf.concat([recent_output, cur_sample], axis=1)
  if not context.in_eager_mode():
    samples.set_shape([None, None, None, 1])

  # Assuming we have one shard for logits.
  logits = tf.concat([recent_logits, logits[:, -1:]], 1)
  loss = sum([l for l in losses.values() if l is not None])
  return samples, logits, loss
Example 3: testVariablesAcrossGraphs
def testVariablesAcrossGraphs(self):
  optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
  with ops.Graph().as_default():
    var0 = resource_variable_ops.ResourceVariable(
        [1.0, 2.0], dtype=dtypes.float32, name="var0")
    var1 = resource_variable_ops.ResourceVariable(
        [3.0, 4.0], dtype=dtypes.float32, name="var1")
    if context.in_eager_mode():
      loss = lambda: math_ops.reduce_sum(var0 + var1)
    else:
      loss = math_ops.reduce_sum(var0 + var1)
    optimizer.minimize(loss)
    optimizer_variables = optimizer.variables()
    self.assertStartsWith(optimizer_variables[0].name, "var0")
    self.assertStartsWith(optimizer_variables[1].name, "var1")
    self.assertEquals(2, len(optimizer_variables))

  with ops.Graph().as_default():
    var2 = resource_variable_ops.ResourceVariable(
        [1.0, 2.0], dtype=dtypes.float32, name="var2")
    var3 = resource_variable_ops.ResourceVariable(
        [3.0, 4.0], dtype=dtypes.float32, name="var3")
    if context.in_eager_mode():
      loss = lambda: math_ops.reduce_sum(var2 + var3)
    else:
      loss = math_ops.reduce_sum(var2 + var3)
    optimizer.minimize(loss)
    optimizer_variables = optimizer.variables()
    self.assertStartsWith(optimizer_variables[0].name, "var2")
    self.assertStartsWith(optimizer_variables[1].name, "var3")
    self.assertEquals(2, len(optimizer_variables))
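The detail this test encodes: under eager execution, Optimizer.minimize expects a callable that recomputes the loss on every invocation, while graph mode takes the loss tensor itself. A standalone sketch of the eager path, assuming TF 1.7+ where tf.enable_eager_execution and tf.contrib.eager.Variable exist:

import tensorflow as tf

tf.enable_eager_execution()

v = tf.contrib.eager.Variable(2.0)
opt = tf.train.MomentumOptimizer(0.01, momentum=0.5)
opt.minimize(lambda: v * v)  # eager mode requires a callable loss
print(v.numpy())             # slightly below 2.0 after one step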
Example 4: test_zero_padding_1d
def test_zero_padding_1d(self):
  num_samples = 2
  input_dim = 2
  num_steps = 5
  shape = (num_samples, num_steps, input_dim)
  inputs = np.ones(shape)

  # basic test
  with self.test_session(use_gpu=True):
    testing_utils.layer_test(
        keras.layers.ZeroPadding1D,
        kwargs={'padding': 2},
        input_shape=inputs.shape)
    testing_utils.layer_test(
        keras.layers.ZeroPadding1D,
        kwargs={'padding': (1, 2)},
        input_shape=inputs.shape)

  # correctness test
  with self.test_session(use_gpu=True):
    layer = keras.layers.ZeroPadding1D(padding=2)
    layer.build(shape)
    output = layer(keras.backend.variable(inputs))
    if context.in_eager_mode():
      np_output = output.numpy()
    else:
      np_output = keras.backend.eval(output)
    for offset in [0, 1, -1, -2]:
      np.testing.assert_allclose(np_output[:, offset, :], 0.)
    np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)

    layer = keras.layers.ZeroPadding1D(padding=(1, 2))
    layer.build(shape)
    output = layer(keras.backend.variable(inputs))
    if context.in_eager_mode():
      np_output = output.numpy()
    else:
      np_output = keras.backend.eval(output)
    for left_offset in [0]:
      np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
    for right_offset in [-1, -2]:
      np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
    np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
    layer.get_config()

  # test incorrect use
  with self.assertRaises(ValueError):
    keras.layers.ZeroPadding1D(padding=(1, 1, 1))
  with self.assertRaises(ValueError):
    keras.layers.ZeroPadding1D(padding=None)
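The eager/graph split in the correctness check above is worth isolating: .numpy() exists only on eager tensors, while graph tensors are evaluated through keras.backend.eval. A compact sketch (TF 1.x, tf.keras assumed):

import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context

keras = tf.keras
output = keras.layers.ZeroPadding1D(padding=2)(
    keras.backend.variable(np.ones((2, 5, 2))))
if context.in_eager_mode():
  np_output = output.numpy()              # eager tensors expose .numpy()
else:
  np_output = keras.backend.eval(output)  # graph tensors need a session eval
print(np_output.shape)  # (2, 9, 2): two zero steps padded on each side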
Example 5: test_averagepooling_2d
def test_averagepooling_2d(self):
  testing_utils.layer_test(
      keras.layers.AveragePooling2D,
      kwargs={'strides': (2, 2),
              'padding': 'same',
              'pool_size': (2, 2)},
      input_shape=(3, 5, 6, 4))
  testing_utils.layer_test(
      keras.layers.AveragePooling2D,
      kwargs={'strides': (2, 2),
              'padding': 'valid',
              'pool_size': (3, 3)},
      input_shape=(3, 5, 6, 4))

  # This part of the test can only run on GPU but doesn't appear
  # to be properly assigned to a GPU when running in eager mode.
  if not context.in_eager_mode():
    # Only runs on GPU with CUDA, channels_first is not supported on CPU.
    # TODO(b/62340061): Support channels_first on CPU.
    if test.is_gpu_available(cuda_only=True):
      testing_utils.layer_test(
          keras.layers.AveragePooling2D,
          kwargs={
              'strides': (1, 1),
              'padding': 'valid',
              'pool_size': (2, 2),
              'data_format': 'channels_first'
          },
          input_shape=(3, 4, 5, 6))
Example 6: assert_integer
def assert_integer(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_integer(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: `Tensor` whose basetype is integer and is not quantized.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is anything other than non-quantized integer.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_integer', [x]):
    x = ops.convert_to_tensor(x, name='x')
    if not x.dtype.is_integer:
      if context.in_eager_mode():
        name = 'tensor'
      else:
        name = x.name
      err_msg = (
          '%s Expected "x" to be integer type. Found: %s of dtype %s'
          % (message, name, x.dtype))
      raise TypeError(err_msg)

    return control_flow_ops.no_op('statically_determined_was_integer')
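A usage sketch for the failure path, using the public alias tf.assert_integer (TF 1.x). The only effect of the in_eager_mode() branch is the name embedded in the message, since eager tensors have no stable graph name:

import tensorflow as tf

x = tf.constant([0.5, 1.5])  # float32, so the dtype check fails
try:
  tf.assert_integer(x, message="my check:")
except TypeError as e:
  # Graph mode reports the tensor's graph name; eager mode says 'tensor'.
  print(e)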
Example 7: _defun_internal
def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the graph key, since this is used for matching variables in
    # optimizers.
    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work.
    # This lets the function access (but not mutate) collections of the
    # containing graph, such as the global step and the summary writer
    # collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        this_tape = tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          tape.pop_tape(this_tape)
        variables = this_tape.watched_variables()

        # Returning a closed-over tensor as an output does not trigger a
        # call to convert_to_tensor, so we manually capture all such tensors.
        outputs_list = _flatten(func_outputs)
        func_def_outputs = [
            _convert_to_graph_tensor(x) for x in outputs_list if x is not None
        ]

      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      output_shapes = tuple(
          x.shape if isinstance(x, ops.Tensor) else None
          for x in outputs_list)

  flat_inputs = [x for x in nest.flatten(func_inputs)
                 if isinstance(x, ops.Tensor)]
  all_inputs = flat_inputs + list(extra_placeholders)
  all_ignored_ops = frozenset(x.op for x in all_inputs)
  fname = _inference_name(name)
  operations = tuple(x for x in tmp_graph.get_operations()
                     if x not in all_ignored_ops)

  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  if context.in_eager_mode():
    for f in tmp_graph._functions.values():  # pylint: disable=protected-access
      # TODO(ashankar): What about the gradient registry?
      _register(f._c_func)  # pylint: disable=protected-access
  return GraphModeFunction(
      fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
      func_outputs, output_shapes, variables)
Example 8: start_queue_runners
def start_queue_runners(self, sess, queue_runners=None):
  """Start threads for `QueueRunners`.

  Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
  are already started automatically when you create a session with the
  supervisor, so unless you have non-collected queue runners to start
  you do not need to call this explicitly.

  Args:
    sess: A `Session`.
    queue_runners: A list of `QueueRunners`. If not specified, we'll use the
      list of queue runners gathered in the graph under the key
      `GraphKeys.QUEUE_RUNNERS`.

  Returns:
    The list of threads started for the `QueueRunners`.

  Raises:
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  Queues are not compatible with eager execution. To ingest data when eager
  execution is enabled, use the `tf.data` API.
  @end_compatibility
  """
  if context.in_eager_mode():
    raise RuntimeError("Queues are not compatible with eager execution.")
  if queue_runners is None:
    queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
  threads = []
  for qr in queue_runners:
    threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
                                     start=True))
  return threads
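The @compatibility note above points at the replacement for queues: under eager execution, a tf.data.Dataset is consumed directly, with no Session and no queue-runner threads. A minimal sketch, assuming a TF 1.x release recent enough to expose tf.enable_eager_execution and support direct Python iteration over datasets (older eager builds went through tf.contrib.eager.Iterator):

import tensorflow as tf

tf.enable_eager_execution()

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).batch(2)
for batch in dataset:  # plain Python iteration; no Session, no queue threads
  print(batch.numpy())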
Example 9: losses
def losses(self):
  """Retrieve the network's losses.

  Will only include losses that are either
  unconditional, or conditional on inputs to this model
  (e.g. will not include losses that depend on tensors
  that aren't inputs to this model).

  Returns:
    A list of loss tensors.
  """
  losses = []
  if context.in_eager_mode():
    for layer in self.layers:
      losses += layer.losses
    return losses

  for layer in self.layers:
    losses += layer.losses

  relevant_inputs = []
  for i in range(len(self._inbound_nodes)):
    inputs = self.get_input_at(i)
    if isinstance(inputs, list):
      relevant_inputs += inputs
    else:
      relevant_inputs.append(inputs)
  reachable = layers_util.get_reachable_from_inputs(relevant_inputs, losses)
  relevant_conditional_losses = [x for x in losses if x in reachable]
  unconditional_losses = [
      x for x in losses if x._unconditional_loss]  # pylint: disable=protected-access
  return list(set(
      relevant_conditional_losses + unconditional_losses + self._losses))
Example 10: decorator
def decorator(hp_fn, registration_name=None):
  """Registers & returns hp_fn with registration_name or default name."""
  hp_name = registration_name or default_name(hp_fn)
  if hp_name in _HPARAMS and not context.in_eager_mode():
    raise LookupError("HParams set %s already registered." % hp_name)
  _HPARAMS[hp_name] = hp_fn
  return hp_fn
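This decorator is the inner half of a registry helper; the outer register() wrapper that produces it is not shown in the example. A self-contained toy version of the same pattern, with hypothetical names modeled on tensor2tensor's registry:

_HPARAMS = {}

def register_hparams(name):
  """Toy registry decorator; duplicate names raise LookupError as above."""
  def decorator(hp_fn):
    if name in _HPARAMS:
      raise LookupError("HParams set %s already registered." % name)
    _HPARAMS[name] = hp_fn
    return hp_fn
  return decorator

@register_hparams("tiny")
def tiny_hparams():
  return {"hidden_size": 64, "num_layers": 2}

The eager exemption in the original exists because an eagerly executed script may re-run module-level registration code, so re-registration is only treated as an error when building graphs.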
Example 11: _convert_to_graph_constant
def _convert_to_graph_constant(value, dtype=None, name=None, as_ref=False):
  """Captures a tfe Tensor while building a graph mode function.

  Creates a placeholder to pass the tensor as an argument.

  Arguments:
    value: A tfe.Tensor object
    dtype: The datatype of the value produced by the node in the graph.
    name: Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    A placeholder which will, at runtime, have the value of this tensor.

  Raises:
    ValueError: if called outside a defun context.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    raise ValueError(
        "Trying to use tfe.Tensor objects in a graph outside graph mode. "
        "To build a graph use tfe.defun or tfe.make_template.")
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  return captured_value
Example 12: zero_state
def zero_state(self, batch_size, dtype):
  """Return zero-filled state tensor(s).

  Args:
    batch_size: int, float, or unit Tensor representing the batch size.
    dtype: the data type to use for the state.

  Returns:
    If `state_size` is an int or TensorShape, then the return value is a
    `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.

    If `state_size` is a nested list or tuple, then the return value is
    a nested list or tuple (of the same structure) of `2-D` tensors with
    the shapes `[batch_size, s]` for each s in `state_size`.
  """
  # Try to use the last cached zero_state. This is done to avoid recreating
  # zeros, especially when eager execution is enabled.
  state_size = self.state_size
  is_eager = context.in_eager_mode()
  if is_eager and hasattr(self, "_last_zero_state"):
    (last_state_size, last_batch_size, last_dtype,
     last_output) = getattr(self, "_last_zero_state")
    if (last_batch_size == batch_size and
        last_dtype == dtype and
        last_state_size == state_size):
      return last_output
  with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
    output = _zero_state_tensors(state_size, batch_size, dtype)
  if is_eager:
    self._last_zero_state = (state_size, batch_size, dtype, output)
  return output
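A usage sketch against the public TF 1.x tf.nn.rnn_cell API. The point of the cache above is that, under eager execution, a second call with the same (state_size, batch_size, dtype) returns the cached zeros instead of rebuilding them:

import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=8)
state = cell.zero_state(batch_size=4, dtype=tf.float32)
# For an LSTM cell, `state` is an LSTMStateTuple of two [4, 8] zero tensors.
# In eager mode, cell.zero_state(4, tf.float32) now returns the cached tuple.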
Example 13: testMultiIONetworkbuilding
def testMultiIONetworkbuilding(self):
  input_a = network_layers.Input(shape=(32,))
  input_b = network_layers.Input(shape=(16,))
  a = core_layers.Dense(16)(input_a)

  class AddLayer(base_layers.Layer):

    def call(self, inputs):
      return inputs[0] + inputs[1]

    def compute_output_shape(self, input_shape):
      return input_shape[0]

  c = AddLayer()([a, input_b])  # pylint: disable=not-callable
  c = core_layers.Dense(2)(c)

  network = network_layers.GraphNetwork([input_a, input_b], [a, c])
  if context.in_eager_mode():
    a_val = constant_op.constant(
        np.random.random((10, 32)).astype('float32'))
    b_val = constant_op.constant(
        np.random.random((10, 16)).astype('float32'))
    outputs = network([a_val, b_val])
    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [10, 16])
    self.assertEqual(outputs[1].shape.as_list(), [10, 2])
Example 14: _convert_to_graph_tensor
def _convert_to_graph_tensor(value, dtype=None, name=None, as_ref=False):
  """Captures a Tensor while building a graph mode function.

  Arguments:
    value: A Tensor object.
    dtype: The datatype of the value produced by the node in the graph.
    name: Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    Returns a constant (the current value of the tensor) if capturing
    is not enabled. A placeholder which will have the value of the
    tensor at runtime otherwise.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    # Capturing is not enabled.
    return constant_op.constant(value.numpy())
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  tape.record_operation("captured_value", [captured_value], [value], [],
                        lambda x: x)
  return captured_value
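A sketch of the behavior this conversion function enables (assumptions: TF 1.x eager plus tf.contrib.eager.defun): an eager tensor closed over by a defun-compiled function goes through exactly this capture path and is fed in via a placeholder at call time.

import tensorflow as tf

tf.enable_eager_execution()
tfe = tf.contrib.eager

scale = tf.constant(3.0)  # eager tensor captured by the compiled function

@tfe.defun
def scaled(x):
  return x * scale  # closing over `scale` triggers the capture machinery

print(scaled(tf.constant(2.0)).numpy())  # 6.0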
Example 15: losses
def losses(self):
  """Retrieve the network's losses.

  Will only include losses that are either
  unconditional, or conditional on inputs to this model
  (e.g. will not include losses that depend on tensors
  that aren't inputs to this model).

  Returns:
    A list of loss tensors.
  """
  losses = []
  if context.in_eager_mode():
    for layer in self.layers:
      losses += layer.losses
    return losses

  # Retrieve losses for all internal layers.
  for layer in self.layers:
    if hasattr(layer, 'losses'):
      # Collect losses that are dependent on inputs
      # that are part of the model.
      for node_index, node in enumerate(layer._inbound_nodes):  # pylint: disable=protected-access
        node_key = _make_node_key(layer.name, node_index)
        if node_key in self._network_nodes:
          # The model owns this layer node.
          inputs = node.input_tensors
          losses += layer.get_losses_for(inputs)
      # Collect unconditional losses.
      losses += layer.get_losses_for(None)
  # Add any potential unconditional model-level loss.
  losses += self.get_losses_for(None)
  return losses
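To see where conditional and unconditional losses come from in practice, a short sketch against the public Keras API (TF 1.x, tf.keras assumed): weight regularizers yield unconditional losses, while activity regularizers yield losses conditional on the model's inputs.

import tensorflow as tf

keras = tf.keras
inputs = keras.layers.Input(shape=(4,))
outputs = keras.layers.Dense(
    2,
    kernel_regularizer=keras.regularizers.l2(0.01),   # unconditional loss
    activity_regularizer=keras.regularizers.l1(0.01)  # conditional on inputs
)(inputs)
model = keras.models.Model(inputs, outputs)
print(len(model.losses))  # 2: both losses are attributable to this model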