本文整理匯總了Python中tensorflow.executing_eagerly方法的典型用法代碼示例。如果您正苦於以下問題:Python tensorflow.executing_eagerly方法的具體用法?Python tensorflow.executing_eagerly怎麽用?Python tensorflow.executing_eagerly使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorflow
的用法示例。
在下文中一共展示了tensorflow.executing_eagerly方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: get_params
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def get_params(self):
    """Provide access to the model's parameters.

    :return: A list of all Variables defining the model parameters.
    """
    # Eager execution has no graph collections to scan, so subclasses
    # must override this method when running eagerly.
    try:
        if tf.executing_eagerly():
            raise NotImplementedError("For Eager execution - get_params "
                                      "must be overridden.")
    except AttributeError:
        # Older TF builds lack tf.executing_eagerly; assume graph mode.
        pass
    # Graph-based execution: gather trainable variables under this scope.
    return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
示例2: test_get_num_columns_of_2d_tensor
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_get_num_columns_of_2d_tensor(self):
    """Tests the "get_num_columns_of_2d_tensor" function."""
    self.assertFalse(tf.executing_eagerly())
    # A plain nested list (not a tensor) must be rejected.
    with self.assertRaises(TypeError):
        helpers.get_num_columns_of_2d_tensor([[1, 2], [3, 4]])
    # A rank-1 tensor has no column dimension and must be rejected.
    rank1_tensor = tf.convert_to_tensor([1, 2, 3, 4])
    with self.assertRaises(ValueError):
        helpers.get_num_columns_of_2d_tensor(rank1_tensor)
    # A valid 2D tensor yields the size of its second dimension.
    rank2_tensor = tf.convert_to_tensor([[1, 2, 3]])
    self.assertEqual(3, helpers.get_num_columns_of_2d_tensor(rank2_tensor))
示例3: test_run_in_graph_and_eager_modes
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_run_in_graph_and_eager_modes(self):
    """The decorator must work both with and without parentheses."""
    calls = []

    def record(self, with_brackets):
        del self  # self argument is required by run_in_graph_and_eager_modes.
        mode = "eager" if tf.executing_eagerly() else "graph"
        tag = "with_brackets" if with_brackets else "without_brackets"
        calls.append((tag, mode))

    # Used directly as a decorator (no parentheses).
    decorated = test_utils.run_in_graph_and_eager_modes(record)
    decorated(self, with_brackets=False)
    # Used as a decorator factory (with parentheses).
    decorated = test_utils.run_in_graph_and_eager_modes()(record)
    decorated(self, with_brackets=True)

    # Each variant must have run once in graph mode and once in eager mode.
    self.assertEqual(len(calls), 4)
    self.assertEqual(set(calls), {
        ("with_brackets", "graph"),
        ("with_brackets", "eager"),
        ("without_brackets", "graph"),
        ("without_brackets", "eager"),
    })
示例4: test_run_in_graph_and_eager_modes_setup_in_same_mode
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
    """setUp must execute in the same mode as the test body it precedes."""
    modes = []

    def current_mode():
        return "eager" if tf.executing_eagerly() else "graph"

    class ExampleTest(tf.test.TestCase):

        def runTest(self):
            pass

        def setUp(self):
            modes.append("setup_" + current_mode())

        @test_utils.run_in_graph_and_eager_modes
        def testBody(self):
            modes.append("run_" + current_mode())

    example = ExampleTest()
    example.setUp()
    example.testBody()

    # The eager pass runs first, then the graph pass; each body run must
    # be preceded by a setUp executed in the same mode.
    self.assertEqual(modes[0:2], ["setup_eager", "run_eager"])
    self.assertEqual(modes[2:], ["setup_graph", "run_graph"])
示例5: remove
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def remove(self, x):
    """Remove padding from the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_origin,...]

    Returns:
      a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
    """
    with tf.name_scope("pad_reduce/remove"):
        original_shape = x.get_shape().as_list()
        x = tf.gather_nd(x, indices=self.nonpad_ids)
        if not tf.executing_eagerly():
            # This is a hack but for some reason, gather_nd returns a tensor
            # of undefined shape, so the static shape is restored manually.
            x.set_shape([None] + original_shape[1:])
    return x
示例6: summarize_video
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def summarize_video(video, prefix, max_outputs=1):
    """Summarize the video using image summaries starting with prefix."""
    video_shape = shape_list(video)
    if len(video_shape) != 5:
        raise ValueError("Assuming videos given as tensors in the format "
                         "[batch, time, height, width, channels] but got one "
                         "of shape: %s" % str(video_shape))
    # Summaries are a graph-mode concept; nothing to emit when eager.
    if tf.executing_eagerly():
        return
    if video.get_shape().as_list()[1] is None:
        # Unknown (dynamic) time dimension: summarize only the last frame.
        tf.summary.image(
            "%s_last_frame" % prefix,
            tf.cast(video[:, -1, :, :, :], tf.uint8),
            max_outputs=max_outputs)
    else:
        # Known time dimension: emit one image summary per frame.
        for frame_index in range(video_shape[1]):
            tf.summary.image(
                "%s_frame_%d" % (prefix, frame_index),
                tf.cast(video[:, frame_index, :, :, :], tf.uint8),
                max_outputs=max_outputs)
示例7: test_minimize_ill_conditioned_not_raised
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_minimize_ill_conditioned_not_raised(self):
    """Optimizing an ill conditioned problem should not raise an exception."""
    if not tf.executing_eagerly():
        return  # Eager-only test.

    # Two residuals with wildly different scales make the problem
    # ill conditioned.
    def large_residual(x, y):
        return x * y * 10000.0

    def small_residual(x, y):
        return x * y * 0.0001

    initial_x = (1.,)
    initial_y = (1.,)
    try:
        self.evaluate(
            levenberg_marquardt.minimize(
                residuals=(large_residual, small_residual),
                variables=(initial_x, initial_y),
                max_iterations=1,
                regularizer=1e-20))
    except Exception as e:  # pylint: disable=broad-except
        self.fail("Exception raised: %s" % str(e))
示例8: test_invalid_variable_inputs
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_invalid_variable_inputs(self, error_msg, variable_names,
                                 variable_kinds, variable_values, error_eager,
                                 error_graph_mode):
    """Rasterizing with invalid variables raises the mode-specific error.

    Args:
      error_msg: Regex the raised error message must match.
      variable_names: Shader variable names passed to the rasterizer.
      variable_kinds: Shader variable kinds passed to the rasterizer.
      variable_values: Shader variable values passed to the rasterizer.
      error_eager: Exception type expected under eager execution.
      error_graph_mode: Exception type expected under graph execution.
    """
    height = 1
    width = 1
    empty_shader_code = "#version 460\n void main() { }\n"
    # Eager and graph modes surface the failure as different exception types.
    if tf.executing_eagerly():
        error = error_eager
    else:
        error = error_graph_mode
    # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias,
    # which was removed in Python 3.12.
    with self.assertRaisesRegex(error, error_msg):
        self.evaluate(
            rasterizer.rasterize(
                num_points=0,
                variable_names=variable_names,
                variable_kinds=variable_kinds,
                variable_values=variable_values,
                output_resolution=(width, height),
                vertex_shader=empty_shader_code,
                geometry_shader=empty_shader_code,
                fragment_shader=empty_shader_code))
示例9: test_inverse_jacobian_random
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_inverse_jacobian_random(self):
    """Test the Jacobian of the inverse function."""
    x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle()
    # Because the axis is returned as is, gradient calculation fails in
    # graph mode but not in eager mode. This is a side effect of having a
    # graph rather than a problem of the function, so the Jacobian is only
    # checked under eager execution.
    if tf.executing_eagerly():
        with self.subTest("axis"):
            self.assert_jacobian_is_correct_fn(
                lambda x: axis_angle.inverse(1.0 * x, x_angle_init)[0],
                [x_axis_init])
        with self.subTest("angle"):
            self.assert_jacobian_is_correct_fn(
                lambda x: axis_angle.inverse(x_axis_init, x)[1],
                [x_angle_init])
示例10: test_dynamic_graph_convolution_keras_layer_exception_not_raised_shapes
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_dynamic_graph_convolution_keras_layer_exception_not_raised_shapes(
    self, batch_size, num_vertices, in_channels, out_channels, reduction):
    """Check if the convolution parameters and output have correct shapes."""
    if not tf.executing_eagerly():
        return  # Eager-only test.
    data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
    conv_layer = gc_layer.DynamicGraphConvolutionKerasLayer(
        num_output_channels=out_channels,
        reduction=reduction)
    try:
        result = conv_layer(inputs=[data, neighbors], sizes=None)
    except Exception as e:  # pylint: disable=broad-except
        self.fail("Exception raised: %s" % str(e))
    # Output must be [batch, vertices, output channels].
    self.assertAllEqual((batch_size, num_vertices, out_channels), result.shape)
示例11: test_dynamic_graph_convolution_keras_layer_zero_kernel
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_dynamic_graph_convolution_keras_layer_zero_kernel(
    self, batch_size, num_vertices, in_channels, out_channels, reduction):
    """Tests convolution with an all-zeros kernel."""
    if not tf.executing_eagerly():
        return  # Eager-only test.
    data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)
    data = np.random.uniform(size=data.shape).astype(np.float32)
    # With a zero kernel and no bias, the output must be identically zero.
    conv_layer = gc_layer.DynamicGraphConvolutionKerasLayer(
        num_output_channels=out_channels,
        reduction=reduction,
        use_bias=False,
        kernel_initializer=tf.compat.v1.keras.initializers.zeros())
    result = conv_layer(inputs=[data, neighbors], sizes=None)
    expected = np.zeros(shape=(batch_size, num_vertices, out_channels),
                        dtype=np.float32)
    self.assertAllEqual(result, expected)
示例12: test_dynamic_graph_convolution_keras_layer_duplicate_features
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def test_dynamic_graph_convolution_keras_layer_duplicate_features(
    self, num_vertices, in_channels, out_channels):
    """Tests convolution when all vertex features are identical."""
    if not tf.executing_eagerly():
        return  # Eager-only test.
    # Every vertex receives the same feature row.
    features = np.random.uniform(size=(1, in_channels))
    features = np.tile(features, (num_vertices, 1))
    # Results should be independent of 'neighbors', so the adjacency is
    # random (with guaranteed self-loops via the identity).
    adjacency = np.maximum(
        np.random.randint(0, 2, size=(num_vertices, num_vertices)),
        np.eye(num_vertices))
    neighbors = _dense_to_sparse(adjacency)
    conv_layer = gc_layer.DynamicGraphConvolutionKerasLayer(
        num_output_channels=out_channels,
        reduction="max")
    result = conv_layer(inputs=[features, neighbors], sizes=None)
    # All output rows must equal the first row.
    first_row_tiled = tf.tile(result[:1, :], (num_vertices, 1))
    self.assertAllEqual(result, first_row_tiled)
示例13: assert_jacobian_is_correct
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def assert_jacobian_is_correct(self, x, x_init, y, atol=1e-6, delta=1e-6):
    """Tests that the gradient error of y=f(x) is small.

    Args:
      x: A tensor.
      x_init: A numpy array containing the values at which to estimate the
        gradients of y.
      y: A tensor.
      atol: Maximum absolute tolerance in gradient error.
      delta: The amount of perturbation.
    """
    deprecation_message = (
        "assert_jacobian_is_correct is deprecated and might get "
        "removed in a future version please use assert_jacobian_is_correct_fn")
    warnings.warn(deprecation_message, DeprecationWarning)
    # Numeric gradient checking here relies on the graph; skip under eager.
    if tf.executing_eagerly():
        self.skipTest(reason="Graph mode only test")
    max_error, _, _ = self._compute_gradient_error(x, y, x_init, delta)
    self.assertLessEqual(max_error, atol)
示例14: assert_jacobian_is_correct_fn
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def assert_jacobian_is_correct_fn(self, f, x, atol=1e-6, delta=1e-6):
    """Tests that the gradient error of y=f(x) is small.

    Args:
      f: the function.
      x: A list of arguments for the function
      atol: Maximum absolute tolerance in gradient error.
      delta: The amount of perturbation.
    """
    # pylint: disable=no-value-for-parameter
    if tf.executing_eagerly():
        gradients = tf.test.compute_gradient(f, x, delta)
    else:
        # Graph mode needs an active session for gradient evaluation.
        with self.cached_session():
            gradients = tf.test.compute_gradient(f, x, delta)
    # pylint: enable=no-value-for-parameter
    self.assertLessEqual(_max_error(*gradients), atol)
示例15: assert_jacobian_is_finite_fn
# 需要導入模塊: import tensorflow [as 別名]
# 或者: from tensorflow import executing_eagerly [as 別名]
def assert_jacobian_is_finite_fn(self, f, x):
    """Tests that the Jacobian only contains valid values.

    The analytical gradients and numerical ones are expected to differ at
    points where f(x) is not smooth. This function can be used to check that
    the analytical gradient is not 'NaN' nor 'Inf'.

    Args:
      f: the function.
      x: A list of arguments for the function
    """
    if tf.executing_eagerly():
        theoretical_gradient, _ = tf.compat.v2.test.compute_gradient(f, x)
    else:
        # Graph mode needs an active session for gradient evaluation.
        with self.cached_session():
            theoretical_gradient, _ = tf.compat.v2.test.compute_gradient(f, x)
    has_invalid_values = [
        np.isnan(gradient).any() or np.isinf(gradient).any()
        for gradient in theoretical_gradient
    ]
    self.assertNotIn(
        True, has_invalid_values,
        msg="nan or inf elements found in theoretical jacobian.")