This article collects typical usage examples of the enabled function from the tensorflow.python.tf2 module. If you are wondering what enabled does, how to call it, or what real uses of it look like, the curated examples below should help.
Fifteen code examples of the enabled function are shown, ordered by popularity by default.
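Nearly every example below follows the same pattern: call tf2.enabled() to check whether TensorFlow 2.x behavior is active in the current process, then branch to the matching V1 or V2 API. As a quick orientation, here is a minimal sketch of that pattern. It is not taken from the examples; it assumes the internal tensorflow.python.tf2 module and the dataset_ops module that several examples use, and internal import paths can change between releases.

from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops

# Pick the dataset class that matches the runtime's V1/V2 behavior flag.
if tf2.enabled():
  dataset = dataset_ops.DatasetV2.range(10)  # TF 2.x behavior is active
else:
  dataset = dataset_ops.Dataset.range(10)    # fall back to the V1 API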
Example 1: test_run_all_keras_modes_with_all_model_types_annotate_class_2
def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
  l = []

  @keras_parameterized.run_with_all_model_types
  class ExampleTest(keras_parameterized.TestCase):

    def runTest(self):
      pass

    @keras_parameterized.run_all_keras_modes
    @parameterized.named_parameters(dict(testcase_name="_arg",
                                         arg=True))
    def testBody(self, arg):
      mode = "eager" if context.executing_eagerly() else "graph"
      should_run_eagerly = testing_utils.should_run_eagerly()
      l.append((mode, should_run_eagerly, testing_utils.get_model_type()))

  e = ExampleTest()
  e.testBody_arg_v2_eager_functional()
  e.testBody_arg_v2_function_functional()
  e.testBody_arg_v2_eager_sequential()
  e.testBody_arg_v2_function_sequential()
  e.testBody_arg_v2_eager_subclass()
  e.testBody_arg_v2_function_subclass()

  if not tf2.enabled():
    e.testBody_arg_v1_graph_functional()
    e.testBody_arg_v1_graph_sequential()
    e.testBody_arg_v1_graph_subclass()

  expected_combinations = {
      ("eager", True, "functional"),
      ("eager", False, "functional"),
      ("eager", True, "sequential"),
      ("eager", False, "sequential"),
      ("eager", True, "subclass"),
      ("eager", False, "subclass"),
  }

  if not tf2.enabled():
    expected_combinations = expected_combinations.union({
        ("graph", False, "functional"),
        ("graph", False, "sequential"),
        ("graph", False, "subclass"),
    })

  self.assertLen(l, len(expected_combinations))
  self.assertEqual(set(l), expected_combinations)

  ts = unittest.makeSuite(ExampleTest)
  res = unittest.TestResult()
  ts.run(res)

  self.assertLen(l, len(expected_combinations) * 2)
Example 2: test_run_all_keras_modes_extra_params
def test_run_all_keras_modes_extra_params(self):
  l = []

  class ExampleTest(keras_parameterized.TestCase):

    def runTest(self):
      pass

    @keras_parameterized.run_all_keras_modes
    @parameterized.named_parameters(
        [dict(testcase_name="_0", with_brackets=True),
         dict(testcase_name="_1", with_brackets=False)])
    def testBody(self, with_brackets):
      mode = "eager" if context.executing_eagerly() else "graph"
      with_brackets = "with_brackets" if with_brackets else "without_brackets"
      should_run_eagerly = testing_utils.should_run_eagerly()
      l.append((with_brackets, mode, should_run_eagerly))

  e = ExampleTest()
  if not tf2.enabled():
    e.testBody_0_v1_graph()
    e.testBody_1_v1_graph()
  e.testBody_0_v2_eager()
  e.testBody_0_v2_function()
  e.testBody_1_v2_eager()
  e.testBody_1_v2_function()

  expected_combinations = {
      ("with_brackets", "eager", True),
      ("with_brackets", "eager", False),
      ("without_brackets", "eager", True),
      ("without_brackets", "eager", False),
  }

  if not tf2.enabled():
    expected_combinations = expected_combinations.union({
        ("with_brackets", "graph", False),
        ("without_brackets", "graph", False),
    })

  self.assertLen(l, len(expected_combinations))
  self.assertEqual(set(l), expected_combinations)

  ts = unittest.makeSuite(ExampleTest)
  res = unittest.TestResult()
  ts.run(res)

  self.assertLen(l, len(expected_combinations) * 2)
Example 3: testBatchSplitting
def testBatchSplitting(self, input_type, api_type, iteration_type,
                       split_batch_by, distribution,
                       enable_get_next_as_optional):
  worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
  batch_size = 10
  if tf2.enabled():
    dataset_fn = lambda _: dataset_ops.DatasetV2.range(100).batch(batch_size)
  else:
    dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)

  updated_batch_size = (
      batch_size // split_batch_by if split_batch_by else batch_size)
  expected_values = [[range(i, i+updated_batch_size),
                      range(i+updated_batch_size, i+2*updated_batch_size)]
                     for i in range(0, 100, updated_batch_size*2)]

  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_fn,
      worker_device_pairs,
      expected_values,
      distribution,
      sess=None,
      split_batch_by=split_batch_by,
      enable_get_next_as_optional=True)
Example 4: test_optimizer_errors
def test_optimizer_errors(self):
  opt = 1
  if tf2.enabled():
    expected_regex = ('"opt" must be an instance of a '
                      'tf.keras.optimizers.Optimizer, but got')
  else:
    expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
                      'a tf.keras.optimizers.Optimizer, but got')
  with self.assertRaisesRegexp(ValueError, expected_regex):
    enable_mixed_precision_graph_rewrite(opt)
  self.assertFalse(config.get_optimizer_experimental_options()
                   .get('auto_mixed_precision', False))

  opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
  opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
                                                                 'dynamic')
  with self.assertRaisesRegexp(ValueError,
                               '"opt" must not already be an instance of a '
                               'MixedPrecisionLossScaleOptimizer.'):
    enable_mixed_precision_graph_rewrite(opt)
  self.assertFalse(config.get_optimizer_experimental_options()
                   .get('auto_mixed_precision', False))

  opt = gradient_descent_v2.SGD(1.0)
  opt = loss_scale_optimizer_v2.LossScaleOptimizer(opt, 'dynamic')
  with self.assertRaisesRegexp(ValueError,
                               '"opt" must not already be an instance of a '
                               'LossScaleOptimizer.'):
    enable_mixed_precision_graph_rewrite(opt)
  self.assertFalse(config.get_optimizer_experimental_options()
                   .get('auto_mixed_precision', False))
Example 5: deserialize
def deserialize(config, custom_objects=None):
  """Instantiates a layer from a config dictionary.

  Arguments:
      config: dict of the form {'class_name': str, 'config': dict}
      custom_objects: dict mapping class names (or function names)
          of custom (non-Keras) objects to class/functions

  Returns:
      Layer instance (may be Model, Sequential, Network, Layer...)
  """
  from tensorflow.python.keras import models  # pylint: disable=g-import-not-at-top

  globs = globals()  # All layers.
  globs['Network'] = models.Network
  globs['Model'] = models.Model
  globs['Sequential'] = models.Sequential
  layer_class_name = config['class_name']

  if layer_class_name in _DESERIALIZATION_TABLE:
    version = 'v2' if tf2.enabled() else 'v1'
    config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name][version]

  return deserialize_keras_object(
      config,
      module_objects=globs,
      custom_objects=custom_objects,
      printable_module_name='layer')
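To make the config format described in the docstring above concrete, here is a small hypothetical usage sketch. The Dense layer and its values are illustrative and not part of the original example; keras.layers.serialize and keras.layers.deserialize are the public entry points also exercised in Example 15.

from tensorflow.python.keras import layers

# Serialize a layer to the {'class_name': ..., 'config': ...} form and rebuild it.
dense = layers.Dense(3, activation='relu')
config = layers.serialize(dense)         # {'class_name': 'Dense', 'config': {...}}
restored = layers.deserialize(config)    # a fresh, unbuilt Dense layer instance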
Example 6: get_expected_metric_variable_names
def get_expected_metric_variable_names(var_names, name_suffix=''):
  """Returns expected metric variable names given names and prefix/suffix."""
  if tf2.enabled() or context.executing_eagerly():
    # In V1 eager mode and V2 variable names are not made unique.
    return [n + ':0' for n in var_names]
  # In V1 graph mode variable names are made unique using a suffix.
  return [n + name_suffix + ':0' for n in var_names]
Example 7: testUnevenDatasetBatches
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type):
  strategy = mirrored_strategy.MirroredStrategy(
      devices=(self._cpu_and_one_gpu_devices()[0][1] +
               self._cpu_and_one_gpu_devices()[1][1]),
      cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(
          ["/job:worker/task:0", "/job:worker/task:1"], 2))
  worker_devices = self._cpu_and_one_gpu_devices()
  with context.graph_mode(), strategy.scope(), self.cached_session() as sess:
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)

    if input_type == "dataset":
      # Autosharded
      expected_values = [[[0, 1], [4, 5], [2, 3], [6, 7]], [[8], [], [], []]]
    else:
      expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]],
                         [[4, 5], [6, 7], [4, 5], [6, 7]],
                         [[8], [], [8], []]]
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_fn,
        worker_devices,
        expected_values,
        strategy,
        sess=sess,
        enable_get_next_as_optional=True)
Example 8: testOneDevicePerWorker
def testOneDevicePerWorker(self, input_type, api_type, iteration_type,
                           enable_get_next_as_optional):
  strategy = mirrored_strategy.MirroredStrategy(
      devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]),
      cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(
          ["/job:worker/task:0", "/job:worker/task:1"], 1))
  worker_devices = self._cpu_devices()
  with context.graph_mode(), strategy.scope(), self.cached_session() as sess:
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(4)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(4)

    if input_type == "dataset":
      # Autosharded
      expected_values = [[0, 1], [2, 3]]
    else:
      expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]]
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset_fn,
        worker_devices,
        expected_values,
        strategy,
        sess=sess,
        enable_get_next_as_optional=enable_get_next_as_optional)
Example 9: testGradientFloat16
def testGradientFloat16(self):

  def grad(x):
    with backprop.GradientTape() as tape:
      tape.watch(x)
      y = nn_ops.l2_loss(nn_ops.relu(x))
    return tape.gradient(y, x)

  def f():
    with test_util.use_gpu():
      # Randomly construct a 1D shape from [1, 40)
      shape = random_ops.random_uniform([1],
                                        minval=1,
                                        maxval=40,
                                        dtype=dtypes.int32)
      x32 = random_ops.random_uniform(shape, minval=-1, maxval=1)
      x16 = math_ops.cast(x32, dtype=dtypes.float16)
      return grad(x32), grad(x16)

  # We're going to ensure that the fp16 and fp32 gradients
  # are "close" to each other for ~100 random values.
  #
  # In TensorFlow 1.x, invoking f() (without eager execution enabled)
  # would construct a graph. Instead of constructing a graph with O(100)
  # nodes, we construct a single graph to be executed ~100 times in a
  # Session.
  if not tf2.enabled():
    d32_tensor, d16_tensor = f()
    with self.cached_session() as sess:
      f = lambda: sess.run([d32_tensor, d16_tensor])

  # Repeat the experiment 100 times. All tensor shapes and their values
  # are randomly generated for each run.
  for _ in xrange(100):
    d32, d16 = f()
    self.assertAllClose(d32, d16, atol=3e-4)
Example 10: setUpClass
def setUpClass(cls):
  if tf2.enabled():
    stats_aggregator._DEFAULT_MAX_QUEUE = 0  # pylint: disable=protected-access
    stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV2
    # TODO(b/116314787): add graph mode support for StatsAggregatorV2.
  else:
    stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV1
  return test_util.run_all_in_graph_and_eager_modes(cls)
Example 11: dataset_fn
def dataset_fn(ctx):
  del ctx
  if tf2.enabled():
    dataset1 = dataset_ops.DatasetV2.range(4)
    dataset2 = dataset_ops.DatasetV2.range(4).map(lambda x: x**2)
    return dataset_ops.DatasetV2.zip((dataset1, dataset2))
  else:
    dataset1 = dataset_ops.Dataset.range(4)
    dataset2 = dataset_ops.Dataset.range(4).map(lambda x: x**2)
    return dataset_ops.Dataset.zip((dataset1, dataset2))
Example 12: assertStatisticsHasSum
def assertStatisticsHasSum(self,
                           handle,
                           tag,
                           expected_value,
                           num_events=-1,
                           offset=0):
  if tf2.enabled():
    self._assertEventHasSum(handle, tag, expected_value, num_events, offset)
  else:
    self._assertSummaryHasSum(handle, tag, expected_value)
Example 13: test_run_all_keras_modes
def test_run_all_keras_modes(self):
  l = []

  class ExampleTest(keras_parameterized.TestCase):

    def runTest(self):
      pass

    @keras_parameterized.run_all_keras_modes
    def testBody(self):
      mode = "eager" if context.executing_eagerly() else "graph"
      should_run_eagerly = testing_utils.should_run_eagerly()
      l.append((mode, should_run_eagerly))

  e = ExampleTest()
  if not tf2.enabled():
    e.testBody_v1_graph()
  e.testBody_v2_eager()
  e.testBody_v2_function()

  if not tf2.enabled():
    self.assertLen(l, 3)
    self.assertAllEqual(l, [
        ("graph", False),
        ("eager", True),
        ("eager", False),
    ])

    ts = unittest.makeSuite(ExampleTest)
    res = unittest.TestResult()
    ts.run(res)
    self.assertLen(l, 6)
  else:
    self.assertLen(l, 2)
    self.assertAllEqual(l, [
        ("eager", True),
        ("eager", False),
    ])

    ts = unittest.makeSuite(ExampleTest)
    res = unittest.TestResult()
    ts.run(res)
    self.assertLen(l, 4)
Example 14: test_singleton_list
def test_singleton_list(self):
  shape = tensor_shape.TensorShape([])
  fn_true = lambda: [constant_op.constant(1)]
  fn_false = lambda: [constant_op.constant(3)]

  # Non-strict cond is only available in v1
  if not tf2.enabled():
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1, 3)
  self._testShape(fn_true, fn_false, [shape], strict=True)
  self._testReturnValues(fn_true, fn_false, [1], [3], strict=True)
Example 15: test_serialize_deserialize_lstm
def test_serialize_deserialize_lstm(self, layer):
  lstm = layer(5, return_sequences=True)
  config = keras.layers.serialize(lstm)
  self.assertEqual(config['class_name'], 'LSTM')

  new_layer = keras.layers.deserialize(config)
  self.assertEqual(new_layer.units, 5)
  self.assertEqual(new_layer.return_sequences, True)

  if tf2.enabled():
    self.assertIsInstance(new_layer, keras.layers.UnifiedLSTM)
  else:
    self.assertIsInstance(new_layer, keras.layers.LSTM)
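A closing note that is not part of the collected examples: in the TensorFlow releases these snippets come from, tf2.enabled() reflects whether V2 behavior has been switched on, either through the TF2_BEHAVIOR environment variable or through the module's own enable()/disable() toggles (which compat helpers such as enable_v2_behavior() rely on internally). A hedged sketch, assuming those internals:

from tensorflow.python import tf2

print(tf2.enabled())   # typically False under a plain TF 1.x runtime

tf2.enable()           # force the V2-behavior flag on for code that checks it
assert tf2.enabled()

tf2.disable()          # force the flag off again
assert not tf2.enabled()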