This article collects typical usage examples of the tensorflow.python.eager.context.graph_mode function in Python. If you have been wondering what graph_mode does, how to call it, or what real code that uses it looks like, the curated examples here should help.
The following presents 15 code examples of the graph_mode function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
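Before the collected examples, here is a minimal sketch (not part of the original collection) of the pattern they all share: graph_mode() is a context manager that temporarily switches TensorFlow from eager execution to graph building. The sketch assumes a TF 1.x-era build where these private modules are importable; exact module paths can differ across versions.
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op, ops

# Ops created inside this block are recorded into `graph` instead of being
# executed eagerly; this is the core move behind every example below.
with context.graph_mode(), ops.Graph().as_default() as graph:
  x = constant_op.constant([[1.0, 2.0]])  # a symbolic graph tensor
  assert x.graph is graph
ops.dismantle_graph(graph)  # break op->graph->op reference cycles (see Example 4)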
Example 1: _worker_fn
def _worker_fn(task_type, task_id, num_gpus):
del num_gpus
tf_config = {
"cluster": self._cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
with context.graph_mode(), lock, test.mock.patch.dict(
"os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
strategy = strategy_cls()
with context.graph_mode(), strategy.scope(), self.cached_session(
target="grpc://" + self._cluster_spec[task_type][task_id]) as sess:
if tf2.enabled():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(5).batch(2)
else:
dataset_fn = lambda _: dataset_ops.Dataset.range(5).batch(2)
if (input_type == "dataset" and strategy_cls is
collective_all_reduce_strategy.CollectiveAllReduceStrategy):
# Autosharded
if task_id == 0:
expected_values = [[[0, 1]], [[4]]]
else:
expected_values = [[[2, 3]], [[]]]
# input_context is for between-graph auto-sharding.
input_context = distribute_lib.InputContext(
num_input_pipelines=2,
input_pipeline_id=task_id,
num_replicas_in_sync=2)
else:
expected_values = [[[0, 1]], [[2, 3]], [[4]]]
input_context = None
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_fn,
[("/job:%s/task:%d" %
(task_type, task_id), strategy.extended.worker_devices)],
expected_values,
strategy,
sess=sess,
enable_get_next_as_optional=True,
input_context=input_context)
return True
Example 2: test_build_standardized_signature_def_classify_classes_only
def test_build_standardized_signature_def_classify_classes_only(self):
"""Tests classification with one output tensor."""
with context.graph_mode():
input_tensors = {
'input-1':
array_ops.placeholder(
dtypes.string, 1, name='input-tensor-1')
}
classes = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')
export_output = export_output_lib.ClassificationOutput(classes=classes)
actual_signature_def = export_output.as_signature_def(input_tensors)
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_string = types_pb2.DataType.Value('DT_STRING')
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(name='input-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(name='output-tensor-1:0',
dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
Example 3: testUsageGraph
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
else:
status.assert_consumed()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
Example 4: _eager_safe_variable_handle
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
"""Creates a variable handle with information to do shape inference."""
container = ops.get_default_graph()._container # pylint: disable=protected-access
if container is None:
container = ""
handle = resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
if graph_mode:
return handle
with context.graph_mode(), ops.Graph().as_default() as graph:
h = resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
# Tensor._handle_data contains information for the shape-inference code to
# know the shape and dtype of the variable pointed to by a handle. Since
# shape inference doesn't run in eager mode we copy this data here for when
# the handle is captured by an eager mode function.
# pylint: disable=protected-access
handle._handle_data = resource_variable_ops.get_resource_handle_data(h)
# pylint: enable=protected-access
# Clean up op->graph->op reference cycles.
ops.dismantle_graph(graph)
return handle
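Example 4 above (and its sibling, Example 15 at the end of this page) builds a throwaway graph under context.graph_mode() purely to recover shape-inference metadata for an eager resource handle. As a small hedged sketch of what such a handle is, using the same private var_handle_op the helper calls (a generated op module whose availability varies by TF version; "demo_var" is a hypothetical name):
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_resource_variable_ops

with context.eager_mode():
  # A resource handle is a scalar DT_RESOURCE tensor that points at the
  # variable's storage; it carries no static shape info of its own, which
  # is exactly why Example 4 copies _handle_data from a graph-mode twin.
  handle = gen_resource_variable_ops.var_handle_op(
      shape=[2, 2], dtype=dtypes.float32,
      shared_name="demo_var", container="")
  assert handle.dtype == dtypes.resource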
Example 5: testGraphOpNames
def testGraphOpNames(self):
"""Network operation names should match variable naming."""
def _check_op_prefixes(expected_prefix, checked_ops):
for operation in ops.get_default_graph().get_operations():
if operation.name == "ignore":
continue
if operation.name in checked_ops:
continue
checked_ops.add(operation.name)
self.assertStartsWith(expected_start=expected_prefix,
actual=operation.name)
self.assertNotIn("my_network", operation.name[len(expected_prefix):])
self.assertNotIn("dense", operation.name[len(expected_prefix):])
with context.graph_mode():
net = MyNetwork()
zero = constant_op.constant([[0.]], name="ignore")
net(zero)
checked_ops = set()
_check_op_prefixes(expected_prefix="my_network/dense/",
checked_ops=checked_ops)
net.net2 = net.track_layer(MyNetwork())
net.net2(zero)
_check_op_prefixes(expected_prefix="my_network/my_network/dense/",
checked_ops=checked_ops)
MyNetwork()(zero)
_check_op_prefixes(expected_prefix="my_network_1/dense/",
checked_ops=checked_ops)
Example 6: testNameScopeWithGetVariable
def testNameScopeWithGetVariable(self):
def in_cross_tower(_):
c = variable_scope.get_variable("c", [1])
return c
def model_fn():
b = variable_scope.get_variable("b", [1])
with ops.name_scope("foo"):
c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
return b, c
dist = mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:CPU:0"])
with context.graph_mode(), dist.scope():
with ops.name_scope("main"):
a = variable_scope.get_variable("a", [1])
result = dist.call_for_each_tower(model_fn, run_concurrently=False)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = dist.unwrap(a)
b0, b1 = dist.unwrap(result_b)
c0, c1 = dist.unwrap(result_c)
self.assertEquals("a:0", a0.name)
self.assertEquals("a/replica_1:0", a1.name)
self.assertEquals("b:0", b0.name)
self.assertEquals("b/replica_1:0", b1.name)
self.assertEquals("c:0", c0.name)
self.assertEquals("c/replica_1:0", c1.name)
Example 7: decorated
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self, **kwargs)  # forward kwargs, matching the other branches
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with IsolateTest():
run_eager_mode(self, **kwargs)
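Example 7 above is an excerpt from TensorFlow's test_util decorator machinery and leans on enclosing-scope names that the snippet does not define (f, graph, config, use_gpu, force_gpu, reset_test, assert_no_eager_garbage, IsolateTest). As a rough, self-contained sketch of the same idea, assuming only context.graph_mode and context.eager_mode, a run-in-both-modes decorator could look like this:
import functools

from tensorflow.python.eager import context

def run_in_graph_and_eager(f):
  # Hypothetical simplified decorator, not TensorFlow's actual test_util
  # API: it runs the wrapped test body once in graph mode, resets the test
  # fixture, then runs it again eagerly.
  @functools.wraps(f)
  def decorated(self, **kwargs):
    with context.graph_mode():
      f(self, **kwargs)  # first pass: graph building
    self.tearDown()      # reset state between the two runs
    self.setUp()
    with context.eager_mode():
      f(self, **kwargs)  # second pass: eager execution
  return decorated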
Example 8: testAggregateGradients
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2 * g3 * g4
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = ops.convert_to_tensor(grad).numpy()
with context.graph_mode(), self.test_session():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad, tf_dense_grad.eval())
Example 9: testAllV2SummaryOps
def testAllV2SummaryOps(self):
logdir = self.get_temp_dir()
def define_ops():
result = []
# TF 2.0 summary ops
result.append(summary_ops.write('write', 1, step=0))
result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
# TF 1.x tf.contrib.summary ops
result.append(summary_ops.generic('tensor', 1, step=1))
result.append(summary_ops.scalar('scalar', 2.0, step=1))
result.append(summary_ops.histogram('histogram', [1.0], step=1))
result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
return result
with context.graph_mode():
ops_without_writer = define_ops()
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(True):
ops_recording_on = define_ops()
with summary_ops.record_if(False):
ops_recording_off = define_ops()
# We should be collecting all ops defined with a default writer present,
# regardless of whether recording was set on or off, but not those defined
# without a writer at all.
del ops_without_writer
expected_ops = ops_recording_on + ops_recording_off
self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
Example 10: testInitializableIterator
def testInitializableIterator(self):
with context.graph_mode():
devices = ["/device:CPU:0"]
# Using random input since that is only allowed with initializable
# iterator.
dataset = dataset_ops.Dataset.from_tensor_slices(
random_ops.random_uniform((10,)))
per_device_dataset = values.PerDeviceDataset(
dataset, devices, prefetch_on_device=False)
iterator = per_device_dataset.make_initializable_iterator()
self.evaluate(iterator.initializer)
next_element = iterator.get_next()
for _ in range(10):
self.evaluate(next_element)
# Should fail after the input is finished.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
# After re-initializing the iterator, should be able to iterate again.
self.evaluate(iterator.initializer)
for _ in range(10):
self.evaluate(next_element)
Example 11: testDataDistributionNoAutoShard
def testDataDistributionNoAutoShard(self):
worker_devices, devices = self._cpu_devices()
with context.graph_mode():
dataset_fn = lambda: dataset_ops.Dataset.range(4)
self._test_dataset(dataset_fn, worker_devices, devices,
[[0, 0], [1, 1], [2, 2], [3, 3]],
auto_shard=False)
Example 12: test_training_no_default
def test_training_no_default(self):
with context.graph_mode():
model = TrainingNoDefaultModel()
arg = array_ops.ones([1, 1])
model(arg, True)
six.assertCountEqual(self, [arg], model.inputs)
Example 13: testDataDistributionOneDevicePerWorker
def testDataDistributionOneDevicePerWorker(self):
self.skipTest("Temporarily disabled.")
worker_device_map, devices = self._cpu_devices()
with context.graph_mode():
dataset_fn = lambda: dataset_ops.Dataset.range(8)
self._test_dataset(dataset_fn, worker_device_map, devices,
[[0, 1], [2, 3], [4, 5], [6, 7]])
Example 14: _defun_internal
def _defun_internal(name, func, args, kwds):
"""Defines and returns graph-mode version of func."""
graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
with context.graph_mode():
captures = {}
tmp_graph = CapturingGraph(captures)
# Inherit the graph key, since this is used for matching variables in
# optimizers.
tmp_graph._graph_key = graph_key # pylint: disable=protected-access
# Copy the graph collections to ensure summaries and other things work. This
# lets the function access (but not mutate) collections of the containing
# graph, such as the global step and the summary writer collections.
curr_graph = ops.get_default_graph()
for collection in curr_graph.collections:
tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
collection)
with tmp_graph.as_default():
func_inputs = _get_defun_inputs(args)
with capture_tensors(captures):
this_tape = tape.push_new_tape()
try:
func_outputs = func(*func_inputs, **kwds)
finally:
tape.pop_tape(this_tape)
variables = this_tape.watched_variables()
# Returning a closed-over tensor as an output does not trigger a
# call to convert_to_tensor, so we manually capture all such tensors.
outputs_list = _flatten(func_outputs)
func_def_outputs = [
_convert_to_graph_tensor(x) for x in outputs_list if x is not None
]
ids = list(sorted(captures.keys()))
if ids:
extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
else:
extra_inputs = []
extra_placeholders = []
output_shapes = tuple(
x.shape if isinstance(x, ops.Tensor) else None
for x in outputs_list)
flat_inputs = [x for x in nest.flatten(func_inputs)
if isinstance(x, ops.Tensor)]
all_inputs = flat_inputs + list(extra_placeholders)
all_ignored_ops = frozenset(x.op for x in all_inputs)
fname = _inference_name(name)
operations = tuple(x for x in tmp_graph.get_operations()
if x not in all_ignored_ops)
# Register any other functions defined in the graph
# TODO(ashankar): Oh lord, forgive me for this lint travesty.
if context.in_eager_mode():
for f in tmp_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
_register(f._c_func) # pylint: disable=protected-access
return GraphModeFunction(
fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
func_outputs, output_shapes, variables)
Example 15: _eager_safe_variable_handle (a variant of the helper in Example 4 from a different TensorFlow revision)
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
"""Creates a variable handle with information to do shape inference."""
container = ops.get_default_graph()._container # pylint: disable=protected-access
if container is None:
container = ""
handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
if graph_mode:
return handle
# We do not want two distinct ResourceVariable objects for the same
# underlying resource in the runtime.
# When in eager mode, explicitly ensure so here. When in graph mode, it's
# ensured by always generating different variable names.
exists = gen_resource_variable_ops.var_is_initialized_op(handle)
if exists:
raise ValueError("variable object with name '%s' already created. Use "
"get_variable() if reuse is desired." %
shared_name)
with context.graph_mode(), ops.Graph().as_default():
h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
# Tensor._handle_data contains information for the shape-inference code to
# know the shape and dtype of the variable pointed to by a handle. Since
# shape inference doesn't run in eager mode we copy this data here for when
# the handle is captured by an eager mode function.
handle._handle_data = h._handle_data # pylint: disable=protected-access
return handle