This article collects typical usage examples of the Python method tensorflow.python.framework.ops.get_default_graph. If you have been wondering how to use ops.get_default_graph, how it works, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples for the containing module, tensorflow.python.framework.ops.
Fifteen code examples of ops.get_default_graph are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _lower_bound

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _lower_bound(inputs, bound, name=None):
  """Same as tf.maximum, but with helpful gradient for inputs < bound.

  The gradient is overwritten so that it is passed through if the input is not
  hitting the bound. If it is, only gradients that push `inputs` higher than
  the bound are passed through. No gradients are passed through to the bound.

  Args:
    inputs: input tensor
    bound: lower bound for the input tensor
    name: name for this op

  Returns:
    tf.maximum(inputs, bound)
  """
  with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:
    inputs = ops.convert_to_tensor(inputs, name='inputs')
    bound = ops.convert_to_tensor(bound, name='bound')
    with ops.get_default_graph().gradient_override_map(
        {'Maximum': 'GDNLowerBound'}):
      return math_ops.maximum(inputs, bound, name=scope)
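The override above only takes effect if a gradient function is registered under the name 'GDNLowerBound', which this snippet does not show. A minimal companion sketch, written to match the behavior the docstring describes (an illustrative reimplementation, not code from this page):

from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

@ops.RegisterGradient('GDNLowerBound')
def _lower_bound_grad(op, grad):
  # Pass the gradient through where the input is above the bound, or where
  # the incoming gradient would push the input upward (under gradient
  # descent, grad < 0 increases `inputs`). The bound itself gets no gradient.
  inputs, bound = op.inputs[0], op.inputs[1]
  pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
  return [math_ops.cast(pass_through_if, grad.dtype) * grad, None]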
Example 2: convert_collection_to_dict

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def convert_collection_to_dict(collection, clear_collection=False):
  """Returns an OrderedDict of Tensors with their aliases as keys.

  Args:
    collection: A collection.
    clear_collection: When True, it clears the collection after converting to
      OrderedDict.

  Returns:
    An OrderedDict of {alias: tensor}
  """
  output = OrderedDict((alias, tensor)
                       for tensor in ops.get_collection(collection)
                       for alias in get_tensor_aliases(tensor))
  if clear_collection:
    ops.get_default_graph().clear_collection(collection)
  return output
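This helper also relies on a separate get_tensor_aliases function that is not shown here. The graph-collection calls it builds on are straightforward; a minimal sketch using a hypothetical collection name 'my_endpoints':

import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops
tf.disable_eager_execution()

x = tf.constant(1.0, name='x')
ops.add_to_collection('my_endpoints', x)
print(len(ops.get_collection('my_endpoints')))            # 1
ops.get_default_graph().clear_collection('my_endpoints')
print(len(ops.get_collection('my_endpoints')))            # 0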
Example 3: _init_from_proto

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _init_from_proto(self, context_def, import_scope=None):
  """Creates a new `CondContext` from protocol buffer.

  Args:
    context_def: `CondContextDef` protocol buffer.
    import_scope: Optional `string`. Name scope to add.
  """
  assert isinstance(context_def, control_flow_pb2.CondContextDef)
  # Create from context_def.
  g = ops.get_default_graph()
  self._name = ops.prepend_name_scope(
      context_def.context_name, import_scope)
  self._pred = g.as_graph_element(ops.prepend_name_scope(
      context_def.pred_name, import_scope))
  self._pivot = g.as_graph_element(ops.prepend_name_scope(
      context_def.pivot_name, import_scope))
  self._branch = context_def.branch
  super(CondContext, self).__init__(values_def=context_def.values_def,
                                    import_scope=import_scope)
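The ops.prepend_name_scope calls above remap node names when a graph is imported under a scope. A small illustration with hypothetical names:

from tensorflow.python.framework import ops

print(ops.prepend_name_scope('cond/pred_id:0', 'imported'))
# imported/cond/pred_id:0
print(ops.prepend_name_scope('cond/pred_id:0', None))
# cond/pred_id:0 (no import scope, so the name is unchanged)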
Example 4: AddOp

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def AddOp(self, op):
  """Add `op` to the current context."""
  # For a reduction op, if op is in a grad context and its input is from
  # its forward context, moving op to the forward context means we would
  # store the tensor after the reduction as opposed to the tensor before
  # reduction, and therefore could significantly reduce memory consumption.
  # For now, we do this only for a few ops.
  if op.type in {"Shape", "Size", "Rank"}:
    grad_ctxt = ops.get_default_graph()._get_control_flow_context()
    if grad_ctxt:
      grad_ctxt = grad_ctxt.GetWhileContext()
      if grad_ctxt.grad_state:
        op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)
        if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
          op_input_ctxt = op.inputs[0].op._get_control_flow_context()
          op._set_control_flow_context(op_input_ctxt)
          op_input_ctxt._AddOpInternal(op)
          return
  self._AddOpInternal(op)
Example 5: _FixControlInputsAndContext

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _FixControlInputsAndContext(self, enters):
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  for e in enters:
    if isinstance(e, ops.Tensor):
      xs = [e]
    else:
      if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
        raise TypeError("Type %s not supported" % type(e))
      xs = [e.values, e.indices]
      shape = e.dense_shape
      if shape is not None:
        xs.append(shape)
    for x in xs:
      inp_op = x.op.inputs[0]
      control_inputs = graph._control_dependencies_for_inputs([inp_op])
      outer_control_inputs = [op for op in control_inputs
                              if self._IsInOuterContext(op)]
      x.op._set_control_flow_context(self)
      x.op._add_control_inputs(outer_control_inputs)
      graph._record_op_seen_by_control_dependencies(x.op)
  # pylint: enable=protected-access
Example 6: add_check_numerics_ops

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def add_check_numerics_ops():
  """Connect a `check_numerics` to every floating point tensor.

  `check_numerics` operations themselves are added for each `half`, `float`,
  or `double` tensor in the graph. For all ops in the graph, the
  `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
  is guaranteed to run before the `check_numerics` op on any of its outputs.

  Returns:
    A `group` op depending on all `check_numerics` ops added.
  """
  check_op = []
  # This code relies on the ordering of ops in get_operations().
  # The producer of a tensor always comes before that tensor's consumer in
  # this list. This is true because get_operations() returns ops in the order
  # added, and an op can only be added after its inputs are added.
  for op in ops.get_default_graph().get_operations():
    for output in op.outputs:
      if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
        message = op.name + ":" + str(output.value_index)
        with ops.control_dependencies(check_op):
          check_op = [array_ops.check_numerics(output, message=message)]
  return control_flow_ops.group(*check_op)
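A minimal usage sketch, assuming the helper above is in scope together with its module imports (ops, dtypes, array_ops, control_flow_ops) and a TF1-style session:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[2], name='x')
y = tf.math.log(x)                  # -inf / nan for non-positive inputs
check = add_check_numerics_ops()    # one check_numerics per float tensor

with tf.Session() as sess:
  sess.run([y, check], feed_dict={x: [1.0, 2.0]})    # passes
  # sess.run([y, check], feed_dict={x: [-1.0, 2.0]}) # would raise
  #     InvalidArgumentError naming the offending tensor.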
Example 7: _get_or_create_eval_step

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _get_or_create_eval_step():
  """Gets or creates the eval step `Tensor`.

  Returns:
    A `Tensor` representing a counter for the evaluation step.

  Raises:
    ValueError: If multiple `Tensors` have been added to the
      `tf.GraphKeys.EVAL_STEP` collection.
  """
  graph = ops.get_default_graph()
  eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
  if len(eval_steps) == 1:
    return eval_steps[0]
  elif len(eval_steps) > 1:
    raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
  else:
    counter = variable_scope.get_variable(
        'eval_step',
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
    return counter
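The get-or-create pattern makes repeated calls safe within one graph. A brief sketch, assuming the snippet's own module imports (variable_scope, init_ops, dtypes) are in scope:

import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
  step_a = _get_or_create_eval_step()   # creates the counter variable
  step_b = _get_or_create_eval_step()   # finds it in the EVAL_STEP collection
  assert step_a is step_b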
Example 8: before_run

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # We write the graph and saver_def on the first call of before_run.
    # We cannot do this in begin, since we let other hooks change the graph
    # and add variables in begin. The graph is finalized after all begin
    # calls.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
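The first branch serializes the default graph exactly once. The same serialization is available through the public API; a minimal standalone sketch (with a hypothetical output directory /tmp/ckpt_dir):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

tf.constant(1.0, name='c')
tf.train.write_graph(
    tf.get_default_graph().as_graph_def(add_shapes=True),
    '/tmp/ckpt_dir', 'graph.pbtxt')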
Example 9: _as_graph_element

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _as_graph_element(obj):
  """Retrieves Graph element."""
  graph = ops.get_default_graph()
  if not isinstance(obj, six.string_types):
    if not hasattr(obj, "graph") or obj.graph != graph:
      raise ValueError("Passed %s should have graph attribute that is equal "
                       "to current graph %s." % (obj, graph))
    return obj
  if ":" in obj:
    element = graph.as_graph_element(obj)
  else:
    element = graph.as_graph_element(obj + ":0")
    # Check that there is no :1 (e.g. it's single output).
    try:
      graph.as_graph_element(obj + ":1")
    except (KeyError, ValueError):
      pass
    else:
      raise ValueError("Name %s is ambiguous, "
                       "as this `Operation` has multiple outputs "
                       "(at least 2)." % obj)
  return element
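A short sketch of the three paths through _as_graph_element (assumes six is imported, as in the snippet):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.Graph().as_default():
  c = tf.constant(1.0, name='c')
  assert _as_graph_element(c) is c    # non-string from the current graph
  assert _as_graph_element('c') is c  # bare name resolves to 'c:0'
  tf.unstack(tf.constant([1.0, 2.0]), name='u')  # op 'u' has outputs u:0, u:1
  # _as_graph_element('u') would raise ValueError: "Name u is ambiguous, ..."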
Example 10: _init_from_proto

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _init_from_proto(self, queue_runner_def, import_scope=None):
  """Create a QueueRunner from `QueueRunnerDef`.

  Args:
    queue_runner_def: Optional `QueueRunnerDef` protocol buffer.
    import_scope: Optional `string`. Name scope to add.
  """
  assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)
  g = ops.get_default_graph()
  self._queue = g.as_graph_element(
      ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))
  self._enqueue_ops = [g.as_graph_element(
      ops.prepend_name_scope(op, import_scope))
      for op in queue_runner_def.enqueue_op_name]
  self._close_op = g.as_graph_element(ops.prepend_name_scope(
      queue_runner_def.close_op_name, import_scope))
  self._cancel_op = g.as_graph_element(ops.prepend_name_scope(
      queue_runner_def.cancel_op_name, import_scope))
  self._queue_closed_exception_types = tuple(
      errors.exception_type_from_error_code(code)
      for code in queue_runner_def.queue_closed_exception_types)
  # Legacy support for old QueueRunnerDefs created before this field
  # was added.
  if not self._queue_closed_exception_types:
    self._queue_closed_exception_types = (errors.OutOfRangeError,)
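The proto round trip this method handles can be exercised through the public TF1 API. A rough sketch:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

q = tf.FIFOQueue(10, tf.float32, name='q')
qr = tf.train.QueueRunner(q, [q.enqueue(tf.constant(1.0))])
proto = qr.to_proto()   # a QueueRunnerDef, the input consumed above
qr2 = tf.train.QueueRunner(queue_runner_def=proto)  # rebuilds from names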
Example 11: create_global_step

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step tensor. If missing,
      use default graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step tensor is already defined.
  """
  graph = graph or ops.get_default_graph()
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
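A brief usage sketch, assuming the companion get_global_step from the same module is in scope along with the snippet's imports:

import tensorflow.compat.v1 as tf

with tf.Graph().as_default() as g:
  gs = create_global_step(g)
  print(gs.op.name)          # global_step (created at the root name scope)
  try:
    create_global_step(g)    # a second call fails
  except ValueError as e:
    print(e)                 # "global_step" already exists.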
Example 12: _unique_layer_name

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _unique_layer_name(name):
  """Makes a layer name (or arbitrary string) unique within a TensorFlow graph.

  Arguments:
    name: String name to make unique.

  Returns:
    Unique string name.

  Example:

  ```
  >>> _unique_layer_name('dense')
  dense_1
  >>> _unique_layer_name('dense')
  dense_2
  ```
  """
  graph = ops.get_default_graph()
  layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
  layer_name_uids[name] += 1
  return name + '_' + str(layer_name_uids[name])
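PER_GRAPH_LAYER_NAME_UIDS is not defined in this snippet. One plausible definition that makes it runnable is a per-graph counter map, keyed by graph so that names restart at _1 in a fresh graph (the real TF source keeps similar bookkeeping):

import collections
from tensorflow.python.framework import ops

PER_GRAPH_LAYER_NAME_UIDS = collections.defaultdict(
    lambda: collections.defaultdict(int))

with ops.Graph().as_default():
  print(_unique_layer_name('dense'))  # dense_1
  print(_unique_layer_name('dense'))  # dense_2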
Example 13: make_one_shot_iterator

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def make_one_shot_iterator(self):
  """Creates an `Iterator` for enumerating the elements of this dataset.

  **N.B.** The returned iterator will be initialized automatically.
  A "one-shot" iterator does not currently support re-initialization.

  Returns:
    An `Iterator` over the elements of this dataset.
  """
  # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
  # a 0-argument function.
  @function.Defun(capture_by_value=True)
  def _make_dataset():
    return self.make_dataset_resource()

  _make_dataset.add_to_graph(ops.get_default_graph())
  return Iterator(
      gen_dataset_ops.one_shot_iterator(
          dataset_factory=_make_dataset,
          output_types=nest.flatten(self.output_types),
          output_shapes=nest.flatten(self.output_shapes)), None,
      self.output_types, self.output_shapes)
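The public TF1 counterpart of this internal method is tf.data.Dataset.make_one_shot_iterator; a minimal usage sketch:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

ds = tf.data.Dataset.range(3)
it = ds.make_one_shot_iterator()  # no explicit initialization needed
nxt = it.get_next()
with tf.Session() as sess:
  print(sess.run(nxt))  # 0
  print(sess.run(nxt))  # 1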
Example 14: get_name_scope

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def get_name_scope():
  """Returns the current name scope of the default graph.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.contrib.framework.get_name_scope())
  ```

  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  return ops.get_default_graph().get_name_scope()
Example 15: _init_from_proto

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import get_default_graph [as alias]
def _init_from_proto(self, variable_def, import_scope=None):
  """Creates a new variable from `VariableDef` protocol buffer.

  Args:
    variable_def: `VariableDef` protocol buffer.
    import_scope: Optional `string`. Name scope to add.
  """
  assert isinstance(variable_def, variable_pb2.VariableDef)
  # Create from variable_def.
  g = ops.get_default_graph()
  self._variable = g.as_graph_element(
      ops.prepend_name_scope(variable_def.variable_name,
                             import_scope=import_scope))
  self._initializer_op = g.as_graph_element(
      ops.prepend_name_scope(variable_def.initializer_name,
                             import_scope=import_scope))
  self._snapshot = g.as_graph_element(
      ops.prepend_name_scope(variable_def.snapshot_name,
                             import_scope=import_scope))
  if variable_def.HasField("save_slice_info_def"):
    self._save_slice_info = Variable.SaveSliceInfo(
        save_slice_info_def=variable_def.save_slice_info_def)
  else:
    self._save_slice_info = None
  self._caching_device = None
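A round trip through the public TF1 Variable API exercises this path; a rough sketch (the proto is consumed in the same default graph, so the stored names resolve):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.Graph().as_default():
  v = tf.Variable(3.0, name='v')
  proto = v.to_proto()                  # a VariableDef, as consumed above
  w = tf.Variable(variable_def=proto)   # rebinds to the existing graph nodes
  assert w.name == v.name == 'v:0'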