This article collects typical usage examples of the Python method tensorflow.python.framework.ops._get_graph_from_inputs. If you are unsure what ops._get_graph_from_inputs does, how to call it, or what real code using it looks like, the curated examples below should help. You can also read further about its enclosing module, tensorflow.python.framework.ops.
Below are 4 code examples of ops._get_graph_from_inputs, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
Example 1: get_graph_from_inputs
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import _get_graph_from_inputs [as alias]
def get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  1. If `graph` is provided, we validate that all inputs in `op_input_list` are
     from the same graph.
  2. Otherwise, we attempt to select a graph from the first Operation- or
     Tensor-valued input in `op_input_list`, and validate that all other
     such inputs are in the same graph.
  3. If the graph was not specified and it could not be inferred from
     `op_input_list`, we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include `Tensor`,
      `Operation`, and other objects that may be converted to a graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If `op_input_list` is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  # pylint: disable=protected-access
  return ops._get_graph_from_inputs(op_input_list, graph)
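To make the contract documented above concrete, here is a minimal usage sketch. It is not part of the original example and assumes TF 1.x-style graph mode:

import tensorflow as tf
from tensorflow.python.framework import ops

g = tf.Graph()
with g.as_default():
  a = tf.constant(1.0)
  b = tf.constant(2.0)

# No explicit graph: it is inferred from the first Tensor-valued input.
assert ops._get_graph_from_inputs([a, b]) is g  # pylint: disable=protected-access

# An explicitly passed graph that does not own the inputs raises ValueError.
try:
  ops._get_graph_from_inputs([a], graph=tf.Graph())  # pylint: disable=protected-access
except ValueError:
  pass  # inputs are not from the passed-in graph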
Example 2: _current_graph
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import _get_graph_from_inputs [as alias]
def _current_graph(op_input_list):
  """Return the graph members of `op_input_list`, or the current graph."""
  # pylint: disable=protected-access
  return ops._get_graph_from_inputs(op_input_list)
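The fallback behaviour this helper relies on (documented in Example 1) can be demonstrated with a short sketch; this is an illustration rather than code from the original project, and it assumes TF 1.x graph mode:

import tensorflow as tf
from tensorflow.python.framework import ops

# With no Tensor or Operation inputs, the helper falls back to the
# current default graph.
with tf.Graph().as_default() as g:
  assert ops._get_graph_from_inputs([]) is g  # pylint: disable=protected-access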
Example 3: graph_zeros_like
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import _get_graph_from_inputs [as alias]
# Also required by this example: from tensorflow.core.framework import attr_value_pb2
def graph_zeros_like(tensor):
  """Graph-only version of tf.zeros_like(), for internal use only."""
  g = ops._get_graph_from_inputs([tensor])  # pylint: disable=protected-access
  with g.as_default(), ops.name_scope(None, "zeros_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    dtype = tensor.dtype.base_dtype
    dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
    op = g.create_op("ZerosLike", [tensor], [dtype], input_types=[dtype],
                     attrs={"T": dtype_value}, name=name)
  result, = op.outputs
  return result
Author: PacktPublishing · Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda · Lines: 13 · Source file: graph_only_ops.py
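A short usage sketch for this helper follows; it is not part of graph_only_ops.py and assumes TF 1.x, where tf.Session is available:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
  x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

z = graph_zeros_like(x)
assert z.graph is g           # the ZerosLike op was created in x's graph
assert z.dtype == x.dtype     # dtype is preserved

with tf.Session(graph=g) as sess:
  print(sess.run(z))          # [[0. 0.] [0. 0.]]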
Example 4: __call__
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import _get_graph_from_inputs [as alias]
def __call__(self, inputs, *args, **kwargs):
  """Wraps `call`, applying pre- and post-processing steps.

  Arguments:
    inputs: input tensor(s).
    *args: additional positional arguments to be passed to `self.call`.
    **kwargs: additional keyword arguments to be passed to `self.call`.
      **Note**: kwarg `scope` is reserved for use by the layer.

  Returns:
    Output tensor(s).
  """
  self._set_scope(kwargs.pop('scope', None))

  # Ensure the Layer, if being reused, is working with inputs from
  # the same graph as where it was created.
  try:
    ops._get_graph_from_inputs(nest.flatten(inputs), graph=self.graph)  # pylint: disable=protected-access
  except ValueError as e:
    raise ValueError('Input graph and Layer graph are not the same: %s' % e)

  with vs.variable_scope(self._scope,
                         reuse=self.built or self._reuse) as scope:
    with ops.name_scope(scope.original_name_scope):
      if not self.built:
        # Check input assumptions set before layer building, e.g. input rank.
        self._assert_input_compatibility(inputs)
        input_list = [
            ops.convert_to_tensor(x, name='input')
            for x in nest.flatten(inputs)]
        input_shapes = [x.get_shape() for x in input_list]
        if len(input_shapes) == 1:
          self.build(input_shapes[0])
        else:
          self.build(input_shapes)
      if 'scope' in tf_inspect.getargspec(self.call).args:
        kwargs['scope'] = scope
      # Check input assumptions set after layer building, e.g. input shape.
      self._assert_input_compatibility(inputs)
      outputs = self.call(inputs, *args, **kwargs)

      # Apply activity regularization.
      # Note that it should be applied every time the layer creates a new
      # output, since it is output-specific.
      if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
        output_list = _to_list(outputs)
        for output in output_list:
          with ops.name_scope('ActivityRegularizer'):
            activity_regularization = self.activity_regularizer(output)
          self.add_loss(activity_regularization)
          _add_elements_to_collection(
              activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)

      # Update global default collections.
      _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
      self.built = True
  return outputs
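The ValueError raised by the graph check at the top of this method can be triggered with a sketch like the following. This is an illustration only, assuming a TF 1.x release where the class-based tf.layers.Dense API exists and where the layer is bound to the graph in which it was created and first called:

import tensorflow as tf

g1, g2 = tf.Graph(), tf.Graph()

with g1.as_default():
  x1 = tf.zeros([2, 3])
  dense = tf.layers.Dense(units=4)
  y1 = dense(x1)     # layer is built against graph g1

with g2.as_default():
  x2 = tf.zeros([2, 3])

try:
  dense(x2)          # inputs live in g2, the layer's graph is g1
except ValueError as e:
  print(e)           # "Input graph and Layer graph are not the same: ..."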