This page collects typical usage examples of the tensorflow.Operations method in Python. If you have been wondering what tensorflow.Operations does, how it is used, or where to find examples of it, the code samples selected below may help. You can also explore further usage examples from the tensorflow module in which the method is defined.
The following presents 5 code examples of the tensorflow.Operations method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: matmul_resources
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Operations [as alias]
def matmul_resources(self, op):
    """
    Checks which of the direct ancestor tf.Operations is a constant and returns the underlying
    tensor as a numpy.ndarray inside a tuple. The matrix is transposed, if necessary, so that it
    can be used as the left multiplier in the matrix multiplication.

    Arguments
    ---------
    op : tf.Operation
        must have type "MatMul"

    Return
    ------
    output : tuple
        tuple with the matrix (of type numpy.ndarray) as its only item
    """
    inputs = op.inputs
    left = inputs[0]
    right = inputs[1]

    if left.op.type == "Const":
        matrix = self.sess.run(left) if not op.get_attr("transpose_a") else self.sess.run(left).transpose()
    else:
        matrix = self.sess.run(right).transpose() if not op.get_attr("transpose_b") else self.sess.run(right)
    return (matrix,)
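As a hedged illustration of what this helper does (not part of the original source), the standalone sketch below builds a tiny MatMul graph under TensorFlow 1.x and extracts the constant operand in the same way: fetch it with sess.run and transpose it when it sits on the right-hand side of the multiplication.

import numpy as np
import tensorflow as tf   # assumes a TensorFlow 1.x environment

x = tf.placeholder(tf.float64, shape=(1, 3), name="x")
w = tf.constant(np.arange(6, dtype=np.float64).reshape(3, 2))
y = tf.matmul(x, w)                      # y.op has type "MatMul"

with tf.Session() as sess:
    left, right = y.op.inputs
    if left.op.type == "Const":
        matrix = sess.run(left)
    else:
        # the constant is the right operand, so transpose it to act as a left multiplier
        matrix = sess.run(right).transpose()
    print(matrix.shape)                  # (2, 3)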
Example 2: add_resources
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Operations [as alias]
def add_resources(self, op):
    """
    Checks which of the direct ancestor tf.Operations is a constant and returns the underlying
    tensor as a numpy.ndarray inside a tuple.

    Arguments
    ---------
    op : tf.Operation
        must have type "Add"

    Return
    ------
    output : tuple
        tuple with the addend (of type numpy.ndarray) as its only item
    """
    inputs = op.inputs
    left = inputs[0]
    right = inputs[1]

    if left.op.type == "Const":
        addend = self.sess.run(left)
    else:
        addend = self.sess.run(right)
    return (addend,)
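A matching standalone sketch (again assuming TensorFlow 1.x, not taken from the original project) that pulls the constant addend out of an Add op:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float64, shape=(1, 2))
b = tf.constant(np.array([[0.5, -1.0]]))
z = tf.add(x, b)                         # z.op has type "Add"

with tf.Session() as sess:
    left, right = z.op.inputs
    addend = sess.run(left) if left.op.type == "Const" else sess.run(right)
    print(addend)                        # [[ 0.5 -1. ]]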
Example 3: _dfs
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Operations [as alias]
def _dfs(op, visited=None):
  """Perform DFS on a graph.

  Args:
    op: A tf.Operation, the root node for the DFS.
    visited: A set, used in the recursion.

  Returns:
    A list of the tf.Operations of type Conv2D that were encountered.
  """
  visited = visited or set()
  ret = []
  for child in op.inputs:
    if child.op in visited:
      return ret
    visited.add(child.op)
    if child.op.type not in op_regularizer_manager.NON_PASS_THROUGH_OPS:
      ret.extend(_dfs(child.op, visited))
    if child.op.type in ('Conv2D',):  # TODO: support depthwise conv.
      ret.append(child.op)
  return ret
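A hedged usage sketch: assuming _dfs and the op_regularizer_manager module it references are importable from the surrounding project (neither is shown here), it walks the graph backwards from an output op and collects the Conv2D ops that feed it.

import tensorflow as tf

# toy graph: two stacked convolutions followed by global average pooling
images = tf.placeholder(tf.float32, shape=(1, 16, 16, 3))
net = tf.layers.conv2d(images, filters=8, kernel_size=3, activation=tf.nn.relu)
net = tf.layers.conv2d(net, filters=4, kernel_size=3)
logits = tf.reduce_mean(net, axis=[1, 2])

conv_ops = _dfs(logits.op)
print([op.name for op in conv_ops])   # names of the Conv2D ops feeding the output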
Example 4: make_copy_ops
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Operations [as alias]
def make_copy_ops(parent, child, scope='copy_ops'):
    """
    Creates the operations that copy the parent variables into the child variables
    (a soft update weighted by tau).

    args
        parent (list of tf.Variables)
        child (list of tf.Variables)

    returns
        copy_ops (list of tf.Operations)
        tau (tf.Variable) - the interpolation weight used for the soft update
    """
    with tf.variable_scope(scope):
        tau = tf.Variable(1.0, name='tau')

        copy_ops = []
        for p, c in zip(parent, child):
            assert p.name.split('/')[1:] == c.name.split('/')[1:]
            copy_ops.append(
                c.assign(tf.add(tf.multiply(p, tau),
                                tf.multiply(c, 1 - tau)))
            )

    return copy_ops, tau
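A hedged usage sketch (TensorFlow 1.x assumed; the two scope names below are hypothetical): soft-update a target network's variables towards an online network's variables. The parent and child variable names must match once the leading scope is stripped, because of the assert above.

import tensorflow as tf

with tf.variable_scope('online'):
    w_online = tf.get_variable('w', initializer=tf.ones((2, 2)))
with tf.variable_scope('target'):
    w_target = tf.get_variable('w', initializer=tf.zeros((2, 2)))

copy_ops, tau = make_copy_ops([w_online], [w_target])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tau.assign(0.5))
    sess.run(copy_ops)            # target <- 0.5 * online + 0.5 * target
    print(sess.run(w_target))     # every entry is now 0.5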
Example 5: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Operations [as alias]
def __init__(self, model, session=None):
    """
    This constructor takes a reference to a TensorFlow Operation, a TensorFlow Tensor, or a Keras model and applies the two TensorFlow functions
    graph_util.convert_variables_to_constants and graph_util.remove_training_nodes to cleanse the graph of any nodes that are linked to training. This leaves only
    the nodes needed for inference.
    In the resulting graph there should only be tf.Operations left that have one of the following types: [Const, MatMul, Add, BiasAdd, Conv2D, Reshape, MaxPool, AveragePool, Placeholder, Relu, Sigmoid, Tanh]
    If the input is a Keras model, operations of type Pack, Shape, StridedSlice, and Prod are ignored so that the Flatten layer can be used.

    Arguments
    ---------
    model : tensorflow.Tensor or tensorflow.Operation or tensorflow.python.keras.engine.sequential.Sequential or keras.engine.sequential.Sequential
        if tensorflow.Tensor: model.op will be treated as the output node of the TensorFlow model. Make sure that the graph only contains supported operations after applying
                              graph_util.convert_variables_to_constants and graph_util.remove_training_nodes with [model.op.name] as output_node_names
        if tensorflow.Operation: model will be treated as the output node of the TensorFlow model. Make sure that the graph only contains supported operations after applying
                                 graph_util.convert_variables_to_constants and graph_util.remove_training_nodes with [model.name] as output_node_names
        if tensorflow.python.keras.engine.sequential.Sequential: x = model.layers[-1].output.op.inputs[0].op will be treated as the output node of the Keras model. Make sure that the graph only
                                                                 contains supported operations after applying graph_util.convert_variables_to_constants and graph_util.remove_training_nodes
                                                                 with [x.name] as output_node_names
        if keras.engine.sequential.Sequential: x = model.layers[-1].output.op.inputs[0].op will be treated as the output node of the Keras model. Make sure that the graph only
                                               contains supported operations after applying graph_util.convert_variables_to_constants and graph_util.remove_training_nodes
                                               with [x.name] as output_node_names
    session : tf.Session
        session that holds the values of the trained variables. If None, the session is taken from tf.get_default_session(). If you pass a Keras model you do not have to
        provide a session; the constructor fetches it automatically.
    """
    output_names = None
    if issubclass(model.__class__, tf.Tensor):
        output_names = [model.op.name]
    elif issubclass(model.__class__, tf.Operation):
        output_names = [model.name]
    elif issubclass(model.__class__, Sequential):
        session = tf.keras.backend.get_session()
        output_names = [model.layers[-1].output.op.inputs[0].op.name]
        model = model.layers[-1].output.op
    elif issubclass(model.__class__, onnx.ModelProto):
        assert 0, 'not tensorflow model'
    else:
        import keras
        if issubclass(model.__class__, keras.engine.sequential.Sequential):
            session = keras.backend.get_session()
            output_names = [model.layers[-1].output.op.inputs[0].op.name]
            model = model.layers[-1].output.op
        else:
            assert 0, "ERAN can't recognize this input"

    if session is None:
        session = tf.get_default_session()

    tmp = graph_util.convert_variables_to_constants(session, model.graph.as_graph_def(), output_names)
    self.graph_def = graph_util.remove_training_nodes(tmp)
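A hedged usage sketch of this constructor: the enclosing class is not named in the excerpt above, so TensorflowTranslator below is a hypothetical stand-in, and TensorFlow 1.x plus NumPy are assumed to be available.

import numpy as np
import tensorflow as tf

# a tiny inference-only graph: one affine layer followed by a ReLU
x = tf.placeholder(tf.float64, shape=(1, 4), name='input')
w = tf.constant(np.random.rand(4, 3))
b = tf.constant(np.random.rand(3))
out = tf.nn.relu(tf.matmul(x, w) + b)

with tf.Session() as sess:
    translator = TensorflowTranslator(out, session=sess)   # hypothetical class name
    # translator.graph_def now holds the frozen GraphDef with training nodes removed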