This article collects typical usage examples of the Python method tensorflow.python.framework.tensor_util.MakeNdarray. If you have been wondering what tensor_util.MakeNdarray does and how to use it, the curated example code below should help. You can also explore the containing module, tensorflow.python.framework.tensor_util, for related usage.
The following shows 15 code examples of tensor_util.MakeNdarray, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
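Before the examples, here is a minimal round-trip sketch of the API (assuming a TensorFlow 1.x-style environment; the array values are illustrative): make_tensor_proto serializes a numpy array into a TensorProto, and MakeNdarray converts it back.

import numpy as np
from tensorflow.python.framework import tensor_util

# Serialize a numpy array into a TensorProto ...
proto = tensor_util.make_tensor_proto(
    np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))

# ... and recover it as an ndarray; dtype and shape survive the round trip.
arr = tensor_util.MakeNdarray(proto)
assert arr.dtype == np.float32 and arr.shape == (2, 2)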
Example 1: _SumGrad
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
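The gradient of reduce_sum broadcasts the upstream gradient back to every input element; the code above implements this with Reshape plus Tile. A small numpy sketch of that step (names and values here are illustrative, not from the original code):

import numpy as np

x_shape = (2, 3)        # shape of the input being reduced
grad = np.float32(5.0)  # upstream gradient of the scalar sum

# Reshape the scalar gradient to rank-matching [1, 1], then tile it up to
# the input shape: each input element contributed with weight 1 to the sum,
# so each receives the full upstream gradient.
dx = np.tile(np.reshape(grad, [1] * len(x_shape)), x_shape)
assert dx.shape == x_shape and (dx == 5.0).all()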
Example 2: values_from_const
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)

  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value
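A hedged usage sketch: scanning an existing GraphDef for Const nodes and collecting their values. Here graph_def is assumed to be a tf.compat.v1.GraphDef loaded elsewhere (e.g. from a frozen .pb file), and values_from_const is the helper above.

# Assumes `graph_def` was loaded elsewhere, e.g. from a frozen .pb file.
const_values = {
    node.name: values_from_const(node)
    for node in graph_def.node
    if node.op == "Const"
}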
Example 3: _SumGrad
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
Example 4: testHalf
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def testHalf(self):
  t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
  self.assertProtoEquals("""
    dtype: DT_HALF
    tensor_shape {
      dim {
        size: 2
      }
    }
    half_val: 18688
    half_val: 19712
    """, t)

  a = tensor_util.MakeNdarray(t)
  self.assertEquals(np.float16, a.dtype)
  self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
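The half_val entries above are the raw IEEE-754 binary16 bit patterns of 10.0 and 20.0, which numpy can confirm directly:

import numpy as np

# Reinterpret the float16 values as their 16-bit integer bit patterns.
bits = np.array([10.0, 20.0], dtype=np.float16).view(np.uint16)
assert bits.tolist() == [18688, 19712]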
Example 5: testLargeNegativeInt
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def testLargeNegativeInt(self):
  # We don't use the min np.int64 value here
  # because it breaks np.abs().
  #
  # np.iinfo(np.int64).min = -9223372036854775808
  # np.iinfo(np.int64).max = 9223372036854775807
  # np.abs(-9223372036854775808) = -9223372036854775808
  value = np.iinfo(np.int64).min + 1
  t = tensor_util.make_tensor_proto(value)
  self.assertProtoEquals("""
    dtype: DT_INT64
    tensor_shape {}
    int64_val: %d
    """ % value, t)

  a = tensor_util.MakeNdarray(t)
  self.assertEquals(np.int64, a.dtype)
  self.assertAllClose(np.array(value, dtype=np.int64), a)
Example 6: testComplex64N
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def testComplex64N(self):
  t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                    dtype=tf.complex64)
  self.assertProtoEquals("""
    dtype: DT_COMPLEX64
    tensor_shape { dim { size: 1 } dim { size: 3 } }
    scomplex_val: 1
    scomplex_val: 2
    scomplex_val: 3
    scomplex_val: 4
    scomplex_val: 5
    scomplex_val: 6
    """, t)

  a = tensor_util.MakeNdarray(t)
  self.assertEquals(np.complex64, a.dtype)
  self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
Example 7: testComplex128N
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def testComplex128N(self):
  t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                    dtype=tf.complex128)
  self.assertProtoEquals("""
    dtype: DT_COMPLEX128
    tensor_shape { dim { size: 1 } dim { size: 3 } }
    dcomplex_val: 1
    dcomplex_val: 2
    dcomplex_val: 3
    dcomplex_val: 4
    dcomplex_val: 5
    dcomplex_val: 6
    """, t)

  a = tensor_util.MakeNdarray(t)
  self.assertEquals(np.complex128, a.dtype)
  self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
Example 8: testComplex64NpArray
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def testComplex64NpArray(self):
  t = tensor_util.make_tensor_proto(
      np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
  # scomplex_val are real_0, imag_0, real_1, imag_1, ...
  self.assertProtoEquals("""
    dtype: DT_COMPLEX64
    tensor_shape { dim { size: 2 } dim { size: 2 } }
    scomplex_val: 1
    scomplex_val: 2
    scomplex_val: 3
    scomplex_val: 4
    scomplex_val: 5
    scomplex_val: 6
    scomplex_val: 7
    scomplex_val: 8
    """, t)

  a = tensor_util.MakeNdarray(t)
  self.assertEquals(np.complex64, a.dtype)
  self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
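As the comment in Example 8 notes, scomplex_val stores real and imaginary parts interleaved. A numpy sketch of how that flat layout maps back to a complex array (values illustrative):

import numpy as np

flat = np.array([1., 2., 3., 4., 5., 6., 7., 8.], dtype=np.float32)
# Adjacent (real, imag) float32 pairs view directly as complex64.
c = flat.view(np.complex64).reshape(2, 2)
assert c[0, 0] == 1 + 2j and c[1, 1] == 7 + 8j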
Example 9: quantize_weight_rounded
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [create_constant_node(input_node.name, tensor_value_rounded,
                               tf.float32, shape=tensor_shape_list)]
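quantize_array itself is not shown in this snippet. Purely for illustration, a hypothetical linear-bucketing version might look like the sketch below; the real helper in the source script may differ.

import numpy as np

def quantize_array_sketch(arr, num_buckets):
  """Hypothetical stand-in for quantize_array: snaps each value to one of
  num_buckets evenly spaced levels between the array's min and max."""
  lo, hi = arr.min(), arr.max()
  if hi == lo:  # constant tensor: nothing to bucket
    return arr
  step = (hi - lo) / (num_buckets - 1)
  return lo + np.round((arr - lo) / step) * step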
Example 10: _get_bias
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def _get_bias(self, source_node, IR_node):
    if not source_node.out_edges:
        return

    add_node = self.tf_graph.get_node(source_node.out_edges[0])
    if add_node.type != "Add" and add_node.type != "BiasAdd":
        return

    # The second input of the Add/BiasAdd node is expected to be the bias.
    variable = self.check_const(self.tf_graph.get_node(add_node.in_edges[1]))
    if not variable or variable.type != 'Const':
        return

    bias_value = variable.get_attr('value')
    bias = tensor_util.MakeNdarray(bias_value)
    # assert variable.get_attr('_output_shapes')[0].dim[0].size == IR_node.attr['kernel_shape'].list.i[-1]

    add_node.real_name = IR_node.name
    add_node.covered = True
    IR_node.attr['use_bias'].b = True

    current_layer = self.weights[source_node.name]
    current_layer['bias'] = bias
Example 11: rename_StridedSlice
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def rename_StridedSlice(self, source_node):
    IR_node = self._convert_identity_operation(source_node, end_idx=1, new_op='Slice')
    kwargs = {
        'begin_mask': source_node.get_attr('begin_mask'),
        'end_mask': source_node.get_attr('end_mask'),
    }

    # Inputs 1 and 2 of StridedSlice hold the constant begin and end vectors.
    starts = self.get_parent(source_node.name, [1]).layer.attr['value'].tensor
    starts = tensor_util.MakeNdarray(starts).tolist()
    kwargs['starts'] = starts

    ends = self.get_parent(source_node.name, [2]).layer.attr['value'].tensor
    ends = tensor_util.MakeNdarray(ends).tolist()
    kwargs['ends'] = ends

    # Input 3, when present, holds the strides.
    if self.get_parent(source_node.name, [3]) is not None:
        strides = self.get_parent(source_node.name, [3]).layer.attr['value'].tensor
        strides = tensor_util.MakeNdarray(strides).tolist()
        kwargs['strides'] = strides

    assign_IRnode_values(IR_node, kwargs)
Example 12: rename_MatMul
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def rename_MatMul(self, source_node):
    IR_node = self._convert_identity_operation(source_node, end_idx=1)

    input_weight_node = self.src_graph.get_parent(source_node.name, [1])
    weightnode = self.check_const(input_weight_node)
    weight_value = weightnode.get_attr('value')
    weight = tensor_util.MakeNdarray(weight_value)
    self.set_weight(source_node.name, 'weights', weight)

    units = source_node.layer.attr['_output_shapes'].list.shape[-1].dim[-1].size
    IR_node.attr['units'].i = units

    # Fold a trailing BiasAdd into this node as a FullyConnected layer.
    if source_node.out_edges and self.tf_graph.get_node(source_node.out_edges[0]).type == 'BiasAdd':
        add_node = self.tf_graph.get_node(source_node.out_edges[0])
        add_node.covered = True
        add_node.real_name = source_node.real_name
        TensorflowParser2._copy_and_reop(source_node, IR_node, 'FullyConnected')

        variable = self.tf_graph.get_node(add_node.in_edges[1])  # bias input of the BiasAdd
        biasnode = self.check_const(variable)
        bias_value = biasnode.get_attr('value')
        bias = tensor_util.MakeNdarray(bias_value)
        self.set_weight(source_node.name, 'bias', bias)
        IR_node.attr['use_bias'].b = True
Example 13: _rename_Const
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def _rename_Const(self, source_node):
    IR_node = self._convert_identity_operation(source_node, in_edge_count=0, new_op='Constant')
    value = source_node.get_attr('value')

    # A TensorProto may carry a single repeated scalar for a filled tensor;
    # expand that case with np.full, otherwise decode it with MakeNdarray.
    if value.float_val:
        shape = tuple(self.tensor_shape_to_list(value.tensor_shape))
        value = np.full(shape, value.float_val[0])
    elif value.int_val:
        shape = tuple(self.tensor_shape_to_list(value.tensor_shape))
        value = np.full(shape, value.int_val[0])
    else:
        value = np.array(tensor_util.MakeNdarray(value).tolist())

    if value.ndim > 1:
        self.set_weight(source_node.name, 'value', value)
    else:
        kwargs = {'value': value}
        assign_IRnode_values(IR_node, kwargs)
Example 14: rename_Slice
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def rename_Slice(self, source_node):
    # Inputs 1 and 2 of Slice hold the constant begin and size vectors.
    input_node_begin = self.get_parent(source_node.name, [1])
    input_node_size = self.get_parent(source_node.name, [2])
    begin = tensor_util.MakeNdarray(input_node_begin.layer.attr['value'].tensor)
    size = tensor_util.MakeNdarray(input_node_size.layer.attr['value'].tensor)

    IR_node = self._convert_identity_operation(source_node, in_edge_count=1, new_op='Slice')

    # TODO: axis
    end = size + begin
    kwargs = {
        'starts': begin,
        'ends': end
    }
    assign_IRnode_values(IR_node, kwargs)
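The only real work in Example 14 is the coordinate conversion: TensorFlow's Slice op takes (begin, size), while the IR wants (starts, ends), so ends = begin + size. A quick numeric check (illustrative values):

import numpy as np

begin = np.array([0, 2])  # illustrative begin vector from a Slice node
size = np.array([3, 4])   # illustrative size vector
ends = begin + size       # exclusive end indices
assert ends.tolist() == [3, 6]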
Example 15: quantize_weight_rounded
# Required module import: from tensorflow.python.framework import tensor_util [as alias]
# Or alternatively: from tensorflow.python.framework.tensor_util import MakeNdarray [as alias]
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [
      create_constant_node(
          input_node.name,
          tensor_value_rounded,
          dtypes.float32,
          shape=tensor_shape_list)
  ]