This page collects typical usage examples of tensorflow.quint8 in Python. If you are unsure what tensorflow.quint8 is, how to use it, or what it looks like in real code, the curated examples below should help; you can also explore further usage examples from the tensorflow module it belongs to.
The following 15 code examples of tensorflow.quint8 are shown, ordered by popularity.
Example 1: add_dequantize_result_node
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def add_dequantize_result_node(self, quantized_output_name,
                               original_node_name, min_tensor_index=1):
  # By convention, a quantized op's min/max values live at the next two
  # output slots after the quantized tensor itself.
  min_max_inputs = [
      "%s:%s" % (quantized_output_name, min_tensor_index),
      "%s:%s" % (quantized_output_name, (min_tensor_index + 1))]
  dequantize_name = original_node_name
  if self.should_merge_with_fake_quant_node():
    fake_quant_node = self.state.output_node_stack[-1][0]
    if original_node_name not in self.state.merged_with_fake_quant:
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      self.state.merged_with_fake_quant[original_node_name] = True
    dequantize_name = fake_quant_node.name
  dequantize_node = create_node(
      "Dequantize", dequantize_name,
      [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
  set_attr_dtype(dequantize_node, "T", tf.quint8)
  set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(dequantize_node)
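The graph-rewriting examples on this page all rely on small NodeDef helpers such as create_node, set_attr_dtype, set_attr_string and set_attr_bool from the surrounding rewriter module. A minimal sketch of what these helpers plausibly look like, modeled on TensorFlow's quantize_graph tool (the exact upstream implementations may differ):

from tensorflow.core.framework import attr_value_pb2, node_def_pb2

def create_node(op, name, inputs):
    # Build a bare NodeDef with the given op type, name and input edges.
    new_node = node_def_pb2.NodeDef()
    new_node.op = op
    new_node.name = name
    new_node.input.extend(inputs)
    return new_node

def set_attr_dtype(node, key, value):
    # Store a tf.DType (e.g. tf.quint8) in the node's attr map.
    node.attr[key].CopyFrom(
        attr_value_pb2.AttrValue(type=value.as_datatype_enum))

def set_attr_string(node, key, value):
    # Store a bytes value such as b"MIN_FIRST".
    node.attr[key].CopyFrom(attr_value_pb2.AttrValue(s=value))

def set_attr_bool(node, key, value):
    node.attr[key].CopyFrom(attr_value_pb2.AttrValue(b=value))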
Example 2: eightbitize_mat_mul_node
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def eightbitize_mat_mul_node(self, original_node):
  """Replaces a MatMul node with the eight bit equivalent sub-graph."""
  quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
  all_input_names = self.add_eightbit_prologue_nodes(original_node)
  quantized_mat_mul_node = create_node("QuantizedMatMul",
                                       quantized_mat_mul_name, all_input_names)
  set_attr_dtype(quantized_mat_mul_node, "T1", tf.quint8)
  set_attr_dtype(quantized_mat_mul_node, "T2", tf.quint8)
  set_attr_dtype(quantized_mat_mul_node, "Toutput", tf.qint32)
  copy_attr(quantized_mat_mul_node, "transpose_a",
            original_node.attr["transpose_a"])
  copy_attr(quantized_mat_mul_node, "transpose_b",
            original_node.attr["transpose_b"])
  self.add_output_graph_node(quantized_mat_mul_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_mat_mul_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
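For reference, the QuantizedMatMul op wired up above can also be invoked directly. A minimal eager-mode sketch, assuming the CPU kernels for QuantizeV2 and QuantizedMatMul are available in your build:

import tensorflow as tf

# Quantize two small float matrices into quint8, multiply them in eight bits,
# and read back the qint32 accumulator together with its value range.
a = tf.quantization.quantize(tf.ones([2, 3]), 0.0, 1.0, tf.quint8)
b = tf.quantization.quantize(tf.ones([3, 4]), 0.0, 1.0, tf.quint8)
out, min_out, max_out = tf.raw_ops.QuantizedMatMul(
    a=a.output, b=b.output,
    min_a=a.output_min, max_a=a.output_max,
    min_b=b.output_min, max_b=b.output_max,
    Toutput=tf.qint32)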
Example 3: eightbitize_reshape_node
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def eightbitize_reshape_node(self, original_node):
  """Replaces a Reshape node with the eight bit equivalent sub-graph.

  Args:
    original_node: Float node to be converted.

  Returns:
    Subgraph representing the quantized version of the original node.
  """
  namespace_prefix = original_node.name + "_eightbit"
  quantized_reshape_name = namespace_prefix + "_quantized_reshape"
  reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
      namespace_prefix)
  shape_input_name = original_node.input[1]
  quantize_input_name, min_input_name, max_input_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_node.input[0],
                                     reshape_dims_name, reduction_dims_name))
  quantized_reshape_node = create_node(
      "QuantizedReshape", quantized_reshape_name,
      [quantize_input_name, shape_input_name, min_input_name, max_input_name])
  set_attr_dtype(quantized_reshape_node, "T", tf.quint8)
  self.add_output_graph_node(quantized_reshape_node)
  self.add_dequantize_result_node(quantized_reshape_name, original_node.name)
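The QuantizedReshape op used here simply reshapes the quint8 payload and passes the min/max range through unchanged. A minimal eager-mode sketch:

import tensorflow as tf

# Reshape a quantized tensor; the range tensors are forwarded untouched.
q = tf.quantization.quantize(tf.range(6, dtype=tf.float32), 0.0, 5.0, tf.quint8)
r, r_min, r_max = tf.raw_ops.QuantizedReshape(
    tensor=q.output, shape=[2, 3],
    input_min=q.output_min, input_max=q.output_max)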
Example 4: args_check
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def args_check(cls, node, **kwargs):
  supported_dtype = [
      tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.int8,
      tf.int16, tf.int32, tf.int64, tf.complex64, tf.quint8, tf.qint8,
      tf.qint32, tf.string, tf.bool, tf.complex128
  ]
  x = kwargs["tensor_dict"][node.inputs[0]]
  if x.dtype not in supported_dtype:
    exception.OP_UNSUPPORTED_EXCEPT(
        "Equal inputs in " + str(x.dtype) + " which", "Tensorflow")
Example 5: _testDequantizeOp
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
# (this snippet also uses: import numpy as np)
def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
  with self.test_session():
    input_op = tf.constant(inputs, shape=[len(inputs)], dtype=dtype)
    dequantized = tf.dequantize(input_op, min_range, max_range)
    tf_ans = dequantized.eval()

  # TODO(vrv): Add support for DT_QINT32 quantization if needed.
  type_dict = {
      tf.quint8: np.uint8,
      tf.qint8: np.int8,
      tf.quint16: np.uint16,
      tf.qint16: np.int16
  }
  self.assertTrue(dtype in type_dict.keys())
  v_max = np.iinfo(type_dict[dtype]).max
  v_min = np.iinfo(type_dict[dtype]).min
  self.assertTrue(min_range >= v_min)
  self.assertTrue(max_range <= v_max)
  type_range = v_max - v_min
  if v_min < 0:
    half_range = (type_range + 1) / 2
  else:
    half_range = 0.0
  np_ans = ((inputs.astype(np.float32) + half_range) *
            (max_range - min_range) / type_range) + min_range
  self.assertAllClose(tf_ans, np_ans)
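The reference formula above is easy to check by hand for tf.quint8, where v_min is 0 and there is no half_range shift. A small NumPy check with illustrative values:

import numpy as np

# quint8 stores codes 0..255; a code q maps back to
# q * (max_range - min_range) / 255 + min_range.
inputs = np.array([0, 128, 255], dtype=np.uint8)
min_range, max_range = 0.0, 6.0
np_ans = inputs.astype(np.float32) * (max_range - min_range) / 255 + min_range
print(np_ans)  # [0.       3.011765 6.      ]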
Example 6: testBasicQuint8
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
# (this snippet also uses: import numpy as np)
def testBasicQuint8(self):
  self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, tf.quint8)
  self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 123.456, tf.quint8)
  self._testDequantizeOp(np.array([0, 4, 42, 108, 243]), 5.0, 200.2, tf.quint8)
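The same ranges can be exercised without the test harness through the public quantize/dequantize API. A minimal round-trip sketch, assuming TF 2.x eager mode:

import tensorflow as tf

x = tf.constant([0.0, 3.0, 6.0])
q = tf.quantization.quantize(x, 0.0, 6.0, tf.quint8, mode="MIN_FIRST")
y = tf.quantization.dequantize(q.output, 0.0, 6.0, mode="MIN_FIRST")
print(y.numpy())  # approximately [0.0, 3.0, 6.0]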
Example 7: testStringConversion
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def testStringConversion(self):
  self.assertIs(tf.float32, tf.as_dtype("float32"))
  self.assertIs(tf.float64, tf.as_dtype("float64"))
  self.assertIs(tf.int32, tf.as_dtype("int32"))
  self.assertIs(tf.uint8, tf.as_dtype("uint8"))
  self.assertIs(tf.uint16, tf.as_dtype("uint16"))
  self.assertIs(tf.int16, tf.as_dtype("int16"))
  self.assertIs(tf.int8, tf.as_dtype("int8"))
  self.assertIs(tf.string, tf.as_dtype("string"))
  self.assertIs(tf.complex64, tf.as_dtype("complex64"))
  self.assertIs(tf.complex128, tf.as_dtype("complex128"))
  self.assertIs(tf.int64, tf.as_dtype("int64"))
  self.assertIs(tf.bool, tf.as_dtype("bool"))
  self.assertIs(tf.qint8, tf.as_dtype("qint8"))
  self.assertIs(tf.quint8, tf.as_dtype("quint8"))
  self.assertIs(tf.qint32, tf.as_dtype("qint32"))
  self.assertIs(tf.bfloat16, tf.as_dtype("bfloat16"))
  self.assertIs(tf.float32_ref, tf.as_dtype("float32_ref"))
  self.assertIs(tf.float64_ref, tf.as_dtype("float64_ref"))
  self.assertIs(tf.int32_ref, tf.as_dtype("int32_ref"))
  self.assertIs(tf.uint8_ref, tf.as_dtype("uint8_ref"))
  self.assertIs(tf.int16_ref, tf.as_dtype("int16_ref"))
  self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
  self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
  self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
  self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
  self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
  self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
  self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
  self.assertIs(tf.quint8_ref, tf.as_dtype("quint8_ref"))
  self.assertIs(tf.qint32_ref, tf.as_dtype("qint32_ref"))
  self.assertIs(tf.bfloat16_ref, tf.as_dtype("bfloat16_ref"))
  with self.assertRaises(TypeError):
    tf.as_dtype("not_a_type")
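Beyond the identity checks above, the DType returned by tf.as_dtype carries useful metadata for quantized types. A short sketch:

import tensorflow as tf

dt = tf.as_dtype("quint8")
print(dt is tf.quint8)    # True: as_dtype returns the canonical singleton
print(dt.is_quantized)    # True
print(dt.as_numpy_dtype)  # the NumPy stand-in type used to feed quint8 data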
Example 8: eightbitize_input_to_node
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
                              reshape_dims_name, reduction_dims_name):
  """Takes one float input to an op, and converts it to quantized form."""
  unique_input_name = unique_node_name_from_input(original_input_name)
  reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
  min_input_name = namespace_prefix + "_min_" + unique_input_name
  max_input_name = namespace_prefix + "_max_" + unique_input_name
  quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
  reshape_input_node = create_node("Reshape", reshape_input_name,
                                   [original_input_name, reshape_dims_name])
  set_attr_dtype(reshape_input_node, "T", tf.float32)
  self.add_output_graph_node(reshape_input_node)
  min_input_node = create_node("Min", min_input_name,
                               [reshape_input_name, reduction_dims_name])
  set_attr_dtype(min_input_node, "T", tf.float32)
  set_attr_bool(min_input_node, "keep_dims", False)
  self.add_output_graph_node(min_input_node)
  max_input_node = create_node("Max", max_input_name,
                               [reshape_input_name, reduction_dims_name])
  set_attr_dtype(max_input_node, "T", tf.float32)
  set_attr_bool(max_input_node, "keep_dims", False)
  self.add_output_graph_node(max_input_node)
  quantize_input_node = create_node(
      "QuantizeV2", quantize_input_name,
      [original_input_name, min_input_name, max_input_name])
  set_attr_dtype(quantize_input_node, "T", tf.quint8)
  set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(quantize_input_node)
  min_output_name = quantize_input_name + ":1"
  max_output_name = quantize_input_name + ":2"
  return quantize_input_name, min_output_name, max_output_name
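The QuantizeV2 node created here has three outputs, which is why the min and max are read from the ":1" and ":2" slots. The public wrapper tf.quantization.quantize exposes the same triple; a minimal sketch:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 2.5])
q = tf.quantization.quantize(x, tf.reduce_min(x), tf.reduce_max(x),
                             tf.quint8, mode="MIN_FIRST")
print(q.output.dtype)              # tf.quint8 (the ":0" slot)
print(q.output_min, q.output_max)  # the ":1" and ":2" slots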
Example 9: add_quantize_down_nodes
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def add_quantize_down_nodes(self, original_node, quantized_output_name):
  quantized_outputs = [
      quantized_output_name, quantized_output_name + ":1",
      quantized_output_name + ":2"
  ]
  min_max_inputs = None
  if self.should_merge_with_fake_quant_node():
    # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
    # Requantize.
    fake_quant_node = self.state.output_node_stack[-1][0]
    min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
    assert original_node.name not in self.state.merged_with_fake_quant
    self.state.merged_with_fake_quant[original_node.name] = True
  elif self.fallback_quantization_range:
    min_max_inputs = ["fallback_quantization_min_value:0",
                      "fallback_quantization_max_value:0"]
  else:
    # Add a RequantizationRange node for finding the min and max values.
    requant_range_node = create_node(
        "RequantizationRange", original_node.name + "_eightbit_requant_range",
        quantized_outputs)
    set_attr_dtype(requant_range_node, "Tinput", tf.qint32)
    self.add_output_graph_node(requant_range_node)
    min_max_inputs = [requant_range_node.name + ":0",
                      requant_range_node.name + ":1"]
  requantize_node = create_node(
      "Requantize", original_node.name + "_eightbit_requantize",
      quantized_outputs + min_max_inputs)
  set_attr_dtype(requantize_node, "Tinput", tf.qint32)
  set_attr_dtype(requantize_node, "out_type", tf.quint8)
  self.add_output_graph_node(requantize_node)
  return requantize_node.name
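The RequantizationRange/Requantize pair added in the default branch can be tried directly through tf.raw_ops. A minimal sketch with a hand-built qint32 tensor (the bitcast is only a convenient way to fabricate one):

import tensorflow as tf

# Pretend qint32 accumulator output whose float range is [-5, 5].
acc = tf.bitcast(tf.constant([[1, 100000]], dtype=tf.int32), tf.qint32)
in_min, in_max = tf.constant(-5.0), tf.constant(5.0)
# Find the range actually used, then requantize down into quint8.
r_min, r_max = tf.raw_ops.RequantizationRange(
    input=acc, input_min=in_min, input_max=in_max)
out, out_min, out_max = tf.raw_ops.Requantize(
    input=acc, input_min=in_min, input_max=in_max,
    requested_output_min=r_min, requested_output_max=r_max,
    out_type=tf.quint8)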
Example 10: eightbitize_bias_add_node
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def eightbitize_bias_add_node(self, original_node):
  """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
  quantized_bias_add_name = (original_node.name +
                             "_eightbit_quantized_bias_add")
  all_input_names = self.add_eightbit_prologue_nodes(original_node)
  quantized_bias_add_node = create_node("QuantizedBiasAdd",
                                        quantized_bias_add_name,
                                        all_input_names)
  set_attr_dtype(quantized_bias_add_node, "T1", tf.quint8)
  set_attr_dtype(quantized_bias_add_node, "T2", tf.quint8)
  set_attr_dtype(quantized_bias_add_node, "out_type", tf.qint32)
  self.add_output_graph_node(quantized_bias_add_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_bias_add_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
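QuantizedBiasAdd follows the same pattern as the matmul example: quint8 input and bias in, qint32 accumulator out. A minimal eager sketch, assuming CPU kernels are available:

import tensorflow as tf

x = tf.quantization.quantize(tf.ones([1, 4]), 0.0, 1.0, tf.quint8)
b = tf.quantization.quantize(tf.ones([4]), 0.0, 1.0, tf.quint8)
out, out_min, out_max = tf.raw_ops.QuantizedBiasAdd(
    input=x.output, bias=b.output,
    min_input=x.output_min, max_input=x.output_max,
    min_bias=b.output_min, max_bias=b.output_max,
    out_type=tf.qint32)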
Example 11: add_pool_function
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def add_pool_function(self, original_node, quantized_op_node):
  set_attr_dtype(quantized_op_node, "T", tf.quint8)
  copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
  copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
  copy_attr(quantized_op_node, "padding", original_node.attr["padding"])
Example 12: add_relu_function
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def add_relu_function(self, unused_arg_node, quantized_op_node):
  set_attr_dtype(quantized_op_node, "Tinput", tf.quint8)
Example 13: eightbitize_placeholder_node
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
def eightbitize_placeholder_node(self, current_node):
  """Replaces a placeholder node with a quint8 placeholder node+dequantize."""
  name = current_node.name
  # Convert the placeholder into a quantized type.
  output_node = tf.NodeDef()
  output_node.CopyFrom(current_node)
  set_attr_dtype(output_node, "dtype", tf.quint8)
  output_node.name += "_original_input"
  self.add_output_graph_node(output_node)
  # Add a dequantize to convert back to float.
  dequantize_node = create_node(
      "Dequantize", name,
      [output_node.name, "quantized_input_min_value",
       "quantized_input_max_value"])
  set_attr_dtype(dequantize_node, "T", tf.quint8)
  set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(dequantize_node)
  # For the descent over the graph to work, the dequantize node must be named
  # current_node.name. However, for the feeding of the graph to work, the
  # placeholder must have the name current_node.name; so record a final set
  # of renames to apply after all processing has been done.
  self.final_node_renames[output_node.name] = name
  self.final_node_renames[dequantize_node.name] = name + "_dequantize"
Example 14: _RunTestsForQuantizedInputRange
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
# (this snippet also uses: import sys; import numpy as np)
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
                                    output_names, input_range):
  if sys.version_info[0] == 3:
    # uint8->quint8 conversion for numpy is not working currently.
    return
  quantized_input_map = {}
  for k, v in input_map.items():
    arr = [
        int(round((n - input_range[0]) * 255 /
                  (input_range[1] - input_range[0]))) for n in v.flat
    ]
    arr = np.array(arr, np.uint8)
    arr = arr.reshape(v.shape)
    arr = arr.astype(tf.quint8.as_numpy_dtype)
    quantized_input_map[k] = arr
  output_tensors = [output_name + ":0" for output_name in output_names]
  float_results = run_graph_def(float_graph_def, input_map, output_tensors)

  # Quantize treating the input as quantized in range <input_range>.
  rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
                                          input_range)
  graph_def = rewriter.rewrite(output_names)
  results = run_graph_def(graph_def, quantized_input_map, output_tensors)
  for expected, result in zip(float_results, results):
    assert are_tensors_near(expected, result, .5)
  ops = [node.op for node in graph_def.node]
  self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
  self.assertEqual(len(output_names), ops.count("Dequantize"))

  # Quantize without treating input as quantized.
  rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
                                          quantized_input_range=None)
  graph_def = rewriter.rewrite(output_names)
  results = run_graph_def(graph_def, input_map, output_tensors)
  for expected, result in zip(float_results, results):
    assert are_tensors_near(expected, result, .5)
  ops = [node.op for node in graph_def.node]
  self.assertEqual(len(input_map),
                   ops.count("QuantizeV2") + ops.count("Quantize"))
  self.assertEqual(len(output_names), ops.count("Dequantize"))
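The uint8-to-quint8 feeding trick in the loop above also works in isolation; it is the standard way to hand pre-quantized bytes to a quint8 input. A small sketch:

import numpy as np
import tensorflow as tf

input_range = (0.0, 6.0)
v = np.array([0.0, 3.0, 6.0], dtype=np.float32)
# Map floats in input_range onto the 0..255 code points...
codes = np.round((v - input_range[0]) * 255 / (input_range[1] - input_range[0]))
# ...then reinterpret the bytes as TensorFlow's quint8 NumPy stand-in.
arr = codes.astype(np.uint8).astype(tf.quint8.as_numpy_dtype)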
Example 15: inference
# Required import: import tensorflow as tf
# Or: from tensorflow import quint8
# (this method belongs to TfPoseEstimator; it also uses time, logger and
#  PoseEstimator from the surrounding module)
def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
    if npimg is None:
        raise Exception('The image is not valid. Please check your image exists.')

    if resize_to_default:
        upsample_size = [int(self.target_size[1] / 8 * upsample_size),
                         int(self.target_size[0] / 8 * upsample_size)]
    else:
        upsample_size = [int(npimg.shape[0] / 8 * upsample_size),
                         int(npimg.shape[1] / 8 * upsample_size)]

    if self.tensor_image.dtype == tf.quint8:
        # Quantize the input image before feeding the quint8 input tensor.
        npimg = TfPoseEstimator._quantize_img(npimg)

    logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
    img = npimg
    if resize_to_default:
        img = self._get_scaled_img(npimg, None)[0][0]
    peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
        [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
        feed_dict={self.tensor_image: [img], self.upsample_size: upsample_size})
    peaks = peaks[0]
    self.heatMat = heatMat_up[0]
    self.pafMat = pafMat_up[0]
    logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
        self.heatMat.shape[1], self.heatMat.shape[0],
        self.pafMat.shape[1], self.pafMat.shape[0]))

    t = time.time()
    humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
    logger.debug('estimate time=%.5f' % (time.time() - t))
    return humans
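_quantize_img is internal to TfPoseEstimator and its exact scaling is not shown on this page. A purely hypothetical stand-in that reinterprets a uint8 image as quint8 bytes unchanged might look like the sketch below; the identity mapping is an assumption, not the library's actual preprocessing:

import numpy as np
import tensorflow as tf

def _quantize_img(npimg):
    # Hypothetical: pass the 0..255 pixel bytes through as quint8 directly.
    return npimg.astype(np.uint8).astype(tf.quint8.as_numpy_dtype)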