This article collects typical usage examples of tensorflow.python.framework.dtypes.qint32 in Python. If you are wondering what dtypes.qint32 is for and how to use it, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.framework.dtypes.
The following shows 5 code examples of dtypes.qint32, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
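Before the examples, a quick orientation: dtypes.qint32 is TensorFlow's quantized signed 32-bit DType. In the eight-bit rewrites below it serves as the accumulator type of ops such as QuantizedMatMul and QuantizedConv2D, whose quint8-by-quint8 products need 32 bits of headroom. A minimal sketch of inspecting the type:

from tensorflow.python.framework import dtypes

print(dtypes.qint32)                   # <dtype: 'qint32'>
print(dtypes.qint32.is_quantized)      # True
print(dtypes.qint32.as_datatype_enum)  # the DataType enum stored in NodeDef attrs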
Example 1: eightbitize_mat_mul_node
# Required module import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import qint32 [as alias]
def eightbitize_mat_mul_node(self, original_node):
  """Replaces a MatMul node with the eight bit equivalent sub-graph."""
  quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
  all_input_names = self.add_eightbit_prologue_nodes(original_node)
  quantized_mat_mul_node = create_node("QuantizedMatMul",
                                       quantized_mat_mul_name,
                                       all_input_names)
  set_attr_dtype(quantized_mat_mul_node, "T1", dtypes.quint8)
  set_attr_dtype(quantized_mat_mul_node, "T2", dtypes.quint8)
  set_attr_dtype(quantized_mat_mul_node, "Toutput", dtypes.qint32)
  copy_attr(quantized_mat_mul_node, "transpose_a",
            original_node.attr["transpose_a"])
  copy_attr(quantized_mat_mul_node, "transpose_b",
            original_node.attr["transpose_b"])
  self.add_output_graph_node(quantized_mat_mul_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_mat_mul_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
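The helpers create_node, set_attr_dtype and copy_attr used in every example come from the same quantize_graph tool rather than the public TensorFlow API. A minimal sketch of what they do, assuming the standard NodeDef/AttrValue protos (see tensorflow/tools/quantization/quantize_graph.py for the real definitions):

from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2

def create_node(op, name, inputs):
  # Builds a bare NodeDef wired to the given input tensor names.
  new_node = node_def_pb2.NodeDef()
  new_node.op = op
  new_node.name = name
  for input_name in inputs:
    new_node.input.extend([input_name])
  return new_node

def set_attr_dtype(node, key, value):
  # Stores e.g. dtypes.qint32 as its DataType enum in the node's attr map.
  node.attr[key].CopyFrom(
      attr_value_pb2.AttrValue(type=value.as_datatype_enum))

def copy_attr(node, key, attr_value):
  # Copies an existing AttrValue (e.g. transpose_a) onto the new node.
  node.attr[key].CopyFrom(attr_value)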
Example 2: add_quantize_down_nodes
# Required module import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import qint32 [as alias]
def add_quantize_down_nodes(self, original_node, quantized_output_name):
  """Adds a Requantize stage that maps a qint32 result back down to quint8."""
  quantized_outputs = [
      quantized_output_name, quantized_output_name + ":1",
      quantized_output_name + ":2"
  ]
  min_max_inputs = None
  if self.should_merge_with_fake_quant_node():
    # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
    # Requantize.
    fake_quant_node = self.state.output_node_stack[-1][0]
    min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
    assert original_node.name not in self.state.merged_with_fake_quant
    self.state.merged_with_fake_quant[original_node.name] = True
  elif self.fallback_quantization_range:
    min_max_inputs = [
        "fallback_quantization_min_value:0",
        "fallback_quantization_max_value:0"
    ]
  else:
    # Add a RequantizationRange node for finding the min and max values.
    requant_range_node = create_node(
        "RequantizationRange", original_node.name + "_eightbit_requant_range",
        quantized_outputs)
    set_attr_dtype(requant_range_node, "Tinput", dtypes.qint32)
    self.add_output_graph_node(requant_range_node)
    min_max_inputs = [
        requant_range_node.name + ":0", requant_range_node.name + ":1"
    ]
  requantize_node = create_node("Requantize",
                                original_node.name + "_eightbit_requantize",
                                quantized_outputs + min_max_inputs)
  set_attr_dtype(requantize_node, "Tinput", dtypes.qint32)
  set_attr_dtype(requantize_node, "out_type", dtypes.quint8)
  self.add_output_graph_node(requantize_node)
  return requantize_node.name
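The ":1" and ":2" suffixes above address the extra min/max float outputs that every quantized op emits alongside its data tensor. For intuition, here is a hedged eager sketch of the same RequantizationRange/Requantize pair driven through raw ops (TF 1.14+; the input values are made up):

import tensorflow as tf

# Quantize a float tensor into qint32 to stand in for an op's accumulator.
q = tf.quantization.quantize(tf.random.normal([2, 2]), -1.0, 1.0, tf.qint32)
# Find the actually-used range of the qint32 values...
out_min, out_max = tf.raw_ops.RequantizationRange(
    input=q.output, input_min=q.output_min, input_max=q.output_max)
# ...and map the accumulator down into quint8 over that tighter range.
requantized, _, _ = tf.raw_ops.Requantize(
    input=q.output, input_min=q.output_min, input_max=q.output_max,
    requested_output_min=out_min, requested_output_max=out_max,
    out_type=tf.quint8)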
Example 3: eightbitize_conv_node
# Required module import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import qint32 [as alias]
def eightbitize_conv_node(self, original_node):
  """Replaces a Conv2D node with the eight bit equivalent sub-graph."""
  all_input_names = self.add_eightbit_prologue_nodes(original_node)
  quantized_conv_name = original_node.name + "_eightbit_quantized_conv"
  quantized_conv_node = create_node("QuantizedConv2D", quantized_conv_name,
                                    all_input_names)
  copy_attr(quantized_conv_node, "strides", original_node.attr["strides"])
  copy_attr(quantized_conv_node, "padding", original_node.attr["padding"])
  set_attr_dtype(quantized_conv_node, "Tinput", dtypes.quint8)
  set_attr_dtype(quantized_conv_node, "Tfilter", dtypes.quint8)
  set_attr_dtype(quantized_conv_node, "out_type", dtypes.qint32)
  self.add_output_graph_node(quantized_conv_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_conv_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
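The QuantizedConv2D op wired in above can also be exercised directly; a hedged eager sketch (raw ops; shapes and ranges are made up) showing why out_type is qint32:

import tensorflow as tf

x = tf.quantization.quantize(tf.random.normal([1, 4, 4, 1]), -1.0, 1.0,
                             tf.quint8)
w = tf.quantization.quantize(tf.random.normal([2, 2, 1, 1]), -1.0, 1.0,
                             tf.quint8)
conv, conv_min, conv_max = tf.raw_ops.QuantizedConv2D(
    input=x.output, filter=w.output,
    min_input=x.output_min, max_input=x.output_max,
    min_filter=w.output_min, max_filter=w.output_max,
    strides=[1, 1, 1, 1], padding="SAME", out_type=tf.qint32)
# conv is qint32: 8-bit by 8-bit products are accumulated at 32 bits,
# which is exactly what add_quantize_down_nodes then maps back to quint8.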
Example 4: eightbitize_bias_add_node
# Required module import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import qint32 [as alias]
def eightbitize_bias_add_node(self, original_node):
  """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
  quantized_bias_add_name = (
      original_node.name + "_eightbit_quantized_bias_add")
  all_input_names = self.add_eightbit_prologue_nodes(original_node)
  quantized_bias_add_node = create_node("QuantizedBiasAdd",
                                        quantized_bias_add_name,
                                        all_input_names)
  set_attr_dtype(quantized_bias_add_node, "T1", dtypes.quint8)
  set_attr_dtype(quantized_bias_add_node, "T2", dtypes.quint8)
  set_attr_dtype(quantized_bias_add_node, "out_type", dtypes.qint32)
  self.add_output_graph_node(quantized_bias_add_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_bias_add_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
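The T1/T2/out_type attributes set above mirror the raw op's signature one-to-one; a hedged sketch (made-up shapes and ranges):

import tensorflow as tf

x = tf.quantization.quantize(tf.random.normal([1, 3]), -1.0, 1.0, tf.quint8)
b = tf.quantization.quantize(tf.random.normal([3]), -1.0, 1.0, tf.quint8)
out, out_min, out_max = tf.raw_ops.QuantizedBiasAdd(
    input=x.output, bias=b.output,
    min_input=x.output_min, max_input=x.output_max,
    min_bias=b.output_min, max_bias=b.output_max,
    out_type=tf.qint32)  # the qint32 set via set_attr_dtype above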
Example 5: eightbitize_batch_norm_node
# Required module import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import qint32 [as alias]
def eightbitize_batch_norm_node(self, original_node):
  """Replaces a BatchNormWithGlobalNormalization node with the eight bit
  equivalent sub-graph."""
  namespace_prefix = original_node.name + "_eightbit"
  original_input_name = original_node.input[0]
  original_mean_name = original_node.input[1]
  original_variance_name = original_node.input[2]
  original_beta_name = original_node.input[3]
  original_gamma_name = original_node.input[4]
  quantized_batch_norm_name = namespace_prefix + "_quantized_batch_norm"
  reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
      namespace_prefix)
  quantize_input_name, min_input_name, max_input_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_input_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_mean_name, min_mean_name, max_mean_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_mean_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_variance_name, min_variance_name, max_variance_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_variance_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_beta_name, min_beta_name, max_beta_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_beta_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_gamma_name, min_gamma_name, max_gamma_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_gamma_name,
                                     reshape_dims_name, reduction_dims_name))
  quantized_batch_norm_node = create_node(
      "QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name,
      [
          quantize_input_name, min_input_name, max_input_name,
          quantize_mean_name, min_mean_name, max_mean_name,
          quantize_variance_name, min_variance_name, max_variance_name,
          quantize_beta_name, min_beta_name, max_beta_name,
          quantize_gamma_name, min_gamma_name, max_gamma_name
      ])
  set_attr_dtype(quantized_batch_norm_node, "Tinput", dtypes.quint8)
  set_attr_dtype(quantized_batch_norm_node, "out_type", dtypes.qint32)
  copy_attr(quantized_batch_norm_node, "scale_after_normalization",
            original_node.attr["scale_after_normalization"])
  copy_attr(quantized_batch_norm_node, "variance_epsilon",
            original_node.attr["variance_epsilon"])
  self.add_output_graph_node(quantized_batch_norm_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_batch_norm_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
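Finally, these eightbitize_* methods are usually not called one by one; they are dispatched by the GraphRewriter class in tensorflow/tools/quantization/quantize_graph.py as it walks a float graph. A hedged sketch of driving the full rewrite (input_graph_def is assumed to be a float GraphDef you have already loaded, and "softmax" is a made-up output node name):

from tensorflow.tools.quantization import quantize_graph

rewriter = quantize_graph.GraphRewriter(
    input_graph_def, "eightbit", quantized_input_range=None)
output_graph_def = rewriter.rewrite(output_node_names=["softmax"])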