

Python dtypes.quint8 code examples

This article collects typical usage examples of tensorflow.python.framework.dtypes.quint8 in Python. If you are wondering what dtypes.quint8 is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from its containing module, tensorflow.python.framework.dtypes.


The following presents 8 code examples that use dtypes.quint8, sorted by popularity by default.
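Before the examples, here is a minimal standalone sketch (not taken from any project below; the node name is illustrative) of what dtypes.quint8 is and how a set_attr_dtype-style helper records it in a NodeDef:

from tensorflow.core.framework import attr_value_pb2, node_def_pb2
from tensorflow.python.framework import dtypes

print(dtypes.quint8.name)          # 'quint8' -- TensorFlow's unsigned 8-bit quantized type
print(dtypes.quint8.is_quantized)  # True

node = node_def_pb2.NodeDef()
node.op = "Dequantize"
node.name = "example_dequantize"
# Equivalent of the set_attr_dtype(node, "T", dtypes.quint8) calls in the examples below:
node.attr["T"].CopyFrom(
    attr_value_pb2.AttrValue(type=dtypes.quint8.as_datatype_enum))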

Example 1: add_dequantize_result_node

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def add_dequantize_result_node(self,
                                 quantized_output_name,
                                 original_node_name,
                                 min_tensor_index=1):
    min_max_inputs = [
        "%s:%s" % (quantized_output_name, min_tensor_index),
        "%s:%s" % (quantized_output_name, (min_tensor_index + 1))
    ]
    dequantize_name = original_node_name
    if self.should_merge_with_fake_quant_node():
      fake_quant_node = self.state.output_node_stack[-1][0]
      if original_node_name not in self.state.merged_with_fake_quant:
        min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
        self.state.merged_with_fake_quant[original_node_name] = True
      dequantize_name = fake_quant_node.name

    dequantize_node = create_node(
        "Dequantize", dequantize_name,
        [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
    set_attr_dtype(dequantize_node, "T", dtypes.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node) 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 24, Source: quantize_graph.py
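For intuition, the Dequantize node created above (mode MIN_FIRST, T=quint8) maps 8-bit values back to floats roughly as in the NumPy sketch below; the formula is an approximation based on the TensorFlow Dequantize documentation, not code from quantize_graph.py:

import numpy as np

def dequantize_min_first(quantized, min_range, max_range):
    """Maps quint8 values in [0, 255] back to floats in roughly [min_range, max_range]."""
    scale = (max_range - min_range) / 255.0
    return min_range + quantized.astype(np.float32) * scale

q = np.array([0, 128, 255], dtype=np.uint8)
print(dequantize_min_first(q, -1.0, 1.0))  # roughly [-1.0, 0.004, 1.0]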

Example 2: eightbitize_input_to_node

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
                                reshape_dims_name, reduction_dims_name):
    """Takes one float input to an op, and converts it to quantized form."""
    unique_input_name = unique_node_name_from_input(original_input_name)
    reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
    min_input_name = namespace_prefix + "_min_" + unique_input_name
    max_input_name = namespace_prefix + "_max_" + unique_input_name
    quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
    reshape_input_node = create_node("Reshape", reshape_input_name,
                                     [original_input_name, reshape_dims_name])
    set_attr_dtype(reshape_input_node, "T", dtypes.float32)
    self.add_output_graph_node(reshape_input_node)
    min_input_node = create_node("Min", min_input_name,
                                 [reshape_input_name, reduction_dims_name])
    set_attr_dtype(min_input_node, "T", dtypes.float32)
    set_attr_bool(min_input_node, "keep_dims", False)
    self.add_output_graph_node(min_input_node)
    max_input_node = create_node("Max", max_input_name,
                                 [reshape_input_name, reduction_dims_name])
    set_attr_dtype(max_input_node, "T", dtypes.float32)
    set_attr_bool(max_input_node, "keep_dims", False)
    self.add_output_graph_node(max_input_node)
    quantize_input_node = create_node(
        "QuantizeV2", quantize_input_name,
        [original_input_name, min_input_name, max_input_name])
    set_attr_dtype(quantize_input_node, "T", dtypes.quint8)
    set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(quantize_input_node)
    min_output_name = quantize_input_name + ":1"
    max_output_name = quantize_input_name + ":2"
    return quantize_input_name, min_output_name, max_output_name 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 33, Source: quantize_graph.py
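In effect, the Reshape/Min/Max/QuantizeV2 sub-graph built above computes something like the following; this NumPy version is only a hedged sketch of the MIN_FIRST arithmetic for quint8, not part of the original file:

import numpy as np

def quantize_min_first(values, num_bits=8):
    flat = values.reshape(-1)                      # the Reshape node
    min_value, max_value = flat.min(), flat.max()  # the Min / Max nodes
    scale = (max_value - min_value) / (2 ** num_bits - 1)
    quantized = np.round((values - min_value) / scale).astype(np.uint8)
    return quantized, min_value, max_value         # mirrors outputs :0, :1 and :2

x = np.array([[-1.0, 0.0], [0.5, 1.0]], dtype=np.float32)
print(quantize_min_first(x))  # roughly ([[0, 128], [191, 255]], -1.0, 1.0)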

Example 3: add_quantize_down_nodes

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def add_quantize_down_nodes(self, original_node, quantized_output_name):
    quantized_outputs = [
        quantized_output_name, quantized_output_name + ":1",
        quantized_output_name + ":2"
    ]
    min_max_inputs = None
    if self.should_merge_with_fake_quant_node():
      # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
      # Requantize.
      fake_quant_node = self.state.output_node_stack[-1][0]
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      assert original_node.name not in self.state.merged_with_fake_quant
      self.state.merged_with_fake_quant[original_node.name] = True
    elif self.fallback_quantization_range:
      min_max_inputs = [
          "fallback_quantization_min_value:0",
          "fallback_quantization_max_value:0"
      ]
    else:
      # Add a RequantizationRange node for finding the min and max values.
      requant_range_node = create_node(
          "RequantizationRange", original_node.name + "_eightbit_requant_range",
          quantized_outputs)
      set_attr_dtype(requant_range_node, "Tinput", dtypes.qint32)
      self.add_output_graph_node(requant_range_node)
      min_max_inputs = [
          requant_range_node.name + ":0", requant_range_node.name + ":1"
      ]
    requantize_node = create_node("Requantize",
                                  original_node.name + "_eightbit_requantize",
                                  quantized_outputs + min_max_inputs)
    set_attr_dtype(requantize_node, "Tinput", dtypes.qint32)
    set_attr_dtype(requantize_node, "out_type", dtypes.quint8)
    self.add_output_graph_node(requantize_node)
    return requantize_node.name 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 37, Source: quantize_graph.py
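As an approximate picture of what the resulting Requantize node does with whichever min/max inputs were chosen (RequantizationRange, the fallback constants, or the FakeQuant range), here is a float-based NumPy sketch; the real kernel uses integer arithmetic, so treat this as an assumption for illustration only:

import numpy as np

def requantize(acc_qint32, in_min, in_max, out_min, out_max):
    # Recover approximate float values from the 32-bit accumulator range...
    floats = in_min + (acc_qint32.astype(np.float64) + 2.0**31) * (in_max - in_min) / 2.0**32
    # ...then re-quantize them as quint8 over the requested output range.
    q8 = np.round((floats - out_min) * 255.0 / (out_max - out_min))
    return np.clip(q8, 0, 255).astype(np.uint8)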

Example 4: eightbitize_conv_node

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def eightbitize_conv_node(self, original_node):
    """Replaces a Conv2D node with the eight bit equivalent sub-graph."""
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_conv_name = original_node.name + "_eightbit_quantized_conv"
    quantized_conv_node = create_node("QuantizedConv2D", quantized_conv_name,
                                      all_input_names)
    copy_attr(quantized_conv_node, "strides", original_node.attr["strides"])
    copy_attr(quantized_conv_node, "padding", original_node.attr["padding"])
    set_attr_dtype(quantized_conv_node, "Tinput", dtypes.quint8)
    set_attr_dtype(quantized_conv_node, "Tfilter", dtypes.quint8)
    set_attr_dtype(quantized_conv_node, "out_type", dtypes.qint32)
    self.add_output_graph_node(quantized_conv_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_conv_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 17, Source: quantize_graph.py
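A hedged sketch of how this conv rewrite is usually driven end to end. The GraphRewriter constructor and rewrite() call follow the quantize_graph.py in this project, but treat the exact signature, the file path, and the output node name as assumptions:

from tensorflow.core.framework import graph_pb2
import quantize_graph

input_graph_def = graph_pb2.GraphDef()
with open("frozen_graph.pb", "rb") as f:       # path is illustrative
    input_graph_def.ParseFromString(f.read())

rewriter = quantize_graph.GraphRewriter(
    input_graph_def,
    mode="eightbit",
    quantized_input_range=None,                # keep float placeholders
    fallback_quantization_range=None)          # let RequantizationRange pick ranges
output_graph_def = rewriter.rewrite(["final_result"])  # output node name is illustrative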

Example 5: eightbitize_bias_add_node

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def eightbitize_bias_add_node(self, original_node):
    """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
    quantized_bias_add_name = (
        original_node.name + "_eightbit_quantized_bias_add")
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_bias_add_node = create_node("QuantizedBiasAdd",
                                          quantized_bias_add_name,
                                          all_input_names)
    set_attr_dtype(quantized_bias_add_node, "T1", dtypes.quint8)
    set_attr_dtype(quantized_bias_add_node, "T2", dtypes.quint8)
    set_attr_dtype(quantized_bias_add_node, "out_type", dtypes.qint32)
    self.add_output_graph_node(quantized_bias_add_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_bias_add_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 17, Source: quantize_graph.py

Example 6: add_pool_function

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def add_pool_function(self, original_node, quantized_op_node):
    set_attr_dtype(quantized_op_node, "T", dtypes.quint8)
    copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
    copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
    copy_attr(quantized_op_node, "padding", original_node.attr["padding"]) 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 7, Source: quantize_graph.py

Example 7: add_relu_function

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def add_relu_function(self, unused_arg_node, quantized_op_node):
    set_attr_dtype(quantized_op_node, "Tinput", dtypes.quint8) 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 4, Source: quantize_graph.py

Example 8: eightbitize_placeholder_node

# Required import: from tensorflow.python.framework import dtypes [as alias]
# Or: from tensorflow.python.framework.dtypes import quint8 [as alias]
def eightbitize_placeholder_node(self, current_node):
    """Replaces a placeholder node with a quint8 placeholder node+dequantize."""
    name = current_node.name

    # Convert the placeholder into a quantized type.
    output_node = node_def_pb2.NodeDef()
    output_node.CopyFrom(current_node)
    set_attr_dtype(output_node, "dtype", dtypes.quint8)
    output_node.name += "_original_input"
    self.add_output_graph_node(output_node)

    # Add a dequantize to convert back to float.
    dequantize_node = create_node("Dequantize", name, [
        output_node.name, "quantized_input_min_value",
        "quantized_input_max_value"
    ])
    set_attr_dtype(dequantize_node, "T", dtypes.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)

    # For the descent over the graph to work, the dequantize node must be named
    # current_node.name.  However, for the feeding of the graph to work, the
    # placeholder must have the name current_node.name; so record a final set
    # of renames to apply after all processing has been done.
    self.final_node_renames[output_node.name] = name
    self.final_node_renames[dequantize_node.name] = name + "_dequantize" 
Developer: googlecodelabs, Project: tensorflow-for-poets-2, Lines: 28, Source: quantize_graph.py
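A hedged sketch of feeding the rewritten graph in TF 1.x (which this project targets). When the rewriter was given a quantized_input_range, the placeholder becomes quint8 but keeps its original name after the final renames, so the client feeds raw uint8 data; output_graph_def (from the rewrite sketch above), the node names, and the shape below are illustrative assumptions:

import numpy as np
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(output_graph_def, name="")

with tf.Session(graph=graph) as sess:
    image = np.random.randint(0, 256, size=(1, 224, 224, 3), dtype=np.uint8)
    result = sess.run("final_result:0", feed_dict={"input:0": image})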


Note: The tensorflow.python.framework.dtypes.quint8 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.