This article collects typical usage examples of the tensorflow.qint8 dtype in Python. If you are wondering how to use tensorflow.qint8, what it does, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the tensorflow module it belongs to.
The following presents 9 code examples of tensorflow.qint8, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
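Before the examples, here is a minimal sketch of the round trip most of them build on: quantizing a float tensor to tf.qint8 with tf.quantize and recovering approximate float values with tf.dequantize. It is illustrative only (the tensor values and the SCALED mode are chosen for illustration, not taken from the examples) and assumes TensorFlow 1.x graph mode, which is what the examples below use.

import numpy as np
import tensorflow as tf

# Quantize a small float tensor to 8-bit signed integers. tf.quantize
# returns the quantized tensor plus the min/max range it actually used.
x = tf.constant(np.linspace(-1.0, 1.0, 8), dtype=tf.float32)
q, q_min, q_max = tf.quantize(x, -1.0, 1.0, tf.qint8, mode="SCALED")

# Recover an approximation of the original float values.
x_approx = tf.dequantize(q, q_min, q_max, mode="SCALED")

with tf.Session() as sess:
    print(sess.run(q))         # int8 codes
    print(sess.run(x_approx))  # floats close to x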
Example 1: args_check
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def args_check(cls, node, **kwargs):
  supported_dtype = [
      tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.int8,
      tf.int16, tf.int32, tf.int64, tf.complex64, tf.quint8, tf.qint8,
      tf.qint32, tf.string, tf.bool, tf.complex128
  ]
  x = kwargs["tensor_dict"][node.inputs[0]]
  if x.dtype not in supported_dtype:
    exception.OP_UNSUPPORTED_EXCEPT(
        "Equal inputs in " + str(x.dtype) + " which", "Tensorflow")
Example 2: _testDequantizeOp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
  with self.test_session():
    input_op = tf.constant(inputs, shape=[len(inputs)], dtype=dtype)
    dequantized = tf.dequantize(
        input_op, min_range, max_range)
    tf_ans = dequantized.eval()

  # TODO(vrv): Add support for DT_QINT32 quantization if needed.
  type_dict = {
      tf.quint8: np.uint8,
      tf.qint8: np.int8,
      tf.quint16: np.uint16,
      tf.qint16: np.int16
  }
  self.assertTrue(dtype in type_dict.keys())
  v_max = np.iinfo(type_dict[dtype]).max
  v_min = np.iinfo(type_dict[dtype]).min
  self.assertTrue(min_range >= v_min)
  self.assertTrue(max_range <= v_max)
  type_range = v_max - v_min
  if v_min < 0:
    half_range = (type_range + 1) / 2
  else:
    half_range = 0.0

  np_ans = ((inputs.astype(np.float32) + half_range) *
            (max_range - min_range) / type_range) + min_range
  self.assertAllClose(tf_ans, np_ans)
Example 3: testBasicQint8
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def testBasicQint8(self):
  self._testDequantizeOp(np.array([-128, 0, 127]),
                         -1.0, 2.0, tf.qint8)
  self._testDequantizeOp(np.array([-2, 4, -17]),
                         -5.0, -3.0, tf.qint8)
  self._testDequantizeOp(np.array([0, -4, 42, -108]),
                         5.0, 40.0, tf.qint8)
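As a quick sanity check that is not part of the original tests, the NumPy snippet below applies the reference formula from Example 2 to the first call above (tf.qint8, min_range = -1.0, max_range = 2.0). For a signed 8-bit type, v_min = -128 and v_max = 127, so type_range = 255 and half_range = 128.

import numpy as np

inputs = np.array([-128, 0, 127], dtype=np.float32)
min_range, max_range = -1.0, 2.0
v_min = np.iinfo(np.int8).min        # -128
v_max = np.iinfo(np.int8).max        # 127
type_range = v_max - v_min           # 255
half_range = (type_range + 1) / 2    # 128, because the type is signed
expected = (inputs + half_range) * (max_range - min_range) / type_range + min_range
print(expected)  # roughly [-1.0, 0.5059, 2.0], the values tf.dequantize should produce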
Example 4: testStringConversion
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def testStringConversion(self):
  self.assertIs(tf.float32, tf.as_dtype("float32"))
  self.assertIs(tf.float64, tf.as_dtype("float64"))
  self.assertIs(tf.int32, tf.as_dtype("int32"))
  self.assertIs(tf.uint8, tf.as_dtype("uint8"))
  self.assertIs(tf.uint16, tf.as_dtype("uint16"))
  self.assertIs(tf.int16, tf.as_dtype("int16"))
  self.assertIs(tf.int8, tf.as_dtype("int8"))
  self.assertIs(tf.string, tf.as_dtype("string"))
  self.assertIs(tf.complex64, tf.as_dtype("complex64"))
  self.assertIs(tf.complex128, tf.as_dtype("complex128"))
  self.assertIs(tf.int64, tf.as_dtype("int64"))
  self.assertIs(tf.bool, tf.as_dtype("bool"))
  self.assertIs(tf.qint8, tf.as_dtype("qint8"))
  self.assertIs(tf.quint8, tf.as_dtype("quint8"))
  self.assertIs(tf.qint32, tf.as_dtype("qint32"))
  self.assertIs(tf.bfloat16, tf.as_dtype("bfloat16"))
  self.assertIs(tf.float32_ref, tf.as_dtype("float32_ref"))
  self.assertIs(tf.float64_ref, tf.as_dtype("float64_ref"))
  self.assertIs(tf.int32_ref, tf.as_dtype("int32_ref"))
  self.assertIs(tf.uint8_ref, tf.as_dtype("uint8_ref"))
  self.assertIs(tf.int16_ref, tf.as_dtype("int16_ref"))
  self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
  self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
  self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
  self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
  self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
  self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
  self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
  self.assertIs(tf.quint8_ref, tf.as_dtype("quint8_ref"))
  self.assertIs(tf.qint32_ref, tf.as_dtype("qint32_ref"))
  self.assertIs(tf.bfloat16_ref, tf.as_dtype("bfloat16_ref"))
  with self.assertRaises(TypeError):
    tf.as_dtype("not_a_type")
Example 5: testDtypes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def testDtypes(self):
  # Spot check a few.
  config_str = """
      # Test without tf prefix, but using the prefix is strongly recommended!
      configurable.float32 = %float32

      # Test with tf prefix.
      configurable.string = %tf.string
      configurable.qint8 = %tf.qint8
  """
  config.parse_config(config_str)
  vals = configurable()
  self.assertIs(vals['float32'], tf.float32)
  self.assertIs(vals['string'], tf.string)
  self.assertIs(vals['qint8'], tf.qint8)
Example 6: test_qint8
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def test_qint8():
    tf_qint8 = as_dtype(tf.qint8).as_datatype_enum
    np_int8 = DataTypeConverter.get_generic_value(tf_qint8)
    assert np_int8 == np.dtype('int8')
    assert isinstance(np_int8, DataTypeConverter.__utensor_generic_type__)
    assert isinstance(DataTypeConverter.get_tf_value(np_int8),
                      DataTypeConverter.__tfproto_type__)
Example 7: load_quantized_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def load_quantized_model(model, ckpt_path, session, name):
    """Loads quantized model and dequantizes variables"""
    start_time = time.time()
    dequant_ops = []
    for tsr in tf.trainable_variables():
        with tf.variable_scope(tsr.name.split(":")[0], reuse=True):
            quant_tsr = tf.get_variable("quantized", dtype=tf.qint8)
            min_range = tf.get_variable("min_range")
            max_range = tf.get_variable("max_range")
            dequant_ops.append(tsr.assign(tf.dequantize(quant_tsr, min_range, max_range, "SCALED")))
    restore_list = [tsr for tsr in tf.global_variables() if tsr not in tf.trainable_variables()]
    saver = tf.train.Saver(restore_list)
    try:
        saver.restore(session, ckpt_path)
    except tf.errors.NotFoundError as e:
        utils.print_out("Can't load checkpoint")
        print_variables_in_ckpt(ckpt_path)
        utils.print_out("%s" % str(e))
    session.run(tf.tables_initializer())
    session.run(dequant_ops)
    utils.print_out(
        " loaded %s model parameters from %s, time %.2fs"
        % (name, ckpt_path, time.time() - start_time)
    )
    return model
Example 8: add_quatization_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def add_quatization_variables(model):
    """Add quantization variables to the graph."""
    with model.graph.as_default():
        for tsr in tf.trainable_variables():
            with tf.variable_scope(tsr.name.split(":")[0]):
                output, min_range, max_range = tf.quantize(
                    tsr, tf.reduce_min(tsr), tf.reduce_max(tsr), tf.qint8, mode="SCALED"
                )
                tf.get_variable(
                    "quantized",
                    initializer=output,
                    trainable=False,
                    collections=[_QUANTIZATION_COLLECTION],
                )
                tf.get_variable(
                    "min_range",
                    initializer=min_range,
                    trainable=False,
                    collections=[_QUANTIZATION_COLLECTION],
                )
                tf.get_variable(
                    "max_range",
                    initializer=max_range,
                    trainable=False,
                    collections=[_QUANTIZATION_COLLECTION],
                )
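Examples 7 and 8 are two halves of the same scheme. add_quatization_variables quantizes each trainable variable with tf.quantize in SCALED mode and stores the result in non-trainable "quantized", "min_range", and "max_range" variables inside that variable's scope, collected under _QUANTIZATION_COLLECTION (the collection constant and the code that saves it are not shown in the article). load_quantized_model then reopens those scopes with reuse=True and assigns tf.dequantize(quant_tsr, min_range, max_range, "SCALED") back onto the original float variables, so the restored model runs with dequantized float weights.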
Example 9: testQuantizedTypes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import qint8 [as alias]
def testQuantizedTypes(self):
  # Test with array.
  data = [(21,), (22,), (23,)]

  t = tensor_util.make_tensor_proto(data, dtype=tf.qint32)
  self.assertProtoEquals("""
    dtype: DT_QINT32
    tensor_shape { dim { size: 3 } }
    tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
    """, t)
  a = tensor_util.MakeNdarray(t)
  self.assertEquals(tf.qint32.as_numpy_dtype, a.dtype)
  self.assertAllEqual(np.array(data, dtype=a.dtype), a)

  t = tensor_util.make_tensor_proto(data, dtype=tf.quint8)
  self.assertProtoEquals("""
    dtype: DT_QUINT8
    tensor_shape { dim { size: 3 } }
    tensor_content: "\025\026\027"
    """, t)
  a = tensor_util.MakeNdarray(t)
  self.assertEquals(tf.quint8.as_numpy_dtype, a.dtype)
  self.assertAllEqual(np.array(data, dtype=a.dtype), a)

  t = tensor_util.make_tensor_proto(data, dtype=tf.qint8)
  self.assertProtoEquals("""
    dtype: DT_QINT8
    tensor_shape { dim { size: 3 } }
    tensor_content: "\025\026\027"
    """, t)
  a = tensor_util.MakeNdarray(t)
  self.assertEquals(tf.qint8.as_numpy_dtype, a.dtype)
  self.assertAllEqual(np.array(data, dtype=a.dtype), a)

  t = tensor_util.make_tensor_proto(data, dtype=tf.quint16)
  self.assertProtoEquals("""
    dtype: DT_QUINT16
    tensor_shape { dim { size: 3 } }
    tensor_content: "\025\000\026\000\027\000"
    """, t)
  a = tensor_util.MakeNdarray(t)
  self.assertEquals(tf.quint16.as_numpy_dtype, a.dtype)
  self.assertAllEqual(np.array(data, dtype=a.dtype), a)

  t = tensor_util.make_tensor_proto(data, dtype=tf.qint16)
  self.assertProtoEquals("""
    dtype: DT_QINT16
    tensor_shape { dim { size: 3 } }
    tensor_content: "\025\000\026\000\027\000"
    """, t)
  a = tensor_util.MakeNdarray(t)
  self.assertEquals(tf.qint16.as_numpy_dtype, a.dtype)
  self.assertAllEqual(np.array(data, dtype=a.dtype), a)