This article collects typical usage examples of the TensorProto.INT8 attribute from the Python onnx package. If you are wondering what TensorProto.INT8 is and how it is used in practice, the curated examples below should help; see also the containing class, onnx.TensorProto, for related usage.
Four code examples using TensorProto.INT8 are shown below, ordered by popularity.
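For quick orientation before the examples: TensorProto.INT8 is an integer constant from ONNX's DataType enum, used wherever a tensor element type must be declared, such as the 'to' attribute of a Cast node or a tensor value info. A minimal sketch (the names x and x_int8 are illustrative only):

from onnx import TensorProto, helper

# TensorProto.INT8 is a plain integer enum constant (its value is 3).
print(TensorProto.INT8)

# Declare a value info whose element type is int8 (shape [2] is arbitrary).
x_int8 = helper.make_tensor_value_info("x_int8", TensorProto.INT8, [2])

# Request a cast to int8 via the Cast node's 'to' attribute.
cast_node = helper.make_node("Cast", ["x"], ["x_int8"], to=TensorProto.INT8)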
Example 1: _convert_cast
# Required import: from onnx import TensorProto [as alias]
# The INT8 attribute is then referenced as TensorProto.INT8
def _convert_cast(builder, node, graph, err):
    '''
    Perform cast operation in CoreML
    e.g. Casting from Float (assumed) to Int maps to Floor Layer
    For Others, add copy layer
    '''
    convert_to = node.attrs.get('to')
    convert_to_int = set({TensorProto.UINT8, TensorProto.INT8, TensorProto.UINT16, TensorProto.INT32,
                          TensorProto.INT64, TensorProto.UINT32, TensorProto.UINT64})
    ## TODO: Add support for conversion from STRING TO FLOAT
    ##       Currently, such input will error out in parsing
    if convert_to in convert_to_int:
        builder.add_floor(
            name=node.name,
            input_name=node.inputs[0],
            output_name=node.outputs[0]
        )
    else:
        # load_input_constants is a helper defined elsewhere in the converter module.
        load_input_constants(builder, node, graph, err)
        builder.add_activation(
            name=node.name,
            non_linearity='LINEAR',
            input_name=node.inputs[0],
            output_name=node.outputs[0],
            params=[1.0, 0.0]
        )
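Two details of the converter above are worth spelling out. First, ONNX stores the Cast target as a plain integer: the 'to' attribute holds a TensorProto DataType enum value, so the set-membership test is a cheap integer comparison. Second, the LINEAR activation with params=[1.0, 0.0] computes y = 1.0*x + 0.0, i.e. an identity copy layer. A standalone sketch of the same dispatch on a hand-built node (illustrative only, not part of the converter):

from onnx import TensorProto, helper

INT_TARGETS = {TensorProto.UINT8, TensorProto.INT8, TensorProto.UINT16,
               TensorProto.INT32, TensorProto.INT64, TensorProto.UINT32,
               TensorProto.UINT64}

node = helper.make_node("Cast", ["x"], ["y"], to=TensorProto.INT8)
to_value = node.attribute[0].i    # enum values are stored as ints; INT8 == 3
print(to_value in INT_TARGETS)    # True -> this node would map to a Floor layer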
Example 2: test_cast
# Required import: from onnx import TensorProto [as alias]
# The INT8 attribute is then referenced as TensorProto.INT8
def test_cast(self):
    # run_node and the legacy_* version checks are helpers from the
    # onnx-tensorflow test setup.
    if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
        # Before onnx 1.2 / opset 6, Cast's 'to' attribute was a string
        # type name rather than a TensorProto enum value.
        test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                      ("INT8", tf.int8),
                      ("UINT16", tf.uint16), ("INT16", tf.int16),
                      ("INT32", tf.int32), ("INT64", tf.int64), ("BOOL", tf.bool),
                      ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                      ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
    else:
        test_cases = [(TensorProto.FLOAT, tf.float32),
                      (TensorProto.UINT8, tf.uint8), (TensorProto.INT8, tf.int8),
                      (TensorProto.UINT16, tf.uint16),
                      (TensorProto.INT16, tf.int16),
                      (TensorProto.INT32, tf.int32),
                      (TensorProto.INT64, tf.int64), (TensorProto.BOOL, tf.bool),
                      (TensorProto.FLOAT16, tf.float16),
                      (TensorProto.DOUBLE, tf.float64),
                      (TensorProto.COMPLEX64, tf.complex64),
                      (TensorProto.COMPLEX128, tf.complex128)]
    if not legacy_opset_pre_ver(9):
        # Casting involving strings is only supported from opset 9 onwards.
        test_cases.append((TensorProto.STRING, tf.string))
    for ty, tf_type in test_cases:
        node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
        vector = [2, 3]
        output = run_node(node_def, [vector])
        np.testing.assert_equal(output["output"].dtype, tf_type)
    if not legacy_opset_pre_ver(9):
        # String inputs can be cast to these numeric types from opset 9 on.
        test_cases2 = [(TensorProto.FLOAT, tf.float32),
                       (TensorProto.INT32, tf.int32),
                       (TensorProto.INT64, tf.int64),
                       (TensorProto.DOUBLE, tf.float64)]
        for ty, tf_type in test_cases2:
            node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
            vector = ['2', '3']
            output = run_node(node_def, [vector])
            np.testing.assert_equal(output["output"].dtype, tf_type)
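The assertions above rely on a fixed correspondence between TensorProto enum values and concrete dtypes. Outside the test harness, onnx exposes this correspondence directly; a small sketch using onnx.mapping (available in the onnx versions this test targets, though newer releases prefer helper.tensor_dtype_to_np_dtype):

from onnx import TensorProto, mapping

# Look up the numpy dtype corresponding to a TensorProto enum value.
print(mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.INT8])   # int8
print(mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.FLOAT])  # float32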
Example 3: _convert_cast
# Required import: from onnx import TensorProto [as alias]
# The INT8 attribute is then referenced as TensorProto.INT8
def _convert_cast(builder, node, graph, err):
    """
    Perform cast operation in CoreML
    e.g. Casting from Float (assumed) to Int maps to Floor Layer
    For Others, add copy layer
    """
    convert_to = node.attrs.get("to")
    convert_to_int = set(
        {
            TensorProto.UINT8,
            TensorProto.INT8,
            TensorProto.UINT16,
            TensorProto.INT32,
            TensorProto.INT64,
            TensorProto.UINT32,
            TensorProto.UINT64,
        }
    )
    ## TODO: Add support for conversion from STRING TO FLOAT
    ##       Currently, such input will error out in parsing
    if convert_to in convert_to_int:
        builder.add_floor(
            name=node.name, input_name=node.inputs[0], output_name=node.outputs[0]
        )
    else:
        load_input_constants(builder, node, graph, err)
        builder.add_activation(
            name=node.name,
            non_linearity="LINEAR",
            input_name=node.inputs[0],
            output_name=node.outputs[0],
            params=[1.0, 0.0],
        )
Example 4: test_all_tensors_f32
# Required import: from onnx import TensorProto [as alias]
# The INT8 attribute is then referenced as TensorProto.INT8
def test_all_tensors_f32():
    # oh is onnx.helper; ModelWrapper, InferShapes, and ta (the module
    # providing the all_tensors_f32 analysis) are imported by the test module.
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, mul_param],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    ret = model.analysis(ta.all_tensors_f32)
    assert ret["all_tensors_f32"] is True
    # Rebuild the same graph, but declare add_param as INT8: the analysis
    # must now report that not all tensors are float32.
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.INT8, [2])
    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, mul_param],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    ret = model.analysis(ta.all_tensors_f32)
    assert ret["all_tensors_f32"] is False
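The analysis flips from True to False once add_param is declared as TensorProto.INT8. As an illustration of what such a check involves (a standalone sketch, not FINN's actual all_tensors_f32 implementation), the element type recorded in each value info can be compared against TensorProto.FLOAT:

from onnx import TensorProto

def all_value_infos_f32(modelproto):
    # Gather every declared tensor: graph inputs, outputs, and intermediates.
    graph = modelproto.graph
    value_infos = list(graph.input) + list(graph.output) + list(graph.value_info)
    # True only if every declared element type is float32.
    return all(vi.type.tensor_type.elem_type == TensorProto.FLOAT
               for vi in value_infos)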