This article collects typical usage examples of the TensorProto.INT32 attribute from the Python onnx package: what TensorProto.INT32 is, how it is used, and what real code that uses it looks like. For further context, see the containing class, onnx.TensorProto.
The 15 code examples below show TensorProto.INT32 in use, ordered by popularity.
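For orientation before the examples: TensorProto.INT32 is simply a value of the ONNX DataType protobuf enum (numerically 6), passed wherever the onnx helper APIs expect an element type. A minimal sketch:

import onnx
from onnx import TensorProto, helper

print(int(TensorProto.INT32))  # 6: the DataType enum value for 32-bit signed integers

# Declare a dynamically shaped INT32 graph input.
x_info = helper.make_tensor_value_info("x", TensorProto.INT32, [None, None])

# Build a small INT32 constant tensor.
t = helper.make_tensor("t", TensorProto.INT32, dims=[3], vals=[1, 2, 3])
assert t.data_type == TensorProto.INT32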
Example 1: cast_input_to_int32
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def cast_input_to_int32(self, input_name):
    cast_output = input_name + '_int32'

    # Avoid consecutive Cast nodes: if the input already comes from a Cast,
    # cast from that node's own input instead.
    inputs = [input_name]
    output_name_to_node = self.output_name_to_node()
    if input_name in output_name_to_node:
        parent_node = output_name_to_node[input_name]
        if parent_node and parent_node.op_type == 'Cast':
            inputs = [parent_node.input[0]]

    cast_node = onnx.helper.make_node('Cast', inputs=inputs, outputs=[cast_output])
    cast_node.attribute.extend([onnx.helper.make_attribute("to", int(TensorProto.INT32))])
    self.add_node(cast_node)
    return cast_output, cast_node
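As a side note, the two-step attribute construction above (make_node followed by attribute.extend) is equivalent to passing the attribute as a keyword argument, since onnx.helper.make_node forwards extra keyword arguments through make_attribute. A standalone sketch, with an illustrative tensor name:

import onnx
from onnx import TensorProto

# Equivalent single-step construction of the same Cast node.
cast_node = onnx.helper.make_node(
    'Cast',
    inputs=['input_ids'],        # illustrative input name, not from the source
    outputs=['input_ids_int32'],
    to=TensorProto.INT32,        # 'to' attribute set directly
)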
Example 2: test_eye_like
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def test_eye_like(self):
    if legacy_opset_pre_ver(9):
        raise unittest.SkipTest("ONNX version {} doesn't support EyeLike.".format(
            defs.onnx_opset_version()))
    shape = [6, 10]
    off_diagonal_offset = -3
    x = self._get_rnd_int(0, 100, shape=shape)
    y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
    node_def = helper.make_node("EyeLike", ["x"], ["y"],
                                dtype=TensorProto.FLOAT,
                                k=off_diagonal_offset)
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.INT32, [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("y", TensorProto.FLOAT, [None, None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"x": x})
    np.testing.assert_equal(output["y"], y)
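As the reference output y above already suggests, EyeLike takes only the shape (and optionally the dtype) from its input and emits ones on the k-th diagonal, exactly like numpy's eye. A quick numpy sketch of that equivalence:

import numpy as np

x = np.zeros((6, 10), dtype=np.int32)  # only the shape of x matters to EyeLike
k = -3                                 # diagonal offset, as in the test above
y = np.eye(x.shape[0], x.shape[1], k=k, dtype=np.float32)
assert y[3, 0] == 1.0 and y[4, 1] == 1.0  # ones sit on the k=-3 diagonal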
Example 3: test_gather_nd
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def test_gather_nd(self):
    if legacy_opset_pre_ver(11):
        raise unittest.SkipTest(
            "ONNX version {} doesn't support GatherND.".format(
                defs.onnx_opset_version()))
    # Valid positive and negative indices for elements.
    data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    indices = np.array([[0, 0], [1, -3]], dtype=np.int64)
    ref_output = np.array([1, 4], dtype=np.int32)
    node_def = helper.make_node("GatherND", ["data", "indices"], ["outputs"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.INT32,
                                          [None, None]),
            helper.make_tensor_value_info("indices", TensorProto.INT64,
                                          [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("outputs", TensorProto.INT32, [None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"data": data, "indices": indices})
    np.testing.assert_almost_equal(output["outputs"], ref_output)
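For intuition, GatherND with 2-D indices picks one element per index row, where each row is a full coordinate into data and negative components count from the end of their axis. A numpy sketch reproducing ref_output above:

import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
indices = np.array([[0, 0], [1, -3]], dtype=np.int64)

# Each row of `indices` addresses one element of `data`.
gathered = np.array([data[tuple(idx)] for idx in indices])
print(gathered)  # [1 4] -- data[0, 0] and data[1, -3] (i.e. data[1, 0])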
Example 4: verify_gather
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def verify_gather(in_shape, indices, axis, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = np.take(x, indices, axis=axis)  # numpy reference for Gather

    y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis)

    graph = helper.make_graph([y],
                              'gather_test',
                              inputs=[helper.make_tensor_value_info("in",
                                          TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                          TensorProto.INT32, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape)
        np.testing.assert_allclose(out_np, tvm_out)
Example 5: verify_gather
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
# A near-duplicate of Example 4; it differs only in using
# tvm.testing.assert_allclose instead of np.testing.assert_allclose.
def verify_gather(in_shape, indices, axis, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = np.take(x, indices, axis=axis)

    y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis)

    graph = helper.make_graph([y],
                              'gather_test',
                              inputs=[helper.make_tensor_value_info("in",
                                          TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                          TensorProto.INT32, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, indices], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out)
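A test driver would call this helper with concrete shapes and index patterns; the exact invocations are not shown in the source, but calls along these lines would exercise scalar, 1-D, and nested indices (arguments here are illustrative):

# Illustrative invocations, not taken from the original test suite:
verify_gather((4,), [1], 0, 'int32')                   # 1-D data, single index
verify_gather((1, 4), [0], 0, 'int32')                 # 2-D data, axis 0
verify_gather((4,), [[[1, 0], [0, 1]]], 0, 'float32')  # nested index tensor
verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, 'int32')  # gather along axis 1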
Example 6: verify_scatter
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def verify_scatter(in_shape, indices, axis):
    x = np.random.uniform(size=in_shape).astype("float32")
    indices = np.array(indices, dtype="int32")
    updates = np.random.uniform(size=indices.shape).astype("float32")

    y = helper.make_node("ScatterElements", ['data', 'indices', 'updates'], ['output'], axis=axis)

    graph = helper.make_graph([y],
                              'scatter_test',
                              inputs=[helper.make_tensor_value_info("data",
                                          TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                          TensorProto.INT32, list(indices.shape)),
                                      helper.make_tensor_value_info("updates",
                                          TensorProto.FLOAT, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("output",
                                           TensorProto.FLOAT, list(in_shape))])
    model = helper.make_model(graph, producer_name='scatter_test')

    # Compare TVM against onnxruntime as the reference implementation.
    onnx_out = get_onnxruntime_output(model, [x, indices, updates])
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, indices, updates], target, ctx, onnx_out[0].shape)
        tvm.testing.assert_allclose(onnx_out[0], tvm_out)
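ScatterElements is the inverse pattern of Gather: each element of updates is written into a copy of data, with the coordinate along axis replaced by the matching entry of indices. A numpy sketch of the axis=0 case (shapes chosen here for illustration):

import numpy as np

data = np.zeros((3, 3), dtype=np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)

out = data.copy()
for i in range(indices.shape[0]):
    for j in range(indices.shape[1]):
        out[indices[i, j], j] = updates[i, j]  # axis=0: index replaces the row coordinate
print(out)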
Example 7: verify_gather_nd
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def verify_gather_nd(in_shape, indices, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = topi.testing.gather_nd_python(x, indices)  # numpy reference for GatherND

    y = helper.make_node("GatherND", ['in', 'indices'], ['out'])

    graph = helper.make_graph([y],
                              'gather_test',
                              inputs=[helper.make_tensor_value_info("in",
                                          TensorProto.FLOAT, list(in_shape)),
                                      helper.make_tensor_value_info("indices",
                                          TensorProto.INT32, list(indices.shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                           TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, indices], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out)
Example 8: cast_graph_input_to_int32
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def cast_graph_input_to_int32(self, input_name):
    graph_input = self.find_graph_input(input_name)
    if graph_input is not None and graph_input.type.tensor_type.elem_type != TensorProto.INT32:
        cast_output, cast_node = self.cast_input_to_int32(input_name)
        logger.debug(f"Casted graph input {input_name} to int32")
        return True, cast_output

    logger.debug(f"Did not cast graph input {input_name} to int32: found {graph_input is not None}")
    return False, input_name
Example 9: remove_cast_int32
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def remove_cast_int32(self, input_name):
    input_name_to_nodes = self.input_name_to_nodes()
    nodes = input_name_to_nodes[input_name]
    for node in nodes:
        if node.op_type == "Cast":
            # Check whether this Cast targets INT32 via its 'to' attribute.
            is_int32 = False
            for att in node.attribute:
                if att.name == 'to' and att.i == int(TensorProto.INT32):
                    is_int32 = True
                    break
            if is_int32:
                # Drop the Cast node and rewire its consumers to the raw input.
                output_name = node.output[0]
                self.remove_node(node)
                self.replace_input_of_all_nodes(output_name, input_name)
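The inner attribute loop can also be expressed with onnx.helper.get_attribute_value, which returns the integer payload (att.i) for INT attributes. A hedged sketch of the same check on a freshly built node:

import onnx
from onnx import TensorProto

node = onnx.helper.make_node('Cast', ['x'], ['y'], to=TensorProto.INT32)
is_int32 = any(att.name == 'to' and
               onnx.helper.get_attribute_value(att) == int(TensorProto.INT32)
               for att in node.attribute)
assert is_int32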
Example 10: change_input_to_int32
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def change_input_to_int32(self):
    original_opset_version = self.model.opset_import[0].version
    graph = self.graph()

    batch_size, sequence_length = self.get_bert_input_shape()
    new_graph_inputs = []

    bert_inputs = self.get_bert_inputs()
    for input in graph.input:
        if input.name in bert_inputs:
            self.remove_cast_int32(input.name)
            input_shape = [batch_size if isinstance(batch_size, int) else 1,
                           sequence_length if isinstance(sequence_length, int) else 128]
            int32_input = onnx.helper.make_tensor_value_info(input.name, TensorProto.INT32, input_shape)
            new_graph_inputs.append(int32_input)
        else:
            new_graph_inputs.append(input)

    graph_def = onnx.helper.make_graph(graph.node,
                                       'int32 inputs',
                                       new_graph_inputs,
                                       graph.output,
                                       initializer=graph.initializer,
                                       value_info=graph.value_info)

    self.model = onnx.helper.make_model(graph_def, producer_name='bert model optimizer')

    if isinstance(batch_size, str) or isinstance(sequence_length, str):
        self.use_dynamic_axes(batch_size if isinstance(batch_size, str) else None,
                              sequence_length if isinstance(sequence_length, str) else None)

    # Restore the opset version, which make_model reset to the installed default.
    self.model.opset_import[0].version = original_opset_version
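The opset save/restore that frames this method is needed because onnx.helper.make_model fills opset_import with the default opset of the installed onnx package, discarding whatever the original model declared. A minimal sketch of the pattern in isolation (the function name is illustrative):

import onnx

def rebuild_with_same_opset(model, new_graph):
    # make_model resets opset_import to the installed default, so save and restore it.
    original_version = model.opset_import[0].version
    rebuilt = onnx.helper.make_model(new_graph, producer_name='bert model optimizer')
    rebuilt.opset_import[0].version = original_version
    return rebuilt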
Example 11: call
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def call(self, env, x):
    # Cast the operand's tensor to INT32 via the surrounding framework's castto helper.
    return castto(x.to_tensor(env).name, TensorProto.INT32, env)
Example 12: _transform_coreml_dtypes
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def _transform_coreml_dtypes(builder,   # type: NeuralNetworkBuilder
                             inputs,    # type: List[EdgeInfo]
                             outputs    # type: List[EdgeInfo]
                             ):
    # type: (...) -> None
    '''Make sure ONNX input/output data types are mapped to the equivalent CoreML types.'''
    for i, input_ in enumerate(inputs):
        onnx_type = input_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.BOOL:
            # BOOL is represented as a float32 multiarray in CoreML.
            _update_multiarray_to_float32(builder.spec.description.input[i])
        else:
            raise TypeError("Input must be of type FLOAT, DOUBLE, INT32 or INT64")

    for i, output_ in enumerate(outputs):
        onnx_type = output_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        else:
            raise TypeError("Output must be of type FLOAT, DOUBLE, INT32 or INT64")
Example 13: _convert_cast
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def _convert_cast(builder, node, graph, err):
    '''
    Perform the cast operation in CoreML.
    Casting from Float (assumed) to Int maps to a Floor layer;
    for other targets, a copy (identity linear activation) layer is added.
    '''
    convert_to = node.attrs.get('to')
    convert_to_int = {TensorProto.UINT8, TensorProto.INT8, TensorProto.UINT16, TensorProto.INT32,
                      TensorProto.INT64, TensorProto.UINT32, TensorProto.UINT64}
    ## TODO: Add support for conversion from STRING to FLOAT
    ## Currently, such input will error out in parsing
    if convert_to in convert_to_int:
        builder.add_floor(
            name=node.name,
            input_name=node.inputs[0],
            output_name=node.outputs[0]
        )
    else:
        load_input_constants(builder, node, graph, err)
        # Identity copy: y = 1.0 * x + 0.0
        builder.add_activation(
            name=node.name,
            non_linearity='LINEAR',
            input_name=node.inputs[0],
            output_name=node.outputs[0],
            params=[1.0, 0.0]
        )
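One caveat worth keeping in mind with this mapping: floor only matches a true integer cast for non-negative inputs, since numeric casts truncate toward zero while floor rounds toward negative infinity. A numpy sketch of the difference:

import numpy as np

x = np.array([1.7, -1.7], dtype=np.float32)
print(np.floor(x))          # [ 1. -2.]  -- what the Floor layer computes
print(x.astype(np.int32))   # [ 1 -1]    -- a true int cast truncates toward zero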
Example 14: test_cast
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def test_cast(self):
    if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
        # Very old ONNX releases identified the Cast target by name string.
        test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                      ("INT8", tf.int8),
                      ("UINT16", tf.uint16), ("INT16", tf.int16),
                      ("INT32", tf.int32), ("INT64", tf.int64), ("BOOL", tf.bool),
                      ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                      ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
    else:
        test_cases = [(TensorProto.FLOAT, tf.float32),
                      (TensorProto.UINT8, tf.uint8), (TensorProto.INT8, tf.int8),
                      (TensorProto.UINT16, tf.uint16),
                      (TensorProto.INT16, tf.int16),
                      (TensorProto.INT32, tf.int32),
                      (TensorProto.INT64, tf.int64), (TensorProto.BOOL, tf.bool),
                      (TensorProto.FLOAT16, tf.float16),
                      (TensorProto.DOUBLE, tf.float64),
                      (TensorProto.COMPLEX64, tf.complex64),
                      (TensorProto.COMPLEX128, tf.complex128)]
    if not legacy_opset_pre_ver(9):
        test_cases.append((TensorProto.STRING, tf.string))
    for ty, tf_type in test_cases:
        node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
        vector = [2, 3]
        output = run_node(node_def, [vector])
        np.testing.assert_equal(output["output"].dtype, tf_type)

    if not legacy_opset_pre_ver(9):
        # Opset 9 added string-to-number casts.
        test_cases2 = [(TensorProto.FLOAT, tf.float32),
                       (TensorProto.INT32, tf.int32),
                       (TensorProto.INT64, tf.int64),
                       (TensorProto.DOUBLE, tf.float64)]
        for ty, tf_type in test_cases2:
            node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
            vector = ['2', '3']
            output = run_node(node_def, [vector])
            np.testing.assert_equal(output["output"].dtype, tf_type)
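The first branch exists because very old ONNX releases took the Cast 'to' attribute as a type-name string rather than an enum value. With current onnx, the protobuf enum converts between the two forms directly; a quick sketch:

from onnx import TensorProto

print(TensorProto.DataType.Value('INT32'))           # 6
print(TensorProto.DataType.Name(TensorProto.INT32))  # 'INT32'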
Example 15: test_constant_of_shape
# Required import: from onnx import TensorProto
# Or: from onnx.TensorProto import INT32
def test_constant_of_shape(self):
    if defs.onnx_opset_version() < 9:
        raise unittest.SkipTest(
            "ONNX version {} doesn't support ConstantOfShape.".format(
                defs.onnx_opset_version()))
    # Fill value 1.0 as FLOAT: the output dtype follows the value tensor.
    v = helper.make_tensor("value", TensorProto.FLOAT, [1], [1])
    node_def = helper.make_node("ConstantOfShape", ["X"], ["Y"], value=v)
    x = np.array([4, 3, 2])
    output = run_node(node_def, inputs=[x])
    np.testing.assert_almost_equal(output["Y"], np.ones(x, dtype=np.float32))
    # Fill value 0 as INT32.
    v = helper.make_tensor("value", TensorProto.INT32, [1], [0])
    node_def = helper.make_node("ConstantOfShape", ["X"], ["Y"], value=v)
    x = np.array([10, 6])
    output = run_node(node_def, inputs=[x])
    np.testing.assert_almost_equal(output["Y"], np.zeros(x, dtype=np.int32))
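ConstantOfShape fills a tensor whose shape is the runtime value of X with the single element of value, and the output dtype follows that element, which is why the second case yields int32 zeros. A numpy sketch of that case:

import numpy as np

x = np.array([10, 6])                      # runtime shape input
y = np.full(tuple(x), 0, dtype=np.int32)   # the value tensor held a single int32 zero
assert y.shape == (10, 6) and y.dtype == np.int32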