This page collects typical usage examples of the Python attribute onnx.TensorProto.BOOL. If you are unsure what TensorProto.BOOL does or how to use it, the curated examples below should help. You can also browse further usage examples of the class it belongs to, onnx.TensorProto.
Below are 15 code examples of TensorProto.BOOL, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python samples.
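Before the examples, here is a minimal, self-contained sketch of the two places TensorProto.BOOL typically appears: as the element type of a graph input/output declaration, and as the data type of a constant tensor (e.g. an If/Loop condition). It uses only the public onnx.helper API; the names are illustrative.
from onnx import TensorProto, helper

# Declare a boolean graph input: element type plus shape.
mask_info = helper.make_tensor_value_info("mask", TensorProto.BOOL, [3])

# Build a scalar boolean constant tensor.
cond = helper.make_tensor("cond", TensorProto.BOOL, dims=(), vals=[True])

# A tiny graph that negates the boolean input with the Not operator.
node = helper.make_node("Not", ["mask"], ["not_mask"])
graph = helper.make_graph(
    [node], "bool_demo",
    inputs=[mask_info],
    outputs=[helper.make_tensor_value_info("not_mask", TensorProto.BOOL, [3])])
model = helper.make_model(graph)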
Example 1: test_is_inf
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def test_is_inf(self):
  if legacy_opset_pre_ver(10):
    raise unittest.SkipTest("ONNX version {} doesn't support IsInf.".format(
        defs.onnx_opset_version()))
  inp = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf],
                 dtype=np.float32)
  expected_output = np.isinf(inp)
  node_def = helper.make_node("IsInf", ["X"], ["Y"])
  graph_def = helper.make_graph(
      [node_def],
      name="test_unknown_shape",
      inputs=[
          helper.make_tensor_value_info("X", TensorProto.FLOAT, [None]),
      ],
      outputs=[helper.make_tensor_value_info("Y", TensorProto.BOOL, [None])])
  tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
  output = tf_rep.run({"X": inp})
  np.testing.assert_equal(output["Y"], expected_output)
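To sanity-check the same graph without the TensorFlow backend, an equivalent run through onnxruntime might look like the sketch below (assuming onnxruntime is installed; the explicit opset pin is my addition, since IsInf requires opset 10):
import numpy as np
import onnxruntime as ort
from onnx import helper

model = helper.make_model(graph_def,
                          opset_imports=[helper.make_opsetid("", 10)])
sess = ort.InferenceSession(model.SerializeToString(),
                            providers=["CPUExecutionProvider"])
(y,) = sess.run(None, {"X": inp})
np.testing.assert_equal(y, np.isinf(inp))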
Example 2: _make_fake_if_op
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def _make_fake_if_op(self,
                     true_nodes,   # type: Sequence[NodeProto]
                     false_nodes,  # type: Sequence[NodeProto]
                     output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                     ):  # type: (...) -> List[NodeProto]
    true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
    true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
    false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
    if_inputs = ["condition"]
    if_outputs = [name for _, _, name in output_types]
    retval_nodes = [
        helper.make_node("Constant", [], ["condition"], value=true),
        helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                         else_branch=false_graph)
    ]
    return retval_nodes
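The helper above deliberately builds bare-bones branch graphs for checker tests. For contrast, a minimal well-formed If construction over a BOOL condition could look like this sketch (the names are mine, not from the source):
def make_simple_if():
    # Each branch must produce the If node's outputs; here a 1-element constant.
    def branch(name, val):
        const = helper.make_node(
            "Constant", [], ["res"],
            value=helper.make_tensor("v", TensorProto.FLOAT, [1], [val]))
        return helper.make_graph(
            [const], name, [],
            [helper.make_tensor_value_info("res", TensorProto.FLOAT, [1])])

    if_node = helper.make_node("If", ["cond"], ["res"],
                               then_branch=branch("then_g", 1.0),
                               else_branch=branch("else_g", -1.0))
    return helper.make_graph(
        [if_node], "if_demo",
        inputs=[helper.make_tensor_value_info("cond", TensorProto.BOOL, [])],
        outputs=[helper.make_tensor_value_info("res", TensorProto.FLOAT, [1])])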
Example 3: _test_finite_ops
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):
    indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0],
                              size=inshape).astype(dtype)
    outdata = outfunc(indata, **npargs)
    y = helper.make_node(opname, ['in'], ['out'], **kwargs)
    graph = helper.make_graph([y],
                              opname + '_test',
                              inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])
    model = helper.make_model(graph, producer_name=opname + '_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, indata, target, ctx, outdata.shape, dtype)
        tvm.testing.assert_allclose(outdata, tvm_out)
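In the TVM test suite this helper is driven by concrete predicate ops; a plausible pair of invocations (the exact call sites may differ) is:
_test_finite_ops((8, 8), np.isinf, {}, 'float32', 'IsInf', {})
_test_finite_ops((8, 8), np.isnan, {}, 'float32', 'IsNaN', {})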
Example 4: verify_not
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def verify_not(indata, dtype):
    x = indata.astype(dtype)
    outdata = np.logical_not(x)
    node = helper.make_node('Not', inputs=['in'], outputs=['out'])
    graph = helper.make_graph([node],
                              'not_test',
                              inputs=[helper.make_tensor_value_info("in", TensorProto.BOOL, list(x.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='not_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, tvm_out)
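A typical call might look like this (the shape is arbitrary; bool stands in for np.bool_):
verify_not(indata=np.random.choice(a=[False, True], size=(2, 3, 4)), dtype=bool)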
Example 5: verify_and
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def verify_and(indata, dtype):
    x = indata[0].astype(dtype)
    y = indata[1].astype(dtype)
    outdata = np.logical_and(x, y)
    node = helper.make_node('And', inputs=['in1', 'in2'], outputs=['out'])
    graph = helper.make_graph([node],
                              'and_test',
                              inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
                                      helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='and_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, tvm_out)
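And a matching two-operand call, again as a sketch rather than the suite's exact invocation:
verify_and(indata=[np.random.choice(a=[False, True], size=(3, 4)),
                   np.random.choice(a=[False, True], size=(3, 4))],
           dtype=bool)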
Example 6: verify_or
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def verify_or(indata, dtype):
    x = indata[0].astype(dtype)
    y = indata[1].astype(dtype)
    outdata = np.logical_or(x, y)
    node = helper.make_node('Or', inputs=['in1', 'in2'], outputs=['out'])
    graph = helper.make_graph([node],
                              'or_test',
                              inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
                                      helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='or_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, tvm_out)
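Exercised the same way as verify_and, for example:
verify_or(indata=[np.random.choice(a=[False, True], size=(5,)) for _ in range(2)],
          dtype=bool)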
Example 7: _transform_coreml_dtypes
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def _transform_coreml_dtypes(builder,  # type: NeuralNetworkBuilder
                             inputs,   # type: List[EdgeInfo]
                             outputs   # type: List[EdgeInfo]
                             ):
    # type: (...) -> None
    '''Make sure ONNX input/output data types are mapped to the equivalent CoreML types.
    '''
    for i, input_ in enumerate(inputs):
        onnx_type = input_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        else:
            raise TypeError("Input must be of type FLOAT, DOUBLE, INT32 or INT64")
    for i, output_ in enumerate(outputs):
        onnx_type = output_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        else:
            raise TypeError("Output must be of type FLOAT, DOUBLE, INT32 or INT64")
Example 8: add_output_proto
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def add_output_proto(self, node):
  output_onnx_type = node.attr.get("T", TensorProto.BOOL)
  for i, output_shape in enumerate(node.attr["_output_shapes"]):
    output_name = node.name + ":{}".format(i) if i > 0 else node.name
    self._outputs_proto.append(
        make_tensor_value_info(output_name, output_onnx_type, output_shape))
Example 9: add_value_info_proto
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def add_value_info_proto(self, node):
  node_onnx_type = node.attr.get("T", TensorProto.BOOL)
  for i, output_shape in enumerate(node.attr["_output_shapes"]):
    node_name = node.name + ":{}".format(i) if i > 0 else node.name
    value_info_proto = make_tensor_value_info(node_name, node_onnx_type,
                                              output_shape)
    self._value_info_proto.append(value_info_proto)
Example 10: test_cast
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def test_cast(self):
  if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
    test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                  ("INT8", tf.int8), ("UINT16", tf.uint16),
                  ("INT16", tf.int16), ("INT32", tf.int32),
                  ("INT64", tf.int64), ("BOOL", tf.bool),
                  ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                  ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
  else:
    test_cases = [(TensorProto.FLOAT, tf.float32),
                  (TensorProto.UINT8, tf.uint8),
                  (TensorProto.INT8, tf.int8),
                  (TensorProto.UINT16, tf.uint16),
                  (TensorProto.INT16, tf.int16),
                  (TensorProto.INT32, tf.int32),
                  (TensorProto.INT64, tf.int64),
                  (TensorProto.BOOL, tf.bool),
                  (TensorProto.FLOAT16, tf.float16),
                  (TensorProto.DOUBLE, tf.float64),
                  (TensorProto.COMPLEX64, tf.complex64),
                  (TensorProto.COMPLEX128, tf.complex128)]
  if not legacy_opset_pre_ver(9):
    test_cases.append((TensorProto.STRING, tf.string))
  for ty, tf_type in test_cases:
    node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
    vector = [2, 3]
    output = run_node(node_def, [vector])
    np.testing.assert_equal(output["output"].dtype, tf_type)
  if not legacy_opset_pre_ver(9):
    test_cases2 = [(TensorProto.FLOAT, tf.float32),
                   (TensorProto.INT32, tf.int32),
                   (TensorProto.INT64, tf.int64),
                   (TensorProto.DOUBLE, tf.float64)]
    for ty, tf_type in test_cases2:
      node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
      vector = ['2', '3']
      output = run_node(node_def, [vector])
      np.testing.assert_equal(output["output"].dtype, tf_type)
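Note that BOOL also works as a Cast target; numerically it follows truthiness, so zero maps to False and anything else to True. A quick sketch using the same run_node harness (my example, not part of the suite):
node_def = helper.make_node("Cast", ["input"], ["output"], to=TensorProto.BOOL)
output = run_node(node_def, [np.array([0.0, 1.0, -2.5], dtype=np.float32)])
np.testing.assert_equal(output["output"], np.array([False, True, True]))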
Example 11: test_compress
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def test_compress(self):
  if legacy_opset_pre_ver(9):
    raise unittest.SkipTest(
        "ONNX version {} doesn't support Compress.".format(
            defs.onnx_opset_version()))
  axis = 1
  node_def = helper.make_node("Compress",
                              inputs=['X', 'condition'],
                              outputs=['Y'],
                              axis=axis)
  graph_def = helper.make_graph(
      [node_def],
      name="test_unknown_shape",
      inputs=[
          helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                        [None, None, None]),
          helper.make_tensor_value_info("condition", TensorProto.BOOL, [None])
      ],
      outputs=[
          helper.make_tensor_value_info("Y", TensorProto.FLOAT,
                                        [None, None, None])
      ])
  x = self._get_rnd_float32(shape=[5, 5, 5])
  cond = np.array([1, 0, 1]).astype(bool)  # the condition input is declared BOOL
  tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
  output = tf_rep.run({"X": x, "condition": cond})
  np.testing.assert_almost_equal(output['Y'], np.compress(cond, x, axis=axis))
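ONNX Compress follows numpy.compress semantics, so the BOOL condition may be shorter than the dimension it filters. A pure-numpy illustration (mine) of the behavior the backend must reproduce:
x = np.arange(2 * 5).reshape(2, 5).astype(np.float32)
cond = np.array([True, False, True])  # shorter than axis length 5 is fine
assert np.compress(cond, x, axis=1).shape == (2, 2)  # keeps columns 0 and 2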
Example 12: _make_fake_loop_op
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def _make_fake_loop_op(self,
                       body_nodes,   # type: Sequence[NodeProto]
                       input_types,  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                       output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                       ):  # type: (...) -> List[NodeProto]
    zero = helper.make_tensor("trip_count_value", TensorProto.INT32, (), [10])
    true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
    # lcd is a dummy loop-carried dependency that only exists because
    # right now the schema checker is broken and assumes a variadic
    # input needs at least one value.
    graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
                    helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
    for type, shape, name in input_types:
        graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
    graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
    for type, shape, name in output_types:
        graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
    body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
                                   graph_outputs)
    loop_inputs = ["trip_count", "condition"]
    loop_inputs.extend([name for _, _, name in input_types])
    # TODO: fix checker to accept 0-input variadic inputs
    if len(loop_inputs) == 2:
        loop_inputs.append("")
    loop_outputs = [name for _, _, name in output_types]
    retval_nodes = [
        helper.make_node("Constant", [], ["trip_count"], value=zero),
        helper.make_node("Constant", [], ["condition"], value=true),
        helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
    ]
    return retval_nodes
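Callers pass body nodes plus the types of the loop-carried values; the helper prefixes carried names with "_" inside the body graph. A hypothetical invocation (the names are mine) that threads a single carried value through an Identity body:
loop_nodes = self._make_fake_loop_op(
    [helper.make_node("Identity", ["_state"], ["_state_out"])],
    [(TensorProto.FLOAT, (2, 3), "state")],      # loop-carried input
    [(TensorProto.FLOAT, (2, 3), "state_out")])  # loop-carried output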
Example 13: test_nested_graph
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def test_nested_graph(self):  # type: () -> None
    n1 = helper.make_node(
        "Scale", ["X"], ["Y"], scale=2., name="n1")
    n2 = helper.make_node(
        "Scale", ["Y"], ["Z"], scale=3., name="n2")
    graph = helper.make_graph(
        [n1, n2],
        "nested",
        inputs=[
            helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
        ],
        outputs=[
            helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])
        ]
    )
    i1 = helper.make_node(
        "If", ["cond"], ["Z"], then_branch=graph, else_branch=graph)
    graph = helper.make_graph(
        [i1],
        "test",
        inputs=[
            helper.make_tensor_value_info("cond", TensorProto.BOOL, [1])
        ],
        outputs=[],
    )
    checker.check_graph(graph)
    # self.assertRaises(checker.ValidationError, checker.check_graph, graph)
Example 14: _logical_binary_op
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def _logical_binary_op(self, op, input_type):  # type: (Text, TensorProto.DataType) -> None
    graph = self._make_graph(
        [('x', input_type, (30, 4, 5)),
         ('y', input_type, (30, 4, 5))],
        [make_node(op, ['x', 'y'], 'z')],
        [])
    self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
Example 15: test_logical_and
# Required import: from onnx import TensorProto [as alias]
# Or: from onnx.TensorProto import BOOL [as alias]
def test_logical_and(self):  # type: () -> None
    self._logical_binary_op('And', TensorProto.BOOL)