This article collects typical usage examples of onnx.mapping in Python. onnx.mapping is the submodule of the onnx package that provides lookup tables for converting between NumPy dtypes and ONNX tensor element types. If you are wondering how onnx.mapping is used in practice, or are looking for working examples, the hand-picked code samples below may help. You can also explore further usage examples of the package it belongs to, onnx.
Eleven code examples of onnx.mapping are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
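Before the examples, here is a minimal stand-alone sketch (not taken from the examples below) of the two lookup tables onnx.mapping exposes: NP_TYPE_TO_TENSOR_TYPE maps a NumPy dtype to an ONNX TensorProto element-type enum, and TENSOR_TYPE_TO_NP_TYPE maps back.

import numpy as np
from onnx import mapping, TensorProto

# NumPy dtype -> ONNX TensorProto element-type enum
assert mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(np.float32)] == TensorProto.FLOAT

# ONNX TensorProto element-type enum -> NumPy dtype
assert mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.FLOAT] == np.dtype(np.float32)

Note that recent onnx releases deprecate onnx.mapping in favour of helper functions such as onnx.helper.tensor_dtype_to_np_dtype; the examples below use the older module directly.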
Example 1: test_constant
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def test_constant(value_type):
    values = np.random.randn(5, 5).astype(value_type)
    node = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=['values'],
        value=onnx.helper.make_tensor(
            name='const_tensor',
            data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(value_type)],
            dims=values.shape,
            vals=values.flatten()))
    ng_results = run_node(node, [])
    assert np.allclose(ng_results, [values])

# See https://github.com/onnx/onnx/issues/1190
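The value_type argument is injected by the test framework; the parametrize decorator is omitted from this listing. A plausible (assumed, not shown in the original) pytest parametrization would look like this:

import numpy as np
import pytest

# hypothetical parametrization covering a few common dtypes
@pytest.mark.parametrize('value_type', [np.float32, np.float64, np.int32, np.int64])
def test_constant(value_type):
    ...  # body as in Example 1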
Example 2: run_transform
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def run_transform(self, graph, node):
    mapping = self.get_mapping(node)
    bn, mul, add = mapping['bn'], mapping['mul'], mapping['add']
    t = graph.tensor_dict
    scale = t[bn.input[1]]
    bias = t[bn.input[2]]
    _mul_tensor = t.get(mul.input[0], t[mul.input[1]])
    mul_tensor = np.squeeze(_mul_tensor)
    _add_tensor = t.get(add.input[0], t[add.input[1]])
    add_tensor = np.squeeze(_add_tensor)
    # fold the Mul constant into the BatchNormalization scale,
    # and the Mul/Add constants into its bias
    t[bn.input[1]] = np.multiply(scale, mul_tensor)
    _bias = np.multiply(bias, mul_tensor)
    t[bn.input[2]] = np.add(_bias, add_tensor)
    # connect the output of bn to the output of add
    bn.output[0] = add.output[0]
    # remove the now-redundant mul and add nodes
    graph.remove_node(mul.name)
    graph.remove_node(add.name)
    return graph
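The rewrite relies on the identity (scale * x_hat + bias) * m + a == (scale * m) * x_hat + (bias * m + a), where x_hat is the normalized BatchNormalization input. A small stand-alone NumPy check with hypothetical values (not part of the transform itself) illustrates why folding the Mul/Add into the scale and bias is safe:

import numpy as np

np.random.seed(0)
x_hat = np.random.randn(4)                       # normalized per-channel values
scale, bias = np.random.randn(4), np.random.randn(4)
m, a = 0.5, 2.0                                  # scalar Mul / Add constants

original = (scale * x_hat + bias) * m + a
fused = (scale * m) * x_hat + (bias * m + a)
assert np.allclose(original, fused)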
Example 3: test_cast_to_bool
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def test_cast_to_bool(val_type, input_data):
    expected = np.array(input_data, dtype=val_type)
    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
Example 4: test_cast_to_float
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def test_cast_to_float(val_type, range_start, range_end, in_dtype):
    np.random.seed(133391)
    input_data = np.random.randint(range_start, range_end, size=(2, 2), dtype=in_dtype)
    expected = np.array(input_data, dtype=val_type)
    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
Example 5: test_cast_to_int
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def test_cast_to_int(val_type):
    np.random.seed(133391)
    input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
    expected = np.array(input_data, dtype=val_type)
    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
Example 6: test_cast_to_uint
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def test_cast_to_uint(val_type):
    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
    expected = np.array(input_data, dtype=val_type)
    model = get_node_model('Cast', input_data, opset=6,
                           to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type])
    result = run_model(model, [input_data])
    assert np.allclose(result, expected)
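The get_node_model and run_model helpers used in Examples 3 to 6 belong to the surrounding test suite and are not shown here. A rough sketch of the node they build, using only onnx.helper and onnx.mapping (an assumption, not the helpers' actual implementation):

import numpy as np
import onnx

def make_cast_node(val_type):
    # 'to' is the ONNX element-type enum for the target NumPy dtype
    return onnx.helper.make_node(
        'Cast', inputs=['x'], outputs=['y'],
        to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(val_type)])

node = make_cast_node(np.int32)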
Example 7: get_dtype
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def get_dtype(self):  # type: () -> numpy.dtype
    """Return the NumPy data type for this value."""
    return onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[self.type.tensor_type.elem_type]
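get_dtype reads the elem_type enum from a ValueInfoProto-like self.type and maps it back to a NumPy dtype. A stand-alone illustration of the same lookup (using a hypothetical value_info, not the class from this example):

import numpy as np
import onnx

value_info = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.INT64, [2, 3])
dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[value_info.type.tensor_type.elem_type]
assert dtype == np.dtype(np.int64)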
Example 8: get_mapping
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def get_mapping(self, node, pattern=None):
    '''
    Given that `node` is the root of a matched subgraph, return a dict
    mapping the names of the OpTypePatterns to their matched OnnxNodes.

    Parameters
    ----------
    node : :class:`OnnxNode` object
        The root node of a matching subgraph.
    pattern : :class:`OpTypePattern` object, optional
        The matching pattern. If None, defaults to self.pattern.

    Returns
    -------
    dict
        Keys are OpTypePattern names, values are the matched OnnxNodes.
    '''
    if pattern is None:
        if self.pattern is None:
            raise ValueError('No pattern to match.')
        pattern = self.pattern

    mapping_dict = {}

    def _mapping(node, pattern, mapping_dict):
        if pattern.name is None:
            raise ValueError('Cannot generate mapping dict,'
                             ' OpTypePattern name is None.')
        mapping_dict[pattern.name] = node
        for child, child_pattern in zip(node.children, pattern.outputs):
            _mapping(child, child_pattern, mapping_dict)
        return mapping_dict

    return _mapping(node, pattern, mapping_dict)
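The inner _mapping walks the matched subgraph and the pattern tree in lockstep. A minimal self-contained sketch of that recursion, using namedtuple stand-ins for OnnxNode and OpTypePattern (assumed for illustration only; the real classes come from the surrounding package):

from collections import namedtuple

Node = namedtuple('Node', ['op_type', 'children'])        # stand-in for OnnxNode
Pattern = namedtuple('Pattern', ['name', 'outputs'])       # stand-in for OpTypePattern

add = Node('Add', [])
mul = Node('Mul', [add])
bn = Node('BatchNormalization', [mul])
pattern = Pattern('bn', [Pattern('mul', [Pattern('add', [])])])

def _mapping(node, pattern, mapping_dict):
    mapping_dict[pattern.name] = node
    for child, child_pattern in zip(node.children, pattern.outputs):
        _mapping(child, child_pattern, mapping_dict)
    return mapping_dict

result = _mapping(bn, pattern, {})
assert result['mul'].op_type == 'Mul'   # pattern names map to the matched nodes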
Example 9: is_eligible
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def is_eligible(self, graph, node):
    mapping = self.get_mapping(node)
    bn, mul, add = mapping['bn'], mapping['mul'], mapping['add']
    # only spatial batchnorm is supported
    if bn.attrs.get('spatial') is not None and bn.attrs['spatial'] != 1:
        return False
    # mul and add must each be initialized by some tensor
    if (mul.input[0] not in graph.tensor_dict and
            mul.input[1] not in graph.tensor_dict):
        return False
    if (add.input[0] not in graph.tensor_dict and
            add.input[1] not in graph.tensor_dict):
        return False
    t = graph.tensor_dict
    scale = t[bn.input[1]]
    bias = t[bn.input[2]]
    _mul_tensor = t.get(mul.input[0], t[mul.input[1]])
    mul_tensor = np.squeeze(_mul_tensor)
    _add_tensor = t.get(add.input[0], t[add.input[1]])
    add_tensor = np.squeeze(_add_tensor)
    # check that the mul constant is broadcastable against scale and bias
    if mul_tensor.shape != scale.shape or mul_tensor.shape != bias.shape:
        if mul_tensor.shape != (1,) and mul_tensor.shape != ():
            return False
    # check that the add constant is broadcastable against bias
    if add_tensor.shape != bias.shape:
        if add_tensor.shape != (1,) and add_tensor.shape != ():
            return False
    return True
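The np.squeeze calls matter because channel-wise Mul/Add constants in NCHW graphs are often stored with shape (1, C, 1, 1), while the BatchNormalization scale and bias have shape (C,). A quick stand-alone check with assumed shapes:

import numpy as np

scale = np.ones(64, dtype=np.float32)                      # BN scale, shape (64,)
mul_const = np.full((1, 64, 1, 1), 0.5, np.float32)        # channel-wise Mul constant

mul_tensor = np.squeeze(mul_const)
assert mul_tensor.shape == scale.shape                     # (64,) passes the eligibility check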
Example 10: _impl_v5
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def _impl_v5(cls, inputs, attr, params):
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
        attr['to'] = str(TENSOR_TYPE_TO_NP_TYPE[attr['to']])
    except ImportError as e:
        raise ImportError(
            "Unable to import onnx.mapping which is required {}".format(e))
    return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
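The conversion inside the try block turns the integer 'to' attribute of an ONNX Cast node into a dtype string that the target framework understands, for example:

from onnx import TensorProto
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE

assert str(TENSOR_TYPE_TO_NP_TYPE[TensorProto.FLOAT]) == 'float32'
assert str(TENSOR_TYPE_TO_NP_TYPE[TensorProto.INT64]) == 'int64'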
Example 11: _parse_dtype
# Required import: import onnx [as alias]
# Or: from onnx import mapping [as alias]
def _parse_dtype(self, value_proto, dtype):
    """Parse the dtype of value_proto, falling back to the given default."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
        return TENSOR_TYPE_TO_NP_TYPE[value_proto.type.tensor_type.elem_type].name
    except AttributeError:
        return dtype