This article collects typical usage examples of the Python attribute onnx.mapping.NP_TYPE_TO_TENSOR_TYPE. If you are wondering what mapping.NP_TYPE_TO_TENSOR_TYPE does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the module this attribute lives in, onnx.mapping.
The following shows 15 code examples of the mapping.NP_TYPE_TO_TENSOR_TYPE attribute, sorted by popularity by default.
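Before the examples, a quick orientation: NP_TYPE_TO_TENSOR_TYPE is a plain dict that maps numpy dtype objects to onnx.TensorProto.DataType enum values. A minimal sketch of the mapping itself (nothing beyond the onnx and numpy packages is assumed):

import numpy as np
from onnx import TensorProto, mapping

# Dtype objects (not dtype classes) are the dictionary keys.
assert mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')] == TensorProto.FLOAT
assert mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')] == TensorProto.INT64

# The inverse table is also provided for going back to numpy.
assert mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.FLOAT] == np.dtype('float32')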
Example 1: perform_import_export
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def perform_import_export(graph_proto, input_shape):
    """Import the ONNX model into MXNet, export it back to ONNX,
    and then import it again to verify the round trip."""
    graph = GraphProto()
    sym, arg_params, aux_params = graph.from_onnx(graph_proto)
    params = {}
    params.update(arg_params)
    params.update(aux_params)
    # Export to the ONNX graph proto format.
    converter = MXNetGraph()
    graph_proto = converter.create_onnx_graph_proto(
        sym, params, in_shape=input_shape,
        in_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')])
    # Import back into MXNet to verify the result.
    sym, arg_params, aux_params = graph.from_onnx(graph_proto)
    return sym, arg_params, aux_params
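GraphProto and MXNetGraph are not defined in the snippet itself; they come from MXNet's ONNX import/export support. The imports below are an educated guess based on MXNet 1.x test code and may differ in other versions:

import numpy as np
import mxnet as mx
from onnx import mapping
# Assumed module paths (MXNet 1.x); verify against your installed version.
from mxnet.contrib.onnx.onnx2mx.import_onnx import GraphProto
from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph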
Example 2: np_dtype_to_tensor_type_name
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def np_dtype_to_tensor_type_name(data_type):  # type: (np.dtype) -> str
    """Return the TensorProto type name corresponding to the provided numpy dtype.

    :param data_type: Numpy dtype we want to convert.
    :return: String representation of the TensorProto type name.
    """
    return TensorProto.DataType.Name(NP_TYPE_TO_TENSOR_TYPE[data_type])
Example 3: np_dtype_to_tensor_type
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def np_dtype_to_tensor_type(data_type):  # type: (np.dtype) -> int
    """Return the TensorProto type for the provided numpy dtype.

    :param data_type: Numpy data type object.
    :return: TensorProto.DataType enum value for the corresponding type.
    """
    return NP_TYPE_TO_TENSOR_TYPE[data_type]
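A quick illustration of what the two helpers above return, using only the underlying onnx calls (enum values follow the standard TensorProto numbering):

import numpy as np
from onnx import TensorProto
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE

value = NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')]
print(value)                             # 1, i.e. TensorProto.FLOAT
print(TensorProto.DataType.Name(value))  # 'FLOAT'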
Example 4: rewrite_onnx_tensor_type
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def rewrite_onnx_tensor_type(xtensor_type, new_type):
    if new_type.dtype is not None:
        xtensor_type.elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[new_type.dtype]
    if new_type.shape is not None:
        xtensor_type.shape.Clear()
        for d in new_type.shape:
            xtensor_type.shape.dim.add().dim_value = d
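A self-contained sketch of how this helper can be exercised; the SimpleType stand-in below is hypothetical and only mimics the dtype/shape interface the function expects:

import collections
import numpy as np
import onnx
from onnx import helper, mapping  # mapping is needed by rewrite_onnx_tensor_type above

SimpleType = collections.namedtuple('SimpleType', ['dtype', 'shape'])  # hypothetical stand-in

vi = helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [1, 3])
rewrite_onnx_tensor_type(vi.type.tensor_type, SimpleType(np.dtype('int64'), (2, 4)))
print(vi.type.tensor_type.elem_type)                         # 7, i.e. TensorProto.INT64
print([d.dim_value for d in vi.type.tensor_type.shape.dim])  # [2, 4]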
Example 5: version_11
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def version_11(cls, node, **kwargs):
    default_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')]
    dtype = data_type.onnx2tf(node.attrs.get("dtype", default_dtype))
    ragged = tf.RaggedTensor.from_row_lengths(values=[], row_lengths=[])
    sparse = tf.cast(ragged.to_sparse(), dtype)
    return [tf.RaggedTensor.from_sparse(sparse)]
Example 6: tf2onnx
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def tf2onnx(dtype):
    if isinstance(dtype, Number):
        tf_dtype = tf.as_dtype(dtype)
    elif isinstance(dtype, tf.DType):
        tf_dtype = dtype
    elif isinstance(dtype, list):
        return [tf2onnx(t) for t in dtype]
    else:
        raise RuntimeError("dtype should be a number or tf.DType.")
    # Usually, tf2onnx is done via tf_type -> numpy_type -> onnx_type
    # to leverage existing type conversion infrastructure.
    # However, we need to intercept the string type early, because
    # lowering tf.string to a numpy dtype loses information:
    # <class 'object'> is returned instead of the desired numpy string type.
    if tf_dtype is tf.string:
        return TensorProto.STRING
    onnx_dtype = None
    try:
        onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(
            tf_dtype.as_numpy_dtype)]
    finally:
        if onnx_dtype is None:
            common.logger.warning(
                "Can't convert tf dtype {} to ONNX dtype. "
                "Returning 0 (TensorProto.UNDEFINED).".format(tf_dtype))
            onnx_dtype = TensorProto.UNDEFINED
        return onnx_dtype
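If TensorFlow is installed and the helper above is in scope, its behaviour can be checked directly (a usage sketch, not part of the original test suite):

import tensorflow as tf
from onnx import TensorProto

print(tf2onnx(tf.float32) == TensorProto.FLOAT)  # numeric types go through numpy
print(tf2onnx(tf.string) == TensorProto.STRING)  # strings are intercepted before the numpy step
print(tf2onnx([tf.int32, tf.int64]))             # lists are converted element-wise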
Example 7: any_dtype_to_onnx_dtype
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def any_dtype_to_onnx_dtype(np_dtype=None, tf_dtype=None, onnx_dtype=None):
    dtype_mask = [1 if val else 0 for val in [np_dtype, tf_dtype, onnx_dtype]]
    num_type_set = sum(dtype_mask)
    assert num_type_set == 1, \
        "One and only one type must be set. However, {} were set.".format(num_type_set)
    if np_dtype:
        onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np_dtype]
    if tf_dtype:
        onnx_dtype = tf2onnx(tf_dtype)
    return onnx_dtype
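A usage sketch that exercises only the numpy and ONNX branches, so TensorFlow is not required:

import numpy as np
from onnx import TensorProto

print(any_dtype_to_onnx_dtype(np_dtype=np.dtype('int64')) == TensorProto.INT64)
print(any_dtype_to_onnx_dtype(onnx_dtype=TensorProto.FLOAT) == TensorProto.FLOAT)
# Passing more than one keyword (or none) trips the assertion above.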
Example 8: from_array
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def from_array(arr, name=None):  # type: (np.ndarray[Any], Optional[Text]) -> TensorProto
    """Converts a numpy array to a tensor def.

    Inputs:
        arr: a numpy array.
        name: (optional) the name of the tensor.
    Returns:
        tensor_def: the converted tensor def.
    """
    tensor = TensorProto()
    tensor.dims.extend(arr.shape)
    if name:
        tensor.name = name

    if arr.dtype == np.object_:
        # Special care for strings.
        raise NotImplementedError("Need to properly implement string.")
    # For numerical types, directly use numpy raw bytes.
    try:
        dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
    except KeyError:
        raise RuntimeError(
            "Numpy data type not understood yet: {}".format(str(arr.dtype)))
    tensor.data_type = dtype
    tensor.raw_data = arr.tobytes()  # note: tobytes() is only available in numpy >= 1.9.
    return tensor
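A quick round-trip check of from_array, assuming the imports shown above the example; onnx.numpy_helper.to_array reads the raw bytes back into a numpy array:

import numpy as np
from onnx import numpy_helper

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
t = from_array(arr, name='weights')
print(t.data_type)  # 1, i.e. TensorProto.FLOAT
np.testing.assert_array_equal(numpy_helper.to_array(t), arr)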
Example 9: _argminmax_nodes
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def _argminmax_nodes(op_name, func, input_names, output_names, context):
    gb = onnx_helper.GraphBuilder()
    target_input_names = input_names
    axis = func.axis
    if axis is None:
        shape_name = context.add_const(np.array([-1], dtype=np.int64), 'shape')
        input_names.append(shape_name)
        target_input_names = [gb.op('Reshape', input_names)]
        axis = 0
    out = gb.op(op_name, target_input_names, axis=axis, keepdims=0)
    # Chainer's ArgMax always returns values as int32.
    # The Cast spec changed in opset 6; this logic does not support opset 5 and earlier.
    gb.op('Cast', [out], to=NP_TYPE_TO_TENSOR_TYPE[np.dtype('int32')])
    return gb.nodes(output_names)
Example 10: convert_Cast
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def convert_Cast(func, opset_version, input_names, output_names, context):
    typ = func.type if isinstance(func.type, np.dtype) else np.dtype(func.type)
    if opset_version == 1:
        return onnx_helper.make_node(
            'Cast', input_names, output_names,
            to=TENSOR_TYPE_TO_NAME[NP_TYPE_TO_TENSOR_TYPE[typ]]
        ),
    elif opset_version == 6:
        return onnx_helper.make_node(
            'Cast', input_names, output_names,
            to=NP_TYPE_TO_TENSOR_TYPE[typ]
        ),
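The only difference between the two branches is the form of the Cast node's `to` attribute: opset 1 expected the type name string, while opset 6 and later expect the integer enum. TENSOR_TYPE_TO_NAME is the converter's own lookup table (not shown here); the same strings are available from the protobuf enum, as sketched below with onnx.helper directly:

import numpy as np
from onnx import helper, TensorProto
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE

typ = np.dtype('float16')
# opset >= 6: the `to` attribute is the integer enum value.
node_v6 = helper.make_node('Cast', ['x'], ['y'], to=NP_TYPE_TO_TENSOR_TYPE[typ])
# opset 1: the `to` attribute was the type name string, e.g. 'FLOAT16'.
node_v1 = helper.make_node('Cast', ['x'], ['y'],
                           to=TensorProto.DataType.Name(NP_TYPE_TO_TENSOR_TYPE[typ]))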
Example 11: convert_SelectItem
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def convert_SelectItem(func, opset_version, input_names, output_names,
                       context):
    gb = onnx_helper.GraphBuilder()
    if opset_version >= 11:
        t = gb.op('Unsqueeze', [input_names[1]], axes=[1])
        out = gb.op('GatherElements', [input_names[0], t], axis=1)
        gb.op('Squeeze', [out], axes=[1])
    else:
        data, target_idxs = input_names
        target_idxs = gb.op('Cast', [target_idxs],
                            to=NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')])
        n_rows = gb.op('Shape', [target_idxs])
        # This is an equivalent of using Range.
        one_1 = onnx.helper.make_tensor(
            'one_1', onnx.TensorProto.FLOAT, [1], [1])
        ones = gb.op('ConstantOfShape', [n_rows], value=one_1)
        row_idxs = gb.op('Squeeze', [gb.op('NonZero', [ones])])
        data_shape = gb.op('Shape', [data])
        one_2 = context.add_const(np.array([1]), 'one_2')
        n_cols = gb.op('Gather', [data_shape, one_2], axis=0)
        data = gb.op('Squeeze', [gb.op('Flatten', [data], axis=2)])
        target_idxs = gb.op(
            'Add', [target_idxs, gb.op('Mul', [row_idxs, n_cols])])
        gb.op('Gather', [data, target_idxs], axis=0)
    return gb.nodes(output_names)
Example 12: to_onnx_type
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def to_onnx_type(dt_type):
    # TensorFlow data types integrate seamlessly with numpy.
    return mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dt_type.as_numpy_dtype)]
Example 13: from_mxnet
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def from_mxnet(model_file, weight_file, input_shape, input_type, log=False):
    mx_weights = mx.ndarray.load(weight_file)
    with open(model_file, 'r') as f:
        graph = json.loads(f.read())["nodes"]

    converter = MxNetToONNXConverter()
    onnx_graph = converter.convert_mx2onnx_graph(
        graph, mx_weights, input_shape,
        mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(input_type)], log=log)
    onnx_model = helper.make_model(onnx_graph)
    return onnx_model
Example 14: convert_weights_and_inputs
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def convert_weights_and_inputs(node, **kwargs):
    name = node["name"]
    if looks_like_weight(name):
        weights = kwargs["weights"]
        initializer = kwargs["initializer"]
        np_arr = weights[name]
        data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
        dims = np.shape(np_arr)

        tensor_node = helper.make_tensor_value_info(name, data_type, dims)

        initializer.append(
            helper.make_tensor(
                name=name,
                data_type=data_type,
                dims=dims,
                vals=np_arr.flatten().tolist(),
                raw=False,
            )
        )
        return tensor_node
    else:
        tval_node = helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
        return tval_node
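Stripped of the MXNet-specific bookkeeping, the pattern here is pairing a value_info with an initializer of the same dtype; a minimal sketch using a made-up weight array:

import numpy as np
from onnx import helper, mapping

np_arr = np.ones((2, 3), dtype=np.float32)
data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]

value_info = helper.make_tensor_value_info('w', data_type, np_arr.shape)
initializer = helper.make_tensor('w', data_type, np_arr.shape,
                                 np_arr.flatten().tolist(), raw=False)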
Example 15: tensor_type
# Required import: from onnx import mapping [as alias]
# Or: from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE [as alias]
def tensor_type(type_str):
    """Return the tensor type from a string descriptor."""
    return mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(type_str.lower())]
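For example (the descriptor is lower-cased before the numpy lookup, so mixed case works):

import numpy
from onnx import TensorProto, mapping

print(tensor_type('Float32') == TensorProto.FLOAT)
print(tensor_type('INT64') == TensorProto.INT64)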