

Python mapping.TENSOR_TYPE_TO_NP_TYPE attribute code examples

This article collects typical usage examples of the Python onnx.mapping.TENSOR_TYPE_TO_NP_TYPE attribute. If you are wondering what mapping.TENSOR_TYPE_TO_NP_TYPE does, how to use it, or want concrete examples, the curated snippets below should help. You can also explore further usage examples of onnx.mapping, the module this attribute belongs to.


The sections below present 14 code examples that use mapping.TENSOR_TYPE_TO_NP_TYPE, sorted by popularity by default.
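Before diving into the examples, here is a minimal sketch of the attribute itself: onnx.mapping.TENSOR_TYPE_TO_NP_TYPE is a plain dict keyed by TensorProto data-type enum values, with numpy dtypes as values (onnx also ships the reverse mapping, NP_TYPE_TO_TENSOR_TYPE). The printed values are for illustration only.

import numpy as np
from onnx import TensorProto, mapping

# Keys are the integer TensorProto.DataType enum values,
# values are the corresponding numpy dtypes.
print(mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.FLOAT])   # float32
print(mapping.TENSOR_TYPE_TO_NP_TYPE[TensorProto.INT64])   # int64

# The reverse lookup also lives in onnx.mapping.
assert mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')] == TensorProto.FLOAT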

Example 1: cast

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def cast(attrs, inputs, proto_obj):
    """Cast input to a given dtype."""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
    new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
    return 'cast', new_attrs, inputs 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 12, Source: _op_translations.py

Example 2: onnx_tensor_type_to_numpy_type

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def onnx_tensor_type_to_numpy_type(data_type):  # type: (Any) -> np.dtype
    """Return ONNX TensorProto type mapped into numpy dtype.

    :param data_type: The type we want to convert from.
    :return: Converted numpy dtype.
    """
    if type(data_type) is int:
        return TENSOR_TYPE_TO_NP_TYPE[data_type]
    elif type(data_type) is str:
        return TENSOR_TYPE_TO_NP_TYPE[TensorProto.DataType.Value(data_type)]
    else:
        raise ValueError('Unsupported data type representation (%s).' % type(data_type))
Developer: NervanaSystems, Project: ngraph-onnx, Lines: 14, Source: types.py
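A quick usage sketch for the helper above, assuming onnx_tensor_type_to_numpy_type is in scope (e.g. copied from the snippet): both the integer enum value and the enum name string are accepted.

from onnx import TensorProto

print(onnx_tensor_type_to_numpy_type(TensorProto.FLOAT))  # float32 (integer enum value)
print(onnx_tensor_type_to_numpy_type('DOUBLE'))           # float64 (enum name string)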

Example 3: rewrite_onnx_model

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def rewrite_onnx_model(xmodel, new_input_types):
    xgraph = xmodel.graph

    # Update parameter types.
    new_param_dtype = new_input_types[0].dtype
    if new_param_dtype is not None:
        old_param_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[
            xgraph.input[0].type.tensor_type.elem_type]
        initializers = {i.name: i for i in xgraph.initializer}
        for input in xgraph.input:
            if input.name not in initializers:
                continue
            param_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[
                input.type.tensor_type.elem_type]
            if param_dtype != old_param_dtype:
                sys.stderr.write(
                    'WARNING: This assumes all parameters have the same dtype '
                    'as the first input (%s) but the dtype of `%s` is %s\n' %
                    (old_param_dtype, input.name, param_dtype))
                continue

            new_type = Type(dtype=new_param_dtype)
            rewrite_onnx_tensor(initializers[input.name], new_type)
            rewrite_onnx_tensor_type(input.type.tensor_type, new_type)

    initializer_names = set(init.name for init in xgraph.initializer)
    inputs = [input for input in xgraph.input
              if input.name not in initializer_names]
    assert len(new_input_types) <= len(inputs)

    # Update input types.
    for input_type, input in zip(new_input_types, inputs):
        rewrite_onnx_tensor_type(input.type.tensor_type, input_type)

    for vi in xgraph.value_info:
        vi.type.Clear()
    for vi in xgraph.output:
        vi.type.Clear()

    return shape_inference.infer_shapes(xmodel) 
Developer: pfnet-research, Project: chainer-compiler, Lines: 42, Source: input_rewriter.py

Example 4: rewrite_onnx_testdir

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def rewrite_onnx_testdir(model_testdir, out_testdir, new_input_types):
    os.makedirs(out_testdir, exist_ok=True)
    xmodel = rewrite_onnx_file(os.path.join(model_testdir, 'model.onnx'),
                               os.path.join(out_testdir, 'model.onnx'),
                               new_input_types)

    name_to_type = {}
    for vi in (list(xmodel.graph.input) +
               list(xmodel.graph.value_info) +
               list(xmodel.graph.output)):
        dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]
        shape = [d.dim_value for d in vi.type.tensor_type.shape.dim]
        name_to_type[vi.name] = Type(dtype=dtype, shape=shape)

    for test_set in glob.glob(os.path.join(model_testdir, 'test_data_set_*')):
        dest_dir = os.path.join(out_testdir, os.path.basename(test_set))
        os.makedirs(dest_dir, exist_ok=True)
        for tensor_proto in glob.glob(os.path.join(test_set, '*.pb')):
            xtensor = onnx.load_tensor(tensor_proto)
            if xtensor.name not in name_to_type:
                raise RuntimeError('Unknown tensor name: %s' % xtensor.name)
            rewrite_onnx_tensor(xtensor, name_to_type[xtensor.name])

            out_tensor_proto = os.path.join(dest_dir,
                                            os.path.basename(tensor_proto))
            onnx.save_tensor(xtensor, out_tensor_proto) 
Developer: pfnet-research, Project: chainer-compiler, Lines: 28, Source: input_rewriter.py
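Examples 3 and 4 both read the numpy dtype and shape back out of ValueInfoProto entries via TENSOR_TYPE_TO_NP_TYPE. The self-contained sketch below illustrates just that lookup on a tiny in-memory model (the Relu graph is a placeholder, not the chainer-compiler test model):

from onnx import TensorProto, helper, mapping

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 224, 224])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 224, 224])
graph = helper.make_graph([helper.make_node('Relu', ['x'], ['y'])],
                          'demo', [x], [y])
model = helper.make_model(graph)

for vi in list(model.graph.input) + list(model.graph.output):
    dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]
    shape = [d.dim_value for d in vi.type.tensor_type.shape.dim]
    print(vi.name, dtype, shape)  # e.g. x float32 [1, 3, 224, 224]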

Example 5: onnx2tf

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def onnx2tf(dtype):
  return tf.as_dtype(mapping.TENSOR_TYPE_TO_NP_TYPE[_onnx_dtype(dtype)]) 
Developer: onnx, Project: onnx-tensorflow, Lines: 4, Source: data_type.py
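The _onnx_dtype helper used above is not shown in the snippet; ignoring it, the conversion is a two-step lookup: ONNX enum -> numpy dtype -> TensorFlow dtype. A standalone sketch of the same idea (not the onnx-tensorflow implementation itself):

import tensorflow as tf
from onnx import TensorProto, mapping

def onnx_dtype_to_tf(onnx_dtype):
    """Map an ONNX TensorProto data-type enum to a tf.DType."""
    return tf.as_dtype(mapping.TENSOR_TYPE_TO_NP_TYPE[onnx_dtype])

print(onnx_dtype_to_tf(TensorProto.FLOAT))  # <dtype: 'float32'>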

Example 6: _impl_v5

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def _impl_v5(cls, inputs, attr, params):
        try:
            from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
            attr['to'] = TENSOR_TYPE_TO_NP_TYPE[attr['to']]
        except ImportError as e:
            raise ImportError(
                "Unable to import onnx.mapping which is required {}".format(e))
        return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr) 
Developer: mlperf, Project: training_results_v0.6, Lines: 10, Source: onnx.py

Example 7: to_array

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def to_array(tensor):  # type: (TensorProto) -> np.ndarray[Any]
    """Converts a tensor def object to a numpy array.

    Inputs:
        tensor: a TensorProto object.
    Returns:
        arr: the converted array.
    """
    if tensor.HasField("segment"):
        raise ValueError(
            "Currently not supporting loading segments.")
    if tensor.data_type == TensorProto.UNDEFINED:
        raise ValueError("The data type is not defined.")
    if tensor.data_type == TensorProto.STRING:
        raise ValueError("Tensor data type STRING is not supported.")

    tensor_dtype = tensor.data_type
    np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]
    storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]
    storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]
    storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]
    dims = tensor.dims

    if tensor.HasField("raw_data"):
        # Raw_bytes support: using frombuffer.
        return np.frombuffer(
            tensor.raw_data,
            dtype=np_dtype).reshape(dims)
    else:
        data = getattr(tensor, storage_field)  # type: Sequence[np.complex64]
        if (tensor_dtype == TensorProto.COMPLEX64 or
                tensor_dtype == TensorProto.COMPLEX128):
            data = combine_pairs_to_complex(data)
        return (
            np.asarray(
                data,
                dtype=storage_np_dtype)
            .astype(np_dtype)
            .reshape(dims)
        ) 
Developer: mlperf, Project: training_results_v0.6, Lines: 42, Source: numpy_helper.py
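A round-trip usage sketch for the to_array above (it mirrors onnx.numpy_helper.to_array): build a small TensorProto with onnx.helper.make_tensor and convert it back to a numpy array.

import numpy as np
from onnx import TensorProto, helper

tensor = helper.make_tensor(name='x', data_type=TensorProto.FLOAT,
                            dims=[2, 2], vals=[1.0, 2.0, 3.0, 4.0])
arr = to_array(tensor)
assert arr.dtype == np.float32 and arr.shape == (2, 2)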

Example 8: activation_process

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def activation_process(scope, operator, container, biased_tensor_name):
    # Create an activation function node and apply activation function to the intermediate tensor
    apply_activation_function = activation_map[operator.raw_operator.activation]
    if operator.raw_operator.activation in [activation_get('softmax'), keras.activations.softmax]:
        apply_softmax(scope, biased_tensor_name, operator.outputs[0].full_name, container, axis=-1)
    elif operator.raw_operator.activation in [tf.nn.relu6]:
        np_type = TENSOR_TYPE_TO_NP_TYPE[operator.inputs[0].type.to_onnx_type().tensor_type.elem_type]
        zero_value = np.zeros(shape=(1,), dtype=np_type)
        apply_relu_6(scope, biased_tensor_name, operator.outputs[0].full_name, container,
                     zero_value=zero_value)
    else:
        apply_activation_function(scope, biased_tensor_name, operator.outputs[0].full_name, container) 
Developer: onnx, Project: keras-onnx, Lines: 14, Source: common.py

Example 9: convert_tf_relu6

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def convert_tf_relu6(scope, operator, container):
    oopb = OnnxOperatorBuilder(container, scope)
    np_type = TENSOR_TYPE_TO_NP_TYPE[operator.inputs[0].type.to_onnx_type().tensor_type.elem_type]
    zero_value = np.zeros(shape=(1,), dtype=np_type)
    oopb.apply_op_with_output("apply_relu_6",
                              operator.input_full_names,
                              operator.output_full_names,
                              name=operator.full_name + '_clip',
                              zero_value=zero_value) 
Developer: onnx, Project: keras-onnx, Lines: 11, Source: _builtin.py
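Examples 8 and 9 (and Example 14 below) look up the input's numpy dtype only to build a zero_value constant of the right type for apply_relu_6; as the comment in Example 14 notes, the decomposition is relu6(x) = min(relu(x), 6). A numpy reference of that computation, for comparison only (relu6_reference is an illustrative name, not part of keras-onnx):

import numpy as np
from onnx import TensorProto, mapping

def relu6_reference(x, elem_type=TensorProto.FLOAT):
    # Use the same dtype the converter would pick for its zero constant.
    np_type = mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type]
    zero = np.zeros(shape=(1,), dtype=np_type)
    six = np.full(shape=(1,), fill_value=6, dtype=np_type)
    return np.minimum(np.maximum(np.asarray(x, dtype=np_type), zero), six)

print(relu6_reference([-1.0, 3.0, 9.0]))  # [0. 3. 6.]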

Example 10: _impl_v5

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def _impl_v5(cls, inputs, attr, params):
        try:
            from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
            attr['to'] = str(TENSOR_TYPE_TO_NP_TYPE[attr['to']])
        except ImportError as e:
            raise ImportError(
                "Unable to import onnx.mapping which is required {}".format(e))
        return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr) 
Developer: apache, Project: incubator-tvm, Lines: 10, Source: onnx.py

Example 11: _parse_dtype

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def _parse_dtype(self, value_proto, dtype):
        """Parse dtype."""
        try:
            from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
            return TENSOR_TYPE_TO_NP_TYPE[value_proto.type.tensor_type.elem_type].name
        except AttributeError:
            return dtype 
Developer: apache, Project: incubator-tvm, Lines: 9, Source: onnx.py

Example 12: _OnCast

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def _OnCast(self, node, inputs):
        np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[OnnxGraphContext.get_attribute(node, 'to')]
        casted = inputs[0].astype(np_dtype)
        return [casted] 
Developer: microsoft, Project: onnxconverter-common, Lines: 6, Source: _opt_const_folding.py
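The constant-folding handler above is essentially a numpy astype keyed by the Cast node's 'to' attribute. A minimal standalone equivalent (fold_cast is an illustrative name, not part of onnxconverter-common):

import numpy as np
from onnx import TensorProto, mapping

def fold_cast(array, to_attr):
    # 'to_attr' is the TensorProto data-type enum stored in the Cast node's 'to' attribute.
    return array.astype(mapping.TENSOR_TYPE_TO_NP_TYPE[to_attr])

folded = fold_cast(np.array([1.5, 2.5]), TensorProto.INT32)
print(folded, folded.dtype)  # [1 2] int32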

Example 13: run

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def run(self,
            inputs,  # type: Any
            **kwargs  # type: Any
            ):
        # type: (...) -> Tuple[Any, ...]
        super(CoreMLRep, self).run(inputs, **kwargs)
        inputs_ = inputs
        _reshaped = False
        if not SupportedVersion.is_nd_array_supported(self.minimum_ios_deployment_target):
            for i, input_ in enumerate(inputs_):
                shape = input_.shape
                if len(shape) == 4 or len(shape) == 2:
                    inputs_[i] = input_[np.newaxis, :]
                    _reshaped = True
                elif len(shape) == 3:
                    spec = self.model.get_spec()
                    spec_shape = [int(k) for k in spec.description.input[i].type.multiArrayType.shape]
                    prod = spec_shape[0] * spec_shape[1] * spec_shape[2]
                    onnx_shape = list(shape)
                    if onnx_shape != spec_shape:
                        if onnx_shape[2] == prod:
                            inputs_[i] = np.reshape(inputs_[i], [onnx_shape[0], onnx_shape[1]] + spec_shape)
                        elif onnx_shape[1] * onnx_shape[2] == prod:
                            inputs_[i] = np.reshape(inputs_[i], [1, onnx_shape[0]] + spec_shape)
        input_dict = dict(
            zip(self.input_names,
                map(np.array, inputs_)))
        _set_dtypes(input_dict, self.model) #type: ignore

        prediction = self.model.predict(input_dict, self.useCPUOnly)
        output_values = [prediction[name] for name in self.output_names]

        if not SupportedVersion.is_nd_array_supported(self.minimum_ios_deployment_target):
            for i, output_ in enumerate(output_values):
                shape = output_.shape
                # Reshape the CoreML output to match ONNX's output shape
                try:
                    output_values[i] = np.reshape(output_, self.onnx_outputs_info[self.output_names[i]][2])  # type: ignore
                except RuntimeError:
                    print("Output '%s' shape incompatible between CoreML (%s) and onnx (%s)"
                        %(self.output_names[i], output_.shape,
                            self.onnx_outputs_info[self.output_names[i]]))
        
        ## Type Cast to ONNX expected output types
        for i, output_ in enumerate(output_values):
            output_type = self.onnx_outputs_info[self.output_names[i]][1]
            if TENSOR_TYPE_TO_NP_TYPE[output_type] != output_values[i].dtype:
                output_values[i] = output_values[i].astype(TENSOR_TYPE_TO_NP_TYPE[output_type])

        result = namedtupledict('Outputs',
                              self.output_names)(*output_values)  # type: Tuple[Any, ...]
        return result 
Developer: onnx, Project: onnx-coreml, Lines: 54, Source: _backend_rep.py

Example 14: convert_keras_activation

# Required module: from onnx import mapping [as alias]
# Or: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE [as alias]
def convert_keras_activation(scope, operator, container):
    input_name = operator.input_full_names[0]
    output_name = operator.output_full_names[0]
    activation = operator.raw_operator.activation
    activation_type = type(activation)
    if activation in [activation_get('sigmoid'), keras.activations.sigmoid]:
        apply_sigmoid(scope, input_name, output_name, container)
    elif activation in [activation_get('tanh'), keras.activations.tanh]:
        apply_tanh(scope, input_name, output_name, container)
    elif activation in [activation_get('relu'), keras.activations.relu] or \
            (hasattr(keras.layers.advanced_activations, 'ReLU') and
             activation_type == keras.layers.advanced_activations.ReLU):
        apply_relu(scope, input_name, output_name, container)
    elif activation in [activation_get('softmax'), keras.activations.softmax] or \
            activation_type == keras.layers.advanced_activations.Softmax:
        apply_softmax(scope, input_name, output_name, container, axis=-1)
    elif activation in [activation_get('elu'), keras.activations.elu] or \
            activation_type == keras.layers.advanced_activations.ELU:
        apply_elu(scope, input_name, output_name, container, alpha=1.0)
    elif activation in [activation_get('hard_sigmoid'), keras.activations.hard_sigmoid]:
        apply_hard_sigmoid(scope, input_name, output_name, container, alpha=0.2, beta=0.5)
    elif activation in [activation_get('linear'), keras.activations.linear]:
        apply_identity(scope, input_name, output_name, container)
    elif activation in [activation_get('selu'), keras.activations.selu]:
        apply_selu(scope, input_name, output_name, container, alpha=1.673263, gamma=1.050701)
    elif activation_type == keras.layers.advanced_activations.LeakyReLU:
        apply_leaky_relu(scope, input_name, output_name, container, alpha=activation.alpha.item(0))
    elif activation_type == keras.layers.advanced_activations.PReLU:
        apply_prelu(scope, input_name, output_name, container, slope=operator.raw_operator.get_weights()[0])
    elif activation in [relu6] or (hasattr(activation, '__name__') and activation.__name__ == 'relu6'):
        # relu6(x) = min(relu(x), 6)
        np_type = TENSOR_TYPE_TO_NP_TYPE[operator.inputs[0].type.to_onnx_type().tensor_type.elem_type]
        zero_value = np.zeros(shape=(1,), dtype=np_type)
        apply_relu_6(scope, input_name, output_name, container,
                     zero_value=zero_value)
    elif hasattr(activation, '__name__') and activation.__name__ == 'swish':
        apply_sigmoid(scope, input_name, output_name + '_sig', container)
        apply_mul(scope, [input_name, output_name + '_sig'], output_name, container)
    else:
        if activation in [activation_get('softsign'), keras.activations.softsign]:
            op_type = 'Softsign'
        elif activation in [activation_get('softplus'), keras.activations.softplus]:
            op_type = 'Softplus'
        else:
            raise RuntimeError("Unsupported activation method within Activation layer '{}'".format(activation))

        container.add_node(op_type, operator.input_full_names, operator.output_full_names, name=operator.full_name) 
Developer: onnx, Project: keras-onnx, Lines: 49, Source: activation.py


Note: the onnx.mapping.TENSOR_TYPE_TO_NP_TYPE examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; copyright remains with those authors, and distribution and use are subject to each project's license. Do not reproduce without permission.