Python onnx.ModelProto Code Examples

This article collects typical usage examples of onnx.ModelProto in Python. If you are asking how exactly to use onnx.ModelProto, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples from the onnx package in which it is defined.


The sections below present 15 code examples of onnx.ModelProto, sorted by popularity by default.
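
As a quick orientation before the examples: onnx.ModelProto is the top-level protobuf message returned by onnx.load, and its graph field holds the nodes, inputs, outputs, and initializers that most of the examples below manipulate. The following minimal sketch assumes a placeholder model file named "model.onnx" and only uses standard onnx APIs.

import onnx

# "model.onnx" is a placeholder path for any serialized ONNX model.
model = onnx.load("model.onnx")
assert isinstance(model, onnx.ModelProto)

# Validate the protobuf and print some basic structure.
onnx.checker.check_model(model)
print("producer:", model.producer_name, model.producer_version)
print("opsets:", [(op.domain, op.version) for op in model.opset_import])
print("inputs:", [i.name for i in model.graph.input])
print("outputs:", [o.name for o in model.graph.output])
print("node count:", len(model.graph.node))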

Example 1: optimize_onnx_model

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def optimize_onnx_model(origin_model, nchw_inputs=None, stop_initializers=None):
    # type: (onnx.ModelProto, list, list) -> onnx.ModelProto
    """
    the origin model will be updated after the optimization.
    :param origin_model:
    :param nchw_inputs:
    :return:
    """
    graph = origin_model.graph
    nodelist = list(graph.node)

    opt_graph = optimize_onnx_graph(nodelist,
                                    nchw_inputs=nchw_inputs,
                                    inputs=graph.input,
                                    outputs=graph.output,
                                    initializers=list(graph.initializer),
                                    stop_initializers=stop_initializers,
                                    model_value_info=graph.value_info,
                                    model_name=graph.name,
                                    target_opset=next(opset_.version for opset_ in origin_model.opset_import
                                                      if opset_.domain == '' or opset_.domain == 'ai.onnx'))

    origin_model.graph.CopyFrom(opt_graph)
    return origin_model 
Developer: microsoft, Project: onnxconverter-common, Lines of code: 26, Source file: optimizer.py
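
A possible usage sketch for the function above. The import path onnxconverter_common.optimizer is an assumption based on the source file named above, and "model.onnx" is a placeholder path.

import onnx
from onnxconverter_common.optimizer import optimize_onnx_model  # assumed import path

model = onnx.load("model.onnx")  # placeholder path
# The model is modified in place and also returned.
optimized = optimize_onnx_model(model, nchw_inputs=None, stop_initializers=None)
onnx.save_model(optimized, "model.optimized.onnx")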

Example 2: run_model

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def run_model(onnx_model, data_inputs):
    # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX model to an ngraph model and perform computation on input data.

    :param onnx_model: ONNX ModelProto describing an ONNX model
    :param data_inputs: list of numpy ndarrays with input data
    :return: list of numpy ndarrays with computed output
    """
    NgraphBackend.backend_name = BACKEND_NAME
    if NgraphBackend.supports_ngraph_device(NgraphBackend.backend_name):
        ng_model_function = import_onnx_model(onnx_model)
        runtime = get_runtime()
        computation = runtime.computation(ng_model_function)
        return computation(*data_inputs)
    else:
        raise RuntimeError('The requested nGraph backend <'
                           + NgraphBackend.backend_name + '> is not supported!') 
Developer: NervanaSystems, Project: ngraph-onnx, Lines of code: 20, Source file: __init__.py
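
To exercise run_model you need a ModelProto and matching numpy inputs. The sketch below builds a minimal single-Relu model with onnx.helper; running it through run_model additionally requires the ngraph-onnx environment this example comes from, so treat the final call as illustrative.

import numpy as np
from onnx import helper, TensorProto

# Build a minimal ModelProto containing a single Relu node.
node = helper.make_node("Relu", inputs=["x"], outputs=["y"])
graph = helper.make_graph(
    [node],
    "tiny_relu",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [2, 3])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [2, 3])],
)
model = helper.make_model(graph, producer_name="example")

# Run it with the function from Example 2 (requires an nGraph backend).
outputs = run_model(model, [np.random.randn(2, 3).astype(np.float32)])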

Example 3: generate_model

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def generate_model(self, inputs, outputs, graph, model) -> 'ModelProto':

        # assign param names
        self.param2name = {id(p): 'param' + n.replace('/', '_')
                           for n, p in model.namedparams()}

        for p, n in self.param2name.items():
            assigned_names.append(n)

        # assign onnx name
        assign_onnx_name(graph)

        graph_ = self.generate_graph(inputs, outputs, graph, None, True)
        onnx_model = oh.make_model(
            graph_, producer_name="elichika", producer_version="0.1")
        return onnx_model 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 18, Source file: onnx_converters.py

Example 4: _get_onnx_outputs_info

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def _get_onnx_outputs_info(model): # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary 
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, str):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Developer: onnx, Project: onnx-coreml, Lines of code: 18, Source file: _backend.py
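
A brief usage sketch for the helper above: it accepts either a file path or an already loaded ModelProto, and, per its docstring, each dictionary value is a (name, type, shape) tuple. "model.onnx" is a placeholder path.

import onnx

model = onnx.load("model.onnx")               # placeholder path
outputs_info = _get_onnx_outputs_info(model)  # the function from Example 4
for name, (out_name, dtype, shape) in outputs_info.items():
    print(name, dtype, shape)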

Example 5: prepare

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def prepare(cls,
                model,  # type: ModelProto
                device='CPU',  # type: Text
                minimum_ios_deployment_target='12', # type: str
                **kwargs  # type: Any
                ):
        # type: (...) -> CoreMLRep
        super(CoreMLBackend, cls).prepare(model, device, **kwargs)
        if DEBUG:
            with open('/tmp/node_model.onnx', 'wb') as f:
                s = model.SerializeToString()
                f.write(s)
        coreml_model = convert(model, minimum_ios_deployment_target=minimum_ios_deployment_target)
        if DEBUG:
            coreml_model.save('/tmp/node_model.mlmodel')
        onnx_outputs_info = _get_onnx_outputs_info(model)
        return CoreMLRep(coreml_model, onnx_outputs_info, device == 'CPU', minimum_ios_deployment_target=minimum_ios_deployment_target) 
Developer: onnx, Project: onnx-coreml, Lines of code: 19, Source file: _backend.py

Example 6: is_compatible

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def is_compatible(cls,
                       model,  # type: ModelProto
                       device='CPU',  # type: Text
                       **kwargs  # type: Any
                       ):  # type: (...) -> bool
        # Return whether the model is compatible with CoreML.
        '''
        This function will gradually grow to cover more cases.
        Be careful of false negatives: some cases that seemingly are not supported
        on CoreML are optimized by the graph transformer into a graph that can be
        converted to CoreML.

        Unsupported ops: if the graph contains an unsupported op, return False.
        '''
        ## TODO: Add un-supported ops
        unsupported_ops = []
        graph = model.graph
        for node in graph.node:
            if node.op_type in unsupported_ops:
                return False
        return True 
Developer: onnx, Project: onnx-coreml, Lines of code: 24, Source file: _backend.py

Example 7: __init__

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def __init__(self, model):
		"""
		This constructor takes a reference to a ONNX Model and checks model, infers intermediate shapes and sets up maps from name to type and node or constant value
		graph_util.convert_variables_to_constants and graph_util.remove_training_nodes to cleanse the graph of any nodes that are linked to training. This leaves us with 
		the nodes you need for inference. 
		In the resulting graph there should only be tf.Operations left that have one of the following types [Const, MatMul, Add, BiasAdd, Conv2D, Reshape, MaxPool, AveragePool, Placeholder, Relu, Sigmoid, Tanh]
		If the input should be a Keras model we will ignore operations with type Pack, Shape, StridedSlice, and Prod such that the Flatten layer can be used.
		
		Arguments
		---------
		model : onnx.ModelProto
		"""
		if issubclass(model.__class__, onnx.ModelProto):
			onnx.checker.check_model(model)
			self.model = model
			self.nodes = self.model.graph.node

			self.shape_map, self.constants_map, self.output_node_map, self.input_node_map, self.placeholdernames = prepare_model(model)
		else:
			assert 0, 'not onnx model' 
Developer: eth-sri, Project: eran, Lines of code: 22, Source file: onnx_translator.py
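
The constructor above only accepts an onnx.ModelProto and runs the ONNX checker before building its lookup maps. A minimal sketch of the same pre-checks, assuming a placeholder file "network.onnx":

import onnx

model = onnx.load("network.onnx")   # placeholder path
# The same checks the translator performs before building its maps.
assert issubclass(model.__class__, onnx.ModelProto)
onnx.checker.check_model(model)
print(len(model.graph.node), "nodes in the graph")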

Example 8: _get_onnx_outputs_info

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, _string_types):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Developer: apple, Project: coremltools, Lines of code: 18, Source file: _backend.py

Example 9: prepare

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def prepare(
        cls,
        model,  # type: ModelProto
        device="CPU",  # type: Text
        minimum_ios_deployment_target="12",  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> CoreMLRep
        super(CoreMLBackend, cls).prepare(model, device, **kwargs)
        if DEBUG:
            with open("/tmp/node_model.onnx", "wb") as f:
                s = model.SerializeToString()
                f.write(s)
        coreml_model = convert(
            model, minimum_ios_deployment_target=minimum_ios_deployment_target
        )
        if DEBUG:
            coreml_model.save("/tmp/node_model.mlmodel")
        onnx_outputs_info = _get_onnx_outputs_info(model)
        return CoreMLRep(
            coreml_model,
            onnx_outputs_info,
            device == "CPU",
            minimum_ios_deployment_target=minimum_ios_deployment_target,
        ) 
Developer: apple, Project: coremltools, Lines of code: 27, Source file: _backend.py

Example 10: is_compatible

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def is_compatible(
        cls,
        model,  # type: ModelProto
        device="CPU",  # type: Text
        **kwargs  # type: Any
    ):  # type: (...) -> bool
        # Return whether the model is compatible with CoreML.
        """
        This function will gradually grow to cover more cases.
        Need to be careful of false negatives. There are some cases that seemingly
        are not supported on CoreML, which the graph transformer optimizes and converts to
        a graph that can be converted to CoreML.

        2. Unsupported ops: If graph has one of unsupported op, exit

        """
        ## TODO: Add un-supported ops
        unsupported_ops = []
        graph = model.graph
        for node in graph.node:
            if node.op_type in unsupported_ops:
                return False
        return True 
Developer: apple, Project: coremltools, Lines of code: 25, Source file: _backend.py

Example 11: save

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def save(self, dst):
        if self._onnx_model_path:
            shutil.copyfile(
                self._onnx_model_path, self.spec._saved_model_file_path(dst)
            )
        elif self._model_proto:
            try:
                import onnx
            except ImportError:
                raise MissingDependencyException(
                    '"onnx" package is required for packing with OnnxModelArtifact'
                )
            onnx.save_model(self._model_proto, self.spec._saved_model_file_path(dst))
        else:
            raise InvalidArgument(
                'onnx.ModelProto or a model file path is required to pack an '
                'OnnxModelArtifact'
            ) 
Developer: bentoml, Project: BentoML, Lines of code: 20, Source file: onnx_model_artifact.py
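
When no source file path is available, the artifact above persists the in-memory ModelProto with onnx.save_model. A stand-alone sketch of that branch, using placeholder paths:

import onnx

model = onnx.load("model.onnx")              # placeholder path
# Persist the in-memory ModelProto, as the save() branch above does.
onnx.save_model(model, "packed_model.onnx")  # placeholder destination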

Example 12: prepare

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def prepare(cls, model, device='CUDA:0', **kwargs):
        """Build a TensorRT engine from the onnx model.

        Parameters
        ----------
        model : onnx.ModelProto
            The onnx model.
        device : str, optional
            The executing device.

        Returns
        -------
        dragon.vm.tensorrt.ONNXBackendRep
            The backend rep.

        """
        if not isinstance(device, Device):
            device = Device(device)
        return ONNXBackendRep(model, device, **kwargs) 
Developer: seetaresearch, Project: dragon, Lines of code: 21, Source file: backend.py

Example 13: run_model

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def run_model(cls, model, inputs, device='CUDA:0', **kwargs):
        """Build and run a TensorRT engine from the onnx model.

        Parameters
        ----------
        model : onnx.ModelProto
            The onnx model.
        inputs : Union[Sequence, Dict]
            The input arrays.
        device : str, optional
            The executing device.

        Returns
        -------
        namedtuple
            The model outputs.

        """
        return cls.prepare(model, device, **kwargs).run(inputs) 
Developer: seetaresearch, Project: dragon, Lines of code: 21, Source file: backend.py
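
Examples 12 and 13 follow the standard ONNX backend API: prepare builds a reusable backend representation once, while run_model is a convenience wrapper that prepares and runs in a single call. A hedged sketch of both call patterns, where BackendClass stands in for whichever backend is in use (the dragon TensorRT backend here needs its own installation, so those calls are left as comments):

import numpy as np
import onnx

model = onnx.load("model.onnx")                           # placeholder path
x = np.random.randn(1, 3, 224, 224).astype(np.float32)   # placeholder input

# One-shot: build the engine and run in a single call.
# outputs = BackendClass.run_model(model, [x], device="CUDA:0")

# Reusable: prepare once, then execute repeatedly on the returned rep.
# rep = BackendClass.prepare(model, device="CUDA:0")
# outputs = rep.run([x])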

Example 14: optimize_model

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def optimize_model(input,
                   model_type,
                   num_heads,
                   hidden_size,
                   opt_level=99,
                   optimization_options=None):
    (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]

    input_model_path = input
    if run_onnxruntime and opt_level > 0:
        input_model_path = optimize_by_onnxruntime(input_model_path, use_gpu=False, opt_level=opt_level)
        logger.info("Use OnnxRuntime to optimize and save the optimized model to {}".format(input_model_path))

    model = ModelProto()
    with open(input_model_path, "rb") as f:
        model.ParseFromString(f.read())

    if model.producer_name and producer != model.producer_name:
        logger.warning(
            f"Model producer not matched: Expect {producer},  Got {model.producer_name} {model.producer_version}. Please specify correct --model_type parameter."
        )

    if optimization_options is None:
        optimization_options = BertOptimizationOptions(model_type)

    bert_model = optimizer_class(model, num_heads, hidden_size)
    bert_model.optimize(optimization_options)

    return bert_model 
Developer: deepset-ai, Project: FARM, Lines of code: 31, Source file: bert_model_optimization.py
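
optimize_model above loads the model by constructing an empty ModelProto and parsing raw protobuf bytes into it; onnx.load is the equivalent convenience call. A minimal sketch of both, using a placeholder path:

import onnx
from onnx import ModelProto

# Manual loading, as optimize_model does above ("model.onnx" is a placeholder).
model = ModelProto()
with open("model.onnx", "rb") as f:
    model.ParseFromString(f.read())

# Equivalent convenience call.
same_model = onnx.load("model.onnx")
print(model.producer_name, model.producer_version)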

Example 15: save_model_to_file

# Required import: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def save_model_to_file(self, output_path):
        logger.info(f"Output model to {output_path}")

        if output_path.endswith(".json"):
            assert isinstance(self.model, ModelProto)
            with open(output_path, "w") as out:
                out.write(str(self.model))
        else:
            with open(output_path, "wb") as out:
                out.write(self.model.SerializeToString()) 
Developer: deepset-ai, Project: FARM, Lines of code: 12, Source file: OnnxModel.py
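
save_model_to_file above writes either a human-readable text form (str(model)) or the binary protobuf (SerializeToString), keyed off the output extension. A stand-alone sketch of both forms, with placeholder paths:

import onnx

model = onnx.load("model.onnx")   # placeholder path

# Binary protobuf, as the else branch above writes.
with open("model_out.onnx", "wb") as out:
    out.write(model.SerializeToString())

# Human-readable text form, as the ".json" branch above writes.
with open("model_out.txt", "w") as out:
    out.write(str(model))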


Note: The onnx.ModelProto examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.