

Python onnx.load Method Code Examples

This article collects typical usage examples of the Python onnx.load method. If you have been wondering what exactly onnx.load does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the onnx module that this method belongs to.


The following presents 15 code examples of the onnx.load method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
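Before the project-specific examples, here is a minimal sketch of the canonical onnx.load workflow: load a serialized model from disk, validate it, and inspect the graph. The file name model.onnx is a placeholder path.

import onnx

# Load a serialized ONNX model from disk into a ModelProto.
model = onnx.load('model.onnx')

# Verify that the model is a well-formed ONNX graph.
onnx.checker.check_model(model)

# Print a human-readable summary of the graph.
print(onnx.helper.printable_graph(model.graph))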

Example 1: test_rewrite_onnx_file

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def test_rewrite_onnx_file():
    input_rewriter.rewrite_onnx_file(
        'out/backprop_test_mnist_mlp/model.onnx',
        'out/backprop_test_mnist_mlp/model_bs3.onnx',
        [input_rewriter.Type(shape=(3, 784)),
         input_rewriter.Type(shape=(3, 10))])
    xmodel = onnx.load('out/backprop_test_mnist_mlp/model_bs3.onnx')
    xgraph = xmodel.graph

    def get_shape(vi):
        return tuple([d.dim_value for d in vi.type.tensor_type.shape.dim])

    inputs = _get_inputs(xgraph)

    assert 1 == inputs[0].type.tensor_type.elem_type
    assert 1 == inputs[1].type.tensor_type.elem_type
    assert (3, 784) == get_shape(inputs[0])
    assert (3, 10) == get_shape(inputs[1])
    assert 1 == xgraph.output[0].type.tensor_type.elem_type
    assert () == get_shape(xgraph.output[0])
    for init in xgraph.initializer:
        assert 1 == init.data_type 
Developer ID: pfnet-research, Project: chainer-compiler, Lines of code: 24, Source: input_rewriter_test.py

Example 2: test_rewrite_onnx_testdir

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def test_rewrite_onnx_testdir():
    input_rewriter.rewrite_onnx_testdir(
        'out/backprop_test_mnist_mlp',
        'out/backprop_test_mnist_mlp_fp64',
        [input_rewriter.Type(dtype=np.float64),
         input_rewriter.Type(dtype=np.float64)])
    xmodel = onnx.load('out/backprop_test_mnist_mlp_fp64/model.onnx')
    xgraph = xmodel.graph

    assert 11 == xgraph.input[0].type.tensor_type.elem_type
    assert 11 == xgraph.input[1].type.tensor_type.elem_type
    assert 11 == xgraph.output[0].type.tensor_type.elem_type
    for init in xgraph.initializer:
        assert 11 == init.data_type

    for tensor_proto in glob.glob(
            'out/backprop_test_mnist_mlp_fp64/test_data_set_0/*.pb'):
        xtensor = onnx.load_tensor(tensor_proto)
        assert 11 == xtensor.data_type 
Developer ID: pfnet-research, Project: chainer-compiler, Lines of code: 21, Source: input_rewriter_test.py
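A note on the magic numbers in Examples 1 and 2: elem_type and data_type carry values of the onnx.TensorProto.DataType enum, where 1 is FLOAT (float32) and 11 is DOUBLE (float64). The asserts read more clearly against the named constants, as this small sketch confirms:

import onnx

# DataType enum values used by the two tests above.
assert onnx.TensorProto.FLOAT == 1    # float32, asserted in Example 1
assert onnx.TensorProto.DOUBLE == 11  # float64, asserted in Example 2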

Example 3: _get_onnx_outputs_info

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def _get_onnx_outputs_info(model): # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary 
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, str):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        # Guard: without this branch, onnx_model would be undefined for other input types.
        raise TypeError("model must be a file path or an onnx.ModelProto")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Developer ID: onnx, Project: onnx-coreml, Lines of code: 18, Source: _backend.py

Example 4: convert

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def convert(infile, outfile, **kwargs):
  """Convert pb.

  Args:
    infile: Input path.
    outfile: Output path.
    **kwargs: Other args for converting.

  Returns:
    None.
  """
  logging_level = kwargs.get("logging_level", "INFO")
  common.logger.setLevel(logging_level)
  common.logger.handlers[0].setLevel(logging_level)

  common.logger.info("Start converting onnx pb to tf pb:")
  onnx_model = onnx.load(infile)
  tf_rep = backend.prepare(onnx_model, **kwargs)
  tf_rep.export_graph(outfile)
  common.logger.info("Converting completes successfully.") 
Developer ID: onnx, Project: onnx-tensorflow, Lines of code: 22, Source: converter.py

Example 5: test_output_grad

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def test_output_grad(tmpdir, model, x, train, disable_experimental_warning):
    path = str(tmpdir)
    export_testcase(model, (x,), path, output_grad=True, train=train)

    model_filename = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filename)
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'input_0.pb'))
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'output_0.pb'))

    onnx_model = onnx.load(model_filename)
    initializer_names = {i.name for i in onnx_model.graph.initializer}

    # 12 gradient files (gradient_0 .. gradient_11) should be there
    for i in range(12):
        tensor_filename = os.path.join(
            path, 'test_data_set_0', 'gradient_{}.pb'.format(i))
        assert os.path.isfile(tensor_filename)
        tensor = onnx.load_tensor(tensor_filename)
        assert tensor.name.startswith('param_')
        assert tensor.name in initializer_names
    assert not os.path.isfile(
        os.path.join(path, 'test_data_set_0', 'gradient_12.pb')) 
Developer ID: chainer, Project: chainer, Lines of code: 24, Source: test_export_testcase.py

Example 6: from_save

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def from_save(cls, name:str, model_builder:ModelBuilder) -> AbsModel:
        r'''
        Instantiate a :class:`~lumin.nn.models.model.Model` and load its saved state from file.
        
        Arguments:
            name: name of file containing saved state
            model_builder: :class:`~lumin.nn.models.model_builder.ModelBuilder` which was used to construct the network
        
        Returns:
            Instantiated :class:`~lumin.nn.models.model.Model` with network weights, optimiser state, and input mask loaded from saved state
        
        Examples::
            >>> model = Model.from_save('weights/model.h5', model_builder)
        '''

        m = cls(model_builder)
        m.load(name)
        return m 
Developer ID: GilesStrong, Project: lumin, Lines of code: 20, Source: model.py

Example 7: load

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def load(self, name:str, model_builder:ModelBuilder=None) -> None:
        r'''
        Load model, optimiser, and input mask states from file

        Arguments:
            name: name of save file
            model_builder: if :class:`~lumin.nn.models.model.Model` was not initialised with a :class:`~lumin.nn.models.model_builder.ModelBuilder`, you will need to pass one here
        '''

        # TODO: update map location when device choice is changeable by user

        if model_builder is not None: self.model, self.opt, self.loss, self.input_mask = model_builder.get_model()
        state = torch.load(name, map_location='cuda' if torch.cuda.is_available() else 'cpu')
        self.model.load_state_dict(state['model'])
        self.opt.load_state_dict(state['opt'])
        self.input_mask = state['input_mask']
        self.objective = self.model_builder.objective if model_builder is None else model_builder.objective 
Developer ID: GilesStrong, Project: lumin, Lines of code: 19, Source: model.py

Example 8: export2tfpb

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def export2tfpb(self, name:str, bs:int=1) -> None:
        r'''
        Export network to Tensorflow ProtocolBuffer format, via ONNX.
        Note that ONNX expects a fixed batch size (bs), which is the number of datapoints you wish to pass through the model concurrently.

        Arguments:
            name: filename for exported file
            bs: batch size for exported models
        '''

        import onnx
        from onnx_tf.backend import prepare
        warnings.warn("""Tensorflow ProtocolBuffer export of LUMIN models (via ONNX) has not been fully explored or sufficiently tested yet.
                         Please use with caution, and report any trouble""")
        self.export2onnx(name, bs)
        m = onnx.load(f'{name}.onnx')
        tf_rep = prepare(m)
        tf_rep.export_graph(f'{name}.pb') 
Developer ID: GilesStrong, Project: lumin, Lines of code: 20, Source: model.py

Example 9: _get_onnx_outputs_info

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, _string_types):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        # Guard: without this branch, onnx_model would be undefined for other input types.
        raise TypeError("model must be a file path or an onnx.ModelProto")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Developer ID: apple, Project: coremltools, Lines of code: 18, Source: _backend.py

Example 10: LoadLabels

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def LoadLabels(label_file):
    """load labels from file"""
    if not os.path.isfile(label_file):
        logging.error("Can not find lable file {}.".format(label_file))
        return None
    labels = {}
    with open(label_file) as l:
        label_lines = [line.rstrip('\n') for line in l.readlines()]
    for line in label_lines:
        result, code = line.partition(" ")[::2]
        if code and result:
            result = result.strip()
            result = result[result.index("/")+1:]
            if result in labels:
                logging.warning("Repeated name {0} for code {1}in label file. Ignored!"
                                .format(result, code))
            else:
                labels[result] = int(code.strip())
    return labels 
Developer ID: intel, Project: optimized-models, Lines of code: 21, Source: common_caffe2.py

Example 11: LoadValidation

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def LoadValidation(validation_file):
    """load validation file"""
    if not os.path.isfile(validation_file):
        logging.error("Can not find validation file {}."
                      .format(validation_file))
        return None
    validation = {}
    with open(validation_file) as v:
        validation_lines = [line.rstrip('\n') for line in v.readlines()]
    for line in validation_lines:
        name, code = line.partition(" ")[::2]
        if name and code:
            name = name.strip()
            if name in validation:
                logging.warning("Repeated name {0} for code {1} in"
                                " validation file. Ignored!"
                                .format(name, code))
            else:
                validation[name] = int(code.strip())
    return validation 
Developer ID: intel, Project: optimized-models, Lines of code: 22, Source: common_caffe2.py

Example 12: onnx_inference

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def onnx_inference(args):
    # Load the ONNX model
    model = onnx.load("models/deepspeech_{}.onnx".format(args.continue_from))

    # Check that the IR is well formed
    onnx.checker.check_model(model)

    print(onnx.helper.printable_graph(model.graph))

    print("model checked, preparing backend!")
    rep = backend.prepare(model, device="CPU")  # or "CUDA:0"

    print("running inference!")

    # Hard coded input dim
    inputs = np.random.randn(16, 1, 161, 129).astype(np.float32)

    start = time.time()
    outputs = rep.run(inputs)
    print("time used: {}".format(time.time() - start))
    # To run networks with more than one input, pass a tuple
    # rather than a single numpy ndarray.
    print(outputs[0]) 
Developer ID: mlperf, Project: inference, Lines of code: 25, Source: convert_onnx.py

Example 13: stylize_onnx_caffe2

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
Developer ID: pytorch, Project: examples, Lines of code: 19, Source: neural_style.py

Example 14: import_model

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def import_model(model_file):
    """Imports the supplied ONNX model file into MXNet symbol and parameters.

    Parameters
    ----------
    model_file : ONNX model file name

    Returns
    -------
    sym : mx.symbol
        Compatible mxnet symbol

    params : dict of str to mx.ndarray
        Dict of converted parameters stored in mx.ndarray format
    """
    graph = GraphProto()

    # loads model file and returns ONNX protobuf object
    model_proto = onnx.load(model_file)
    sym, params = graph.from_onnx(model_proto.graph)
    return sym, params 
Developer ID: onnx, Project: onnx-mxnet, Lines of code: 23, Source: __init__.py
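A hedged usage sketch for the function above, assuming the package is importable as onnx_mxnet per the onnx-mxnet project ('model.onnx' is a placeholder path):

import onnx_mxnet

# Convert an ONNX file into an MXNet symbol and parameter dict.
sym, params = onnx_mxnet.import_model('model.onnx')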

Example 15: __init__

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def __init__(self, onnx_model_proto, make_deepcopy=False):
        """Creates a ModelWrapper instance.
        onnx_model_proto can be either a ModelProto instance, or a string
        with the path to a stored .onnx file on disk, or serialized bytes.
        The make_deepcopy option controls whether a deep copy of the ModelProto
        is made internally.
        """
        if isinstance(onnx_model_proto, str):
            self._model_proto = onnx.load(onnx_model_proto)
        elif isinstance(onnx_model_proto, bytes):
            self._model_proto = onnx.load_from_string(onnx_model_proto)
        else:
            if make_deepcopy:
                self._model_proto = copy.deepcopy(onnx_model_proto)
            else:
                self._model_proto = onnx_model_proto 
Developer ID: Xilinx, Project: finn, Lines of code: 18, Source: modelwrapper.py
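As a hedged usage sketch of the constructor above (names mirror the __init__ docstring; 'model.onnx' is a placeholder path), the three accepted input forms look like this:

# From a path to an .onnx file on disk.
wrapper = ModelWrapper('model.onnx')

# From serialized bytes.
with open('model.onnx', 'rb') as f:
    wrapper_from_bytes = ModelWrapper(f.read())

# From an existing ModelProto, with an internal deep copy.
wrapper_copy = ModelWrapper(wrapper._model_proto, make_deepcopy=True)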


Note: The onnx.load examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by programmers across the community; copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.