

Python onnx.load Method Code Examples

This article collects typical usage examples of the Python onnx.load method. If you are wondering what onnx.load does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the onnx package.


The following presents 15 code examples of the onnx.load method, sorted by popularity by default.
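Before the individual examples, here is a minimal sketch of the typical onnx.load workflow (the file name "model.onnx" is a placeholder): load a serialized model from disk, validate it with the checker, and inspect its graph.

import onnx

# Load a serialized ModelProto from disk ("model.onnx" is a placeholder path)
model = onnx.load("model.onnx")

# Validate that the model is a well-formed ONNX graph
onnx.checker.check_model(model)

# Inspect the graph: list input and output names
print([i.name for i in model.graph.input])
print([o.name for o in model.graph.output])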

Example 1: test_rewrite_onnx_file

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def test_rewrite_onnx_file():
    input_rewriter.rewrite_onnx_file(
        'out/backprop_test_mnist_mlp/model.onnx',
        'out/backprop_test_mnist_mlp/model_bs3.onnx',
        [input_rewriter.Type(shape=(3, 784)),
         input_rewriter.Type(shape=(3, 10))])
    xmodel = onnx.load('out/backprop_test_mnist_mlp/model_bs3.onnx')
    xgraph = xmodel.graph

    def get_shape(vi):
        return tuple([d.dim_value for d in vi.type.tensor_type.shape.dim])

    inputs = _get_inputs(xgraph)

    # elem_type 1 corresponds to onnx.TensorProto.FLOAT
    assert 1 == inputs[0].type.tensor_type.elem_type
    assert 1 == inputs[1].type.tensor_type.elem_type
    assert (3, 784) == get_shape(inputs[0])
    assert (3, 10) == get_shape(inputs[1])
    assert 1 == xgraph.output[0].type.tensor_type.elem_type
    assert () == get_shape(xgraph.output[0])
    for init in xgraph.initializer:
        assert 1 == init.data_type 
Author: pfnet-research, Project: chainer-compiler, Lines: 24, Source: input_rewriter_test.py

Example 2: test_rewrite_onnx_testdir

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def test_rewrite_onnx_testdir():
    input_rewriter.rewrite_onnx_testdir(
        'out/backprop_test_mnist_mlp',
        'out/backprop_test_mnist_mlp_fp64',
        [input_rewriter.Type(dtype=np.float64),
         input_rewriter.Type(dtype=np.float64)])
    xmodel = onnx.load('out/backprop_test_mnist_mlp_fp64/model.onnx')
    xgraph = xmodel.graph

    # elem_type 11 corresponds to onnx.TensorProto.DOUBLE
    assert 11 == xgraph.input[0].type.tensor_type.elem_type
    assert 11 == xgraph.input[1].type.tensor_type.elem_type
    assert 11 == xgraph.output[0].type.tensor_type.elem_type
    for init in xgraph.initializer:
        assert 11 == init.data_type

    for tensor_proto in glob.glob(
            'out/backprop_test_mnist_mlp_fp64/test_data_set_0/*.pb'):
        xtensor = onnx.load_tensor(tensor_proto)
        assert 11 == xtensor.data_type 
Author: pfnet-research, Project: chainer-compiler, Lines: 21, Source: input_rewriter_test.py

Example 3: _get_onnx_outputs_info

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def _get_onnx_outputs_info(model): # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary 
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, str):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        raise TypeError("model must be a file path or an onnx.ModelProto")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Author: onnx, Project: onnx-coreml, Lines: 18, Source: _backend.py

Example 4: convert

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def convert(infile, outfile, **kwargs):
  """Convert pb.

  Args:
    infile: Input path.
    outfile: Output path.
    **kwargs: Other args for converting.

  Returns:
    None.
  """
  logging_level = kwargs.get("logging_level", "INFO")
  common.logger.setLevel(logging_level)
  common.logger.handlers[0].setLevel(logging_level)

  common.logger.info("Start converting onnx pb to tf pb:")
  onnx_model = onnx.load(infile)
  tf_rep = backend.prepare(onnx_model, **kwargs)
  tf_rep.export_graph(outfile)
  common.logger.info("Converting completes successfully.") 
Author: onnx, Project: onnx-tensorflow, Lines: 22, Source: converter.py
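A hypothetical invocation of the converter above (both file names are placeholders); any extra keyword arguments are forwarded to backend.prepare:

# Hypothetical call: convert an ONNX model file to a TensorFlow GraphDef file
convert("mnist.onnx", "mnist.pb", logging_level="DEBUG")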

Example 5: test_output_grad

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def test_output_grad(tmpdir, model, x, train, disable_experimental_warning):
    path = str(tmpdir)
    export_testcase(model, (x,), path, output_grad=True, train=train)

    model_filename = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filename)
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'input_0.pb'))
    assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'output_0.pb'))

    onnx_model = onnx.load(model_filename)
    initializer_names = {i.name for i in onnx_model.graph.initializer}

    # 12 gradient files (gradient_0 .. gradient_11) should be there
    for i in range(12):
        tensor_filename = os.path.join(
            path, 'test_data_set_0', 'gradient_{}.pb'.format(i))
        assert os.path.isfile(tensor_filename)
        tensor = onnx.load_tensor(tensor_filename)
        assert tensor.name.startswith('param_')
        assert tensor.name in initializer_names
    assert not os.path.isfile(
        os.path.join(path, 'test_data_set_0', 'gradient_12.pb')) 
Author: chainer, Project: chainer, Lines: 24, Source: test_export_testcase.py

Example 6: from_save

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def from_save(cls, name:str, model_builder:ModelBuilder) -> AbsModel:
        r'''
        Instantiate a :class:`~lumin.nn.models.model.Model` and load its saved state from file.
        
        Arguments:
            name: name of file containing saved state
            model_builder: :class:`~lumin.nn.models.model_builder.ModelBuilder` which was used to construct the network
        
        Returns:
            Instantiated :class:`~lumin.nn.models.model.Model` with network weights, optimiser state, and input mask loaded from saved state
        
        Examples::
            >>> model = Model.from_save('weights/model.h5', model_builder)
        '''

        m = cls(model_builder)
        m.load(name)
        return m 
Author: GilesStrong, Project: lumin, Lines: 20, Source: model.py

Example 7: load

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def load(self, name:str, model_builder:ModelBuilder=None) -> None:
        r'''
        Load model, optimiser, and input mask states from file

        Arguments:
            name: name of save file
            model_builder: if :class:`~lumin.nn.models.model.Model` was not initialised with a :class:`~lumin.nn.models.model_builder.ModelBuilder`, you will need to pass one here
        '''

        # TODO: update map location when device choice is changeable by user

        if model_builder is not None: self.model, self.opt, self.loss, self.input_mask = model_builder.get_model()
        state = torch.load(name, map_location='cuda' if torch.cuda.is_available() else 'cpu')
        self.model.load_state_dict(state['model'])
        self.opt.load_state_dict(state['opt'])
        self.input_mask = state['input_mask']
        self.objective = self.model_builder.objective if model_builder is None else model_builder.objective 
Author: GilesStrong, Project: lumin, Lines: 19, Source: model.py

Example 8: export2tfpb

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def export2tfpb(self, name:str, bs:int=1) -> None:
        r'''
        Export network to TensorFlow ProtocolBuffer format, via ONNX.
        Note that ONNX expects a fixed batch size (bs), which is the number of datapoints you wish to pass through the model concurrently.

        Arguments:
            name: filename for exported file
            bs: batch size for exported models
        '''

        import onnx
        from onnx_tf.backend import prepare
        warnings.warn("""Tensorflow ProtocolBuffer export of LUMIN models (via ONNX) has not been fully explored or sufficiently tested yet.
                         Please use with caution, and report any trouble""")
        self.export2onnx(name, bs)
        m = onnx.load(f'{name}.onnx')
        tf_rep = prepare(m)
        tf_rep.export_graph(f'{name}.pb') 
Author: GilesStrong, Project: lumin, Lines: 20, Source: model.py
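A hypothetical usage sketch, assuming `model` is a trained lumin Model instance; export2tfpb writes both <name>.onnx and <name>.pb for the given base name:

# Hypothetical usage: writes net.onnx via export2onnx, then net.pb
model.export2tfpb('net', bs=32)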

Example 9: _get_onnx_outputs_info

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, _string_types):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model
    else:
        raise TypeError("model must be a file path or an onnx.ModelProto")

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict 
Author: apple, Project: coremltools, Lines: 18, Source: _backend.py

Example 10: LoadLabels

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def LoadLabels(label_file):
    """load labels from file"""
    if not os.path.isfile(label_file):
        logging.error("Can not find lable file {}.".format(label_file))
        return None
    labels = {}
    with open(label_file) as l:
        label_lines = [line.rstrip('\n') for line in l.readlines()]
    for line in label_lines:
        result, code = line.partition(" ")[::2]
        if code and result:
            result = result.strip()
            result = result[result.index("/")+1:]
            if result in labels:
                logging.warning("Repeated name {0} for code {1}in label file. Ignored!"
                                .format(result, code))
            else:
                labels[result] = int(code.strip())
    return labels 
Author: intel, Project: optimized-models, Lines: 21, Source: common_caffe2.py
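A sketch of the label-file format the parser above expects (the file name and entries are hypothetical): each line holds a dir/name path followed by a numeric code, and the part after the first '/' becomes the dictionary key.

# Hypothetical label file (labels.txt), one "dir/name code" pair per line:
#   n01440764/ILSVRC2012_val_00000001.JPEG 65
#   n01443537/ILSVRC2012_val_00000002.JPEG 970
labels = LoadLabels("labels.txt")
# -> {"ILSVRC2012_val_00000001.JPEG": 65, "ILSVRC2012_val_00000002.JPEG": 970}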

Example 11: LoadValidation

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def LoadValidation(validation_file):
    """load validation file"""
    if not os.path.isfile(validation_file):
        logging.error("Can not find validation file {}."
                      .format(validation_file))
        return None
    validation = {}
    with open(validation_file) as v:
        validation_lines = [line.rstrip('\n') for line in v.readlines()]
    for line in validation_lines:
        name, code = line.partition(" ")[::2]
        if name and code:
            name = name.strip()
            if name in validation:
                logging.warning("Repeated name {0} for code {1} in"
                                " validation file. Ignored!"
                                .format(name, code))
            else:
                validation[name] = int(code.strip())
    return validation 
Author: intel, Project: optimized-models, Lines: 22, Source: common_caffe2.py

Example 12: onnx_inference

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def onnx_inference(args):
    # Load the ONNX model
    model = onnx.load("models/deepspeech_{}.onnx".format(args.continue_from))

    # Check that the IR is well formed
    onnx.checker.check_model(model)

    print(onnx.helper.printable_graph(model.graph))

    print("model checked, preparing backend!")
    rep = backend.prepare(model, device="CPU")  # or "CUDA:0" for GPU

    print("running inference!")

    # Hard coded input dim
    inputs = np.random.randn(16, 1, 161, 129).astype(np.float32)

    start = time.time()
    outputs = rep.run(inputs)
    print("time used: {}".format(time.time() - start))
    # To run networks with more than one input, pass a tuple
    # rather than a single numpy ndarray.
    print(outputs[0]) 
Author: mlperf, Project: inference, Lines: 25, Source: convert_onnx.py

Example 13: stylize_onnx_caffe2

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
Author: pytorch, Project: examples, Lines: 19, Source: neural_style.py

Example 14: import_model

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def import_model(model_file):
    """Imports the supplied ONNX model file into MXNet symbol and parameters.

    Parameters
    ----------
    model_file : ONNX model file name

    Returns
    -------
    sym : mx.symbol
        Compatible mxnet symbol

    params : dict of str to mx.ndarray
        Dict of converted parameters stored in mx.ndarray format
    """
    graph = GraphProto()

    # loads model file and returns ONNX protobuf object
    model_proto = onnx.load(model_file)
    sym, params = graph.from_onnx(model_proto.graph)
    return sym, params 
Author: onnx, Project: onnx-mxnet, Lines: 23, Source: __init__.py
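A minimal usage sketch ('model.onnx' is a placeholder path); the returned symbol and parameters can then be bound into an MXNet module:

# Hypothetical usage: load an ONNX file into an MXNet symbol and parameter dict
sym, params = import_model('model.onnx')
# `sym` can now be bound into an mx.mod.Module; input names follow the ONNX graph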

Example 15: __init__

# Required import: import onnx [as alias]
# Or: from onnx import load [as alias]
def __init__(self, onnx_model_proto, make_deepcopy=False):
        """Creates a ModelWrapper instance.
        onnx_model_proto can be either a ModelProto instance, or a string
        with the path to a stored .onnx file on disk, or serialized bytes.
        The make_deepcopy option controls whether a deep copy of the ModelProto
        is made internally.
        """
        if isinstance(onnx_model_proto, str):
            self._model_proto = onnx.load(onnx_model_proto)
        elif isinstance(onnx_model_proto, bytes):
            self._model_proto = onnx.load_from_string(onnx_model_proto)
        else:
            if make_deepcopy:
                self._model_proto = copy.deepcopy(onnx_model_proto)
            else:
                self._model_proto = onnx_model_proto 
Author: Xilinx, Project: finn, Lines: 18, Source: modelwrapper.py
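A short sketch of the three accepted constructor inputs (the path is a placeholder):

# Three ways to construct a ModelWrapper ("model.onnx" is a placeholder)
w1 = ModelWrapper("model.onnx")                    # from a file path
w2 = ModelWrapper(onnx.load("model.onnx"))         # from a ModelProto (shared unless make_deepcopy=True)
with open("model.onnx", "rb") as f:
    w3 = ModelWrapper(f.read())                    # from serialized bytes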


Note: the onnx.load examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms; the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; consult each project's License before redistributing or using the code. Do not reproduce without permission.