

Python onnx.load_from_string Method Code Examples

This article collects typical usage examples of the onnx.load_from_string method in Python. If you have been wondering how exactly onnx.load_from_string works or how to use it, the curated code examples below may help. You can also explore further usage examples from the onnx package.


The sections below present 10 code examples of the onnx.load_from_string method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
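Before the examples, here is a minimal sketch of what onnx.load_from_string does: it parses a serialized ModelProto from a bytes object, the inverse of ModelProto.SerializeToString(). The tiny Identity model below is an assumption for illustration only:

import onnx
from onnx import helper, TensorProto

# Build a trivial one-node model (hypothetical, for illustration).
node = helper.make_node('Identity', ['x'], ['y'])
graph = helper.make_graph(
    [node], 'tiny',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])],
)
model = helper.make_model(graph)

# Round-trip: serialize to bytes, then parse back with load_from_string.
data = model.SerializeToString()
restored = onnx.load_from_string(data)
assert restored == model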

Example 1: __init__

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: import copy
def __init__(self, onnx_model_proto, make_deepcopy=False):
        """Creates a ModelWrapper instance.
        onnx_model_proto can be either a ModelProto instance, or a string
        with the path to a stored .onnx file on disk, or serialized bytes.
        The make_deepcopy option controls whether a deep copy of the ModelProto
        is made internally.
        """
        if isinstance(onnx_model_proto, str):
            self._model_proto = onnx.load(onnx_model_proto)
        elif isinstance(onnx_model_proto, bytes):
            self._model_proto = onnx.load_from_string(onnx_model_proto)
        else:
            if make_deepcopy:
                self._model_proto = copy.deepcopy(onnx_model_proto)
            else:
                self._model_proto = onnx_model_proto 
Developer: Xilinx, Project: finn, Lines: 18, Source file: modelwrapper.py
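A hypothetical usage sketch for the wrapper above (the file path is an assumption):

import onnx

# From serialized bytes already in memory (bytes branch -> onnx.load_from_string):
with open('model.onnx', 'rb') as f:  # hypothetical path
    wrapper = ModelWrapper(f.read())

# Or directly from a path on disk (str branch -> onnx.load):
wrapper = ModelWrapper('model.onnx')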

Example 2: export_onnx_model

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: import io, import torch,
# from torch.onnx import OperatorExportTypes, and from onnx import optimizer
def export_onnx_model(model, inputs, passes):
    """Trace and export a model to onnx format. Modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`
        passes (None or list[str]): the optimization passes to apply to the ONNX model

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training
    # state of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's optimization passes, if any were requested
    if passes is not None:
        all_passes = optimizer.get_available_passes()
        assert all(p in all_passes for p in passes), \
            f'Only {all_passes} are supported'
        onnx_model = optimizer.optimize(onnx_model, passes)
    return onnx_model
Developer: open-mmlab, Project: mmdetection, Lines: 43, Source file: pytorch2onnx.py
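A hypothetical usage sketch for export_onnx_model (the toy module, input shape, and pass name are assumptions; 'eliminate_nop_transpose' is one of the standard optimizer pass names):

import torch

net = torch.nn.Linear(4, 2).eval()   # toy module, hypothetical
dummy_inputs = (torch.randn(1, 4),)
onnx_model = export_onnx_model(net, dummy_inputs,
                               passes=['eliminate_nop_transpose'])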

Example 3: convert_version

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: from onnx import ModelProto; C is the compiled
# backend binding (onnx.onnx_cpp2py_export.version_converter in the ONNX source)
def convert_version(model, target_version):  # type: (ModelProto, int) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('VersionConverter only accepts ModelProto as model, incorrect type: {}'.format(type(model)))
    if not isinstance(target_version, int):
        raise ValueError('VersionConverter only accepts int as target_version, incorrect type: {}'.format(type(target_version)))
    model_str = model.SerializeToString()
    converted_model_str = C.convert_version(model_str, target_version)
    return onnx.load_from_string(converted_model_str) 
Developer: mlperf, Project: training_results_v0.6, Lines: 10, Source file: version_converter.py
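A hypothetical usage sketch (the model path and target opset are assumptions):

import onnx

model = onnx.load('model.onnx')         # hypothetical path
converted = convert_version(model, 11)  # convert the model to opset 11
onnx.checker.check_model(converted)     # sanity-check the result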

Example 4: infer_shapes

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: from onnx import ModelProto; C is the compiled
# backend binding (onnx.onnx_cpp2py_export.shape_inference in the ONNX source)
def infer_shapes(model):  # type: (ModelProto) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('Shape inference only accepts ModelProto, '
                         'incorrect type: {}'.format(type(model)))

    model_str = model.SerializeToString()
    inferred_model_str = C.infer_shapes(model_str)
    return onnx.load_from_string(inferred_model_str) 
Developer: mlperf, Project: training_results_v0.6, Lines: 10, Source file: shape_inference.py
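A hypothetical usage sketch; after shape inference, the shapes of intermediate tensors show up in graph.value_info:

import onnx

model = onnx.load('model.onnx')  # hypothetical path
inferred = infer_shapes(model)
for vi in inferred.graph.value_info:
    print(vi.name, vi.type.tensor_type.shape)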

Example 5: optimize

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: from onnx import ModelProto; C is the compiled
# backend binding (onnx.onnx_cpp2py_export.optimizer in the ONNX source)
def optimize(model, passes=[]):  # type: (ModelProto, Sequence[Text]) -> ModelProto
    if len(passes) == 0:
        passes = ['eliminate_nop_transpose',
                  'fuse_consecutive_transposes',
                  'fuse_transpose_into_gemm']
    if not isinstance(model, ModelProto):
        raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))

    model_str = model.SerializeToString()
    optimized_model_str = C.optimize(model_str, passes)
    return onnx.load_from_string(optimized_model_str) 
Developer: mlperf, Project: training_results_v0.6, Lines: 13, Source file: optimizer.py
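A hypothetical usage sketch (the model path is an assumption); calling with no passes applies the three transpose-related defaults listed above:

import onnx

model = onnx.load('model.onnx')                 # hypothetical path
slim = optimize(model)                          # default passes
fused = optimize(model, ['fuse_bn_into_conv'])  # one explicit pass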

Example 6: load_and_parse_binary

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses deep500's OnnxModel and clean_model helpers
def load_and_parse_binary(binary: bytes) -> OnnxModel:
    # 'binary' is a serialized ModelProto, not a ModelProto instance
    model = onnx.load_from_string(binary)
    model = OnnxModel.create_from_onnx_model(model)
    model = clean_model(model)
    return model 
Developer: deep500, Project: deep500, Lines: 7, Source file: parser.py

Example 7: load_model_only

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
def load_model_only(path: str) -> onnx.ModelProto:
    """
     @param path path to file
     @return deserialized onnx model
     """
    with open(path, 'rb') as model_file:
        binary = model_file.read()
    model = onnx.load_from_string(binary)
    return model 
Developer: deep500, Project: deep500, Lines: 11, Source file: parser.py
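A hypothetical sketch chaining the two deep500 helpers above (the file path is an assumption):

model_proto = load_model_only('model.onnx')  # hypothetical path

# Equivalent route through raw bytes and load_and_parse_binary:
with open('model.onnx', 'rb') as f:
    parsed = load_and_parse_binary(f.read())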

Example 8: export_onnx_model

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: import io, import torch, and
# from torch.onnx import OperatorExportTypes
def export_onnx_model(model, inputs):
    """
    Trace and export a model to onnx format.

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)
    return onnx_model 
Developer: facebookresearch, Project: detectron2, Lines: 41, Source file: caffe2_export.py

Example 9: _export_via_onnx

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: import io, import torch, and onnx-tensorflow's
# prepare (from onnx_tf.backend import prepare)
def _export_via_onnx(model, inputs):
    # from ipdb import set_trace; set_trace()  # debugging breakpoint, disabled

    def _check_val(module):
        assert not module.training

    model.apply(_check_val)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                # verbose=True,  # NOTE: uncomment this for debugging
                export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())
    # torch.onnx.export(model,  # model being run
    #                   inputs,  # model input (or a tuple for multiple inputs)
    #                   "reid_test.onnx",  # where to save the model (can be a file or file-like object)
    # export_params=True,  # store the trained parameter weights inside the model file
    # opset_version=10,  # the ONNX version to export the model to
    # do_constant_folding=True,  # whether to execute constant folding for optimization
    # input_names=['input'],  # the model's input names
    # output_names=['output'],  # the model's output names
    # dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
    #               'output': {0: 'batch_size'}})
    # )

    # Apply ONNX's Optimization
    # all_passes = optimizer.get_available_passes()
    # passes = ["fuse_bn_into_conv"]
    # assert all(p in all_passes for p in passes)
    # onnx_model = optimizer.optimize(onnx_model, passes)

    # Convert ONNX Model to Tensorflow Model
    tf_rep = prepare(onnx_model, strict=False)  # Import the ONNX model to Tensorflow
    print(tf_rep.inputs)  # Input nodes to the model
    print('-----')
    print(tf_rep.outputs)  # Output nodes from the model
    print('-----')
    # print(tf_rep.tensor_dict)  # All nodes in the model

    # Install onnx-tensorflow from GitHub, then use tf_rep = prepare(onnx_model, strict=False).
    # Reference: https://github.com/onnx/onnx-tensorflow/issues/167
    # tf_rep = prepare(onnx_model)  # without strict=False this leads to KeyError: 'pyfunc_0'

    # debug, here using the same input to check onnx and tf.
    # output_onnx_tf = tf_rep.run(to_numpy(img))
    # print('output_onnx_tf = {}'.format(output_onnx_tf))
    # onnx --> tf.graph.pb
    # tf_pb_path = 'reid_tf_graph.pb'
    # tf_rep.export_graph(tf_pb_path)

    return tf_rep 
Developer: JDAI-CV, Project: fast-reid, Lines: 61, Source file: tensorflow_export.py
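A hypothetical usage sketch (the toy module, input, and output path are assumptions; run and export_graph are part of onnx-tensorflow's TensorflowRep API):

import numpy as np
import torch

net = torch.nn.Linear(4, 2).eval()  # toy module, hypothetical
tf_rep = _export_via_onnx(net, (torch.randn(1, 4),))
out = tf_rep.run(np.random.randn(1, 4).astype(np.float32))
tf_rep.export_graph('model_tf_graph.pb')  # hypothetical path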

Example 10: convert_tests

# Required import: import onnx [as alias]
# Or: from onnx import load_from_string [as alias]
# This example also uses: io, os, shutil, traceback, torch, and
# onnx.numpy_helper, plus test helpers defined elsewhere in the repo
# (get_test_name, gen_module, gen_input, test_onnx_common)
def convert_tests(testcases, sets=1):
    print("Collect {} test cases from PyTorch.".format(len(testcases)))
    failed = 0
    ops = set()
    for t in testcases:
        test_name = get_test_name(t)
        module = gen_module(t)
        try:
            input = gen_input(t)
            f = io.BytesIO()
            torch.onnx._export(module, input, f)
            onnx_model = onnx.load_from_string(f.getvalue())
            onnx.checker.check_model(onnx_model)
            onnx.helper.strip_doc_string(onnx_model)
            output_dir = os.path.join(test_onnx_common.pytorch_converted_dir, test_name)

            if os.path.exists(output_dir):
                shutil.rmtree(output_dir)
            os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
                file.write(onnx_model.SerializeToString())

            for i in range(sets):
                output = module(input)
                data_dir = os.path.join(output_dir, "test_data_set_{}".format(i))
                os.makedirs(data_dir)

                for index, var in enumerate([input]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "input_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                for index, var in enumerate([output]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "output_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                input = gen_input(t)
        except Exception:
            traceback.print_exc()
            failed += 1

    print("Collect {} test cases from PyTorch repo, failed to export {} cases.".format(
        len(testcases), failed))
    print("PyTorch converted cases are stored in {}.".format(test_onnx_common.pytorch_converted_dir)) 
Developer: onnxbot, Project: onnx-fb-universe, Lines: 45, Source file: convert_pytorch_test.py
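A hypothetical sketch of reading back one of the tensor protobufs written above (the path is an assumption):

import onnx
from onnx import numpy_helper

tensor = onnx.TensorProto()
with open('test_data_set_0/input_0.pb', 'rb') as f:  # hypothetical path
    tensor.ParseFromString(f.read())
arr = numpy_helper.to_array(tensor)  # back to a numpy array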


Note: the onnx.load_from_string method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this article without permission.