

Python onnx.save_model Method Code Examples

This article collects typical usage examples of the Python onnx.save_model method. If you are wondering what exactly onnx.save_model does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the onnx package itself.


The following presents 15 code examples of the onnx.save_model method, drawn from open-source projects and ordered roughly by popularity.
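Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) that builds a trivial ONNX model with onnx.helper, validates it, saves it with onnx.save_model, and loads it back; all node, tensor, and file names are made up for illustration.

import onnx
from onnx import helper, TensorProto

# Build a trivial graph with a single Identity node (names are illustrative).
inp = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3])
node = helper.make_node('Identity', ['x'], ['y'])
graph = helper.make_graph([node], 'tiny_graph', [inp], [out])
model = helper.make_model(graph)

onnx.checker.check_model(model)          # validate the ModelProto before saving
onnx.save_model(model, 'tiny.onnx')      # serialize to disk
reloaded = onnx.load_model('tiny.onnx')  # round-trip back into a ModelProto
assert model == reloaded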

Example 1: save

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def save(self, dst):
        if self._onnx_model_path:
            shutil.copyfile(
                self._onnx_model_path, self.spec._saved_model_file_path(dst)
            )
        elif self._model_proto:
            try:
                import onnx
            except ImportError:
                raise MissingDependencyException(
                    '"onnx" package is required for packing with OnnxModelArtifact'
                )
            onnx.save_model(self._model_proto, self.spec._saved_model_file_path(dst))
        else:
            raise InvalidArgument(
                'onnx.ModelProto or a model file path is required to pack an '
                'OnnxModelArtifact'
            ) 
Author: bentoml | Project: BentoML | Lines: 20 | Source: onnx_model_artifact.py

Example 2: main

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def main(input_file, output_file=None, opset=None, channel_first=None):
    """
    A command line interface for Keras model to ONNX converter.
    :param input_file: path to the original model file, or the folder name of a TF saved model
    :param output_file: path for the converted ONNX model file (optional)
    :param opset: the target opset for the ONNX model
    :param channel_first: name of the input whose layout should be transposed to NCHW
    :return:
    """

    if not os.path.exists(input_file):
        print("File or directory name '{}' is invalid!".format(input_file))
        return

    file_ext = os.path.splitext(input_file)
    if output_file is None:
        output_file = file_ext[0] + '.onnx'

    assert file_ext[-1] == '.h5', "Unknown file extension."
    kml = tf.keras.models.load_model(input_file)
    oxml = convert_keras(kml, kml.name, '', opset, channel_first)
    onnx.save_model(oxml, output_file) 
Author: onnx | Project: keras-onnx | Lines: 24 | Source: cli.py

Example 3: get_default_conda_env

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import onnx
    import onnxruntime
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=[
            "onnx=={}".format(onnx.__version__),
            # The ONNX pyfunc representation requires the OnnxRuntime
            # inference engine. Therefore, the conda environment must
            # include OnnxRuntime
            "onnxruntime=={}".format(onnxruntime.__version__),
        ],
        additional_conda_channels=None,
    ) 
Author: mlflow | Project: mlflow | Lines: 20 | Source: onnx.py

Example 4: test_save_and_load_model

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_save_and_load_model(self):  # type: () -> None
        proto = self._simple_model()
        cls = ModelProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_model_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_model(proto_string, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_model(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            fi = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_model(proto, fi)
            fi.close()

            loaded_proto = onnx.load_model(fi.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(fi.name) 
Author: mlperf | Project: training_results_v0.6 | Lines: 28 | Source: basic_test.py
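Example 4 relies on the private helper onnx._serialize; a minimal sketch of the same round trip using only public APIs might look like the following (the tiny placeholder model and its names are made up for illustration):

import io
import onnx
from onnx import helper, TensorProto

# A tiny placeholder model, constructed the same way as in the sketch after the intro.
node = helper.make_node('Identity', ['x'], ['y'])
graph = helper.make_graph(
    [node], 'roundtrip_graph',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])])
proto = helper.make_model(graph)

proto_bytes = proto.SerializeToString()            # public counterpart of onnx._serialize
loaded = onnx.load_model_from_string(proto_bytes)  # parse the bytes back into a ModelProto

buf = io.BytesIO()
onnx.save_model(proto, buf)                        # save_model also accepts file-like objects
loaded_again = onnx.load_model(io.BytesIO(buf.getvalue()))
assert proto == loaded == loaded_again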

Example 5: saveonnxmodel

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def saveonnxmodel(onnxmodel, onnx_save_path):
    try:
        onnx.checker.check_model(onnxmodel)
        onnx.save_model(onnxmodel, onnx_save_path)
        print("3. Model saved successfully to " + onnx_save_path)
    except Exception as e:
        print("3. The model has a problem and was not saved:\n", e)
Author: htshinichi | Project: caffe-onnx | Lines: 9 | Source: load_save_model.py
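A related use case not covered by the examples above: when a model's weights push the serialized ModelProto toward protobuf's 2 GB limit, recent onnx releases allow save_model to store large tensors in an external data file. The sketch below is illustrative only (file names are made up; the save_as_external_data keyword arguments follow the documented signature of onnx 1.9 and later, so check your installed version):

import onnx

model = onnx.load_model('big_model.onnx')  # illustrative input file

# Externalize large initializers so the .onnx protobuf itself stays small.
onnx.save_model(
    model,
    'big_model_external.onnx',
    save_as_external_data=True,
    all_tensors_to_one_file=True,
    location='big_model_external.onnx.data',  # written next to the model file
    size_threshold=1024,                      # only tensors larger than 1 KiB are externalized
)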

Example 6: onnx_convert

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def onnx_convert(keras_model_file, output_file, op_set):
    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model, model.name, custom_op_conversions=custom_object_dict, target_opset=op_set)

    # save converted onnx model
    onnx.save_model(onnx_model, output_file) 
Author: david8862 | Project: keras-YOLOv3-model-set | Lines: 11 | Source: keras_to_onnx.py

Example 7: main

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def main():
    if len(sys.argv) < 4:
        print('decast.py model_in  model_out <op1, ...>')
        return

    input = sys.argv[1]
    output = sys.argv[2]
    op_list = sys.argv[3:]

    oxml = onnx.load_model(input)
    oxml = decast(oxml, op_list)
    onnx.save_model(oxml, output) 
Author: microsoft | Project: onnxconverter-common | Lines: 14 | Source: decast.py

Example 8: save

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def save(self, path):
        onnx.save_model(self.oxml, path) 
Author: microsoft | Project: onnxconverter-common | Lines: 4 | Source: onnx_fx.py

Example 9: test_optimizer

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_optimizer(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
        nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[5:] = [helper.make_node('Transpose', ['tranpose0'], ['tranpose1'], perm=(0, 3, 1, 2))]
        nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)
        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        self.assertEqual(len(new_nodes), 3)
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertIsNotNone(model) 
Author: microsoft | Project: onnxconverter-common | Lines: 33 | Source: test_opt.py

Example 10: test_move_transpose

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_move_transpose(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
        nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[5:] = [helper.make_node('LeakyRelu', ['tranpose0'], ['tranpose1'])]
        nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        self.assertEqual(len(new_nodes), 5)
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertIsNotNone(model) 
Author: microsoft | Project: onnxconverter-common | Lines: 34 | Source: test_opt.py

Example 11: test_merge

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_merge(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Max', ['input1'], ['max0'])]
        nodes[2:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
        nodes[3:] = [helper.make_node('Transpose', ['tranpose0'], ['add_input1'], perm=(0, 3, 1, 2))]
        nodes[4:] = [helper.make_node('Add', ['max0', 'add_input1'], ['output0'])]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 3)
        self.assertIsNotNone(model) 
Author: microsoft | Project: onnxconverter-common | Lines: 32 | Source: test_opt.py

Example 12: test_fan_in

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_fan_in(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
        nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
        nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
        nodes[7:] = [helper.make_node('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1], name="7")]
        nodes[8:] = [helper.make_node('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1], name="8")]
        nodes[9:] = [helper.make_node('Add', ['tranpose0', 'tranpose1'], ['add0'], name="9")]
        nodes[10:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="10")]
        nodes[11:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="11")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 6)
        self.assertIsNotNone(model) 
Author: microsoft | Project: onnxconverter-common | Lines: 40 | Source: test_opt.py

Example 13: test_merge_common

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_merge_common(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                              name="0")]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
        nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
        nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
        nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
        nodes[7:] = [helper.make_node('Cast', ['leak1'], ['cast0'], to=6, name="7")]
        nodes[8:] = [helper.make_node('Cast', ['cast0'], ['cast1'], to=1, name="8")]
        nodes[9:] = [helper.make_node('Cast', ['leak2'], ['cast2'], to=6, name="9")]
        nodes[10:] = [helper.make_node('Cast', ['cast2'], ['cast3'], to=7, name="10")]
        nodes[11:] = [helper.make_node('Cast', ['cast3'], ['cast4'], to=1, name="11")]
        nodes[12:] = [helper.make_node('Add', ['cast1', 'cast4'], ['add0'], name="12")]
        nodes[13:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="13")]
        nodes[14:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="14")]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 11)
        self.assertIsNotNone(model) 
Author: microsoft | Project: onnxconverter-common | Lines: 43 | Source: test_opt.py

Example 14: test_node_name_type_custom_functions

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_node_name_type_custom_functions(self):  # type: () -> None
    def convert_acos(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    def convert_topk_generic(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )
      params.parameters["axis"].intValue = node.attrs.get('axis', -1)
      params.parameters["k"].intValue = node.attrs['k']

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    def convert_topk_node_specific(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )
      params.parameters["axis"].intValue = node.attrs.get('axis', -1)

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    onnx_model = _make_model_acos_exp_topk()
    onnx.save_model(onnx_model, 'acos.onnx')
    coreml_model = convert(model=onnx_model,
                           add_custom_layers=True,
                           custom_conversion_functions={'Acos':convert_acos, 'TopK':convert_topk_generic,
                                                        'output_values_output_indices':convert_topk_node_specific})

    spec = coreml_model.get_spec()
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].custom)
    self.assertIsNotNone(layers[2].custom)
    self.assertEqual('Acos', layers[0].custom.className)
    self.assertEqual('TopK', layers[2].custom.className)
    self.assertEqual(0, layers[2].custom.parameters['axis'].intValue) 
Author: onnx | Project: onnx-coreml | Lines: 56 | Source: custom_layers_test.py

Example 15: test_mask_rcnn

# Required import: import onnx [as alias]
# Or: from onnx import save_model [as alias]
def test_mask_rcnn(self):
        set_converter('CropAndResize', convert_tf_crop_and_resize)
        onnx_model = keras2onnx.convert_keras(model.keras_model)

        import skimage
        img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
        image = skimage.io.imread(img_path)
        images = [image]
        case_name = 'mask_rcnn'

        if not os.path.exists(tmp_path):
            os.mkdir(tmp_path)
        temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
        onnx.save_model(onnx_model, temp_model_file)
        try:
            import onnxruntime
            sess = onnxruntime.InferenceSession(temp_model_file)
        except ImportError:
            return True

        # preprocessing
        molded_images, image_metas, windows = model.mold_inputs(images)
        anchors = model.get_anchors(molded_images[0].shape)
        anchors = np.broadcast_to(anchors, (model.config.BATCH_SIZE,) + anchors.shape)

        expected = model.keras_model.predict(
            [molded_images.astype(np.float32), image_metas.astype(np.float32), anchors])

        actual = \
            sess.run(None, {"input_image": molded_images.astype(np.float32),
                            "input_anchors": anchors,
                            "input_image_meta": image_metas.astype(np.float32)})

        rtol = 1.e-3
        atol = 1.e-6
        compare_idx = [0, 3]
        res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in compare_idx)
        if res and temp_model_file not in self.model_files:  # still keep the failed case files for the diagnosis.
            self.model_files.append(temp_model_file)
        if not res:
            for n_ in compare_idx:
                expected_list = expected[n_].flatten()
                actual_list = actual[n_].flatten()
                print_mismatches(case_name, n_, expected_list, actual_list, atol, rtol)

        self.assertTrue(res) 
Author: onnx | Project: keras-onnx | Lines: 48 | Source: test_mask_rcnn.py


Note: The onnx.save_model examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using this code. Do not reproduce without permission.