

Python onnxruntime.SessionOptions Method Code Examples

This article collects typical usage examples of the onnxruntime.SessionOptions method in Python. If you are wondering what onnxruntime.SessionOptions is for or how to use it, the curated code examples below should help. You can also explore further usage examples from the onnxruntime package.


Six code examples of the onnxruntime.SessionOptions method are shown below, sorted by popularity.
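Before the individual examples, here is a minimal sketch of the pattern they all share: create a SessionOptions object, configure it, and pass it to InferenceSession. The model path "model.onnx" is a placeholder, and note that newer onnxruntime releases also expect an explicit providers list.

import onnxruntime

# Minimal sketch: configure options, then build a session from them.
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL

# "model.onnx" is a placeholder path.
session = onnxruntime.InferenceSession(
    "model.onnx", sess_options, providers=["CPUExecutionProvider"]
)
print([inp.name for inp in session.get_inputs()])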

Example 1: load

# Required import: import onnxruntime [as an alias]
# Or: from onnxruntime import SessionOptions [as an alias]
def load(self, model_path, inputs=None, outputs=None):
        """Load model and find input/outputs from the model file."""
        opt = rt.SessionOptions()
        # enable level 3 optimizations
        # FIXME: enable below once onnxruntime 0.5 is released
        # opt.set_graph_optimization_level(3)
        self.sess = rt.InferenceSession(model_path, opt)
        # get input and output names
        if not inputs:
            self.inputs = [meta.name for meta in self.sess.get_inputs()]
        else:
            self.inputs = inputs
        if not outputs:
            self.outputs = [meta.name for meta in self.sess.get_outputs()]
        else:
            self.outputs = outputs
        return self 
Developer ID: mlperf, Project: inference, Lines of code: 19, Source file: backend_onnxruntime.py
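The FIXME above refers to a pre-0.5 API. In current onnxruntime releases, the optimization level is a property on SessionOptions rather than a set_graph_optimization_level() call; a minimal sketch of the modern equivalent, with a placeholder model path:

import onnxruntime as rt

opt = rt.SessionOptions()
# Enum-valued property replaces the old set_graph_optimization_level(3) call.
opt.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL
sess = rt.InferenceSession("model.onnx", opt, providers=["CPUExecutionProvider"])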

Example 2: load

# Required import: import onnxruntime [as an alias]
# Or: from onnxruntime import SessionOptions [as an alias]
def load(cls, load_dir, device, **kwargs):
        import onnxruntime
        sess_options = onnxruntime.SessionOptions()
        # Set graph optimization level to ORT_ENABLE_EXTENDED to enable bert optimization.
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        # Use OpenMP optimizations. Only useful on CPU; has little impact on GPU.
        sess_options.intra_op_num_threads = multiprocessing.cpu_count()
        onnx_session = onnxruntime.InferenceSession(str(load_dir / "model.onnx"), sess_options)

        # Prediction heads
        _, ph_config_files = cls._get_prediction_head_files(load_dir, strict=False)
        prediction_heads = []
        ph_output_type = []
        for config_file in ph_config_files:
            # The ONNX model doesn't need a separate neural network for PredictionHead; it only
            # uses the instance methods of the PredictionHead class, so we load with load_weights=False.
            head = PredictionHead.load(config_file, load_weights=False)
            prediction_heads.append(head)
            ph_output_type.append(head.ph_output_type)

        with open(load_dir/"model_config.json") as f:
            model_config = json.load(f)
            language = model_config["language"]

        return cls(onnx_session, prediction_heads, language, device) 
Developer ID: deepset-ai, Project: FARM, Lines of code: 27, Source file: adaptive_model.py
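The thread setting in this example controls intra-op parallelism only; SessionOptions also exposes inter_op_num_threads for running independent operators concurrently. A short sketch of both knobs (the values here are illustrative, not recommendations):

import multiprocessing
import onnxruntime

sess_options = onnxruntime.SessionOptions()
# Threads used to parallelize work inside a single operator.
sess_options.intra_op_num_threads = multiprocessing.cpu_count()
# Threads used to run independent operators concurrently.
sess_options.inter_op_num_threads = 1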

Example 3: __init__

# Required import: import onnxruntime [as an alias]
# Or: from onnxruntime import SessionOptions [as an alias]
def __init__(self, args):
        self.profile = args.profile
        self.options = onnxruntime.SessionOptions()
        self.options.enable_profiling = args.profile

        print("Loading ONNX model...")
        self.quantized = args.quantized
        if self.quantized:
            model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_large_v1_1_fake_quant.onnx"
        else:
            model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/model.onnx"
        self.sess = onnxruntime.InferenceSession(model_path, self.options)

        print("Constructing SUT...")
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
        print("Finished constructing SUT.")

        self.qsl = get_squad_QSL() 
Developer ID: mlperf, Project: inference, Lines of code: 20, Source file: onnxruntime_SUT.py
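When enable_profiling is set as in this example, onnxruntime writes a JSON trace of each run; calling end_profiling() on the session returns the trace file name. A minimal sketch, assuming a placeholder model path:

import onnxruntime

options = onnxruntime.SessionOptions()
options.enable_profiling = True
sess = onnxruntime.InferenceSession(
    "model.onnx", options, providers=["CPUExecutionProvider"]
)
# ... run inference a few times with sess.run(...) ...
profile_file = sess.end_profiling()  # JSON trace, viewable in chrome://tracing
print(profile_file)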

Example 4: __init__

# Required import: import onnxruntime [as an alias]
# Or: from onnxruntime import SessionOptions [as an alias]
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        sess_options = rt.SessionOptions()

        self.model_dir = glob.glob(os.path.join(self.model_dir, '*.onnx'))[0]

        # Set graph optimization level to ORT_ENABLE_EXTENDED to enable bert optimization.
        sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED

        # Enable model serialization and store the optimized graph at the desired location.
        sess_options.optimized_model_filepath = self.model_dir
        self.session = rt.InferenceSession(self.model_dir, sess_options)
        if 'albert' in self.model_dir:
            self.tokenizer = AutoTokenizer.from_pretrained('albert-base-uncased')
        else:
            self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') 
Developer ID: koursaros-ai, Project: nboost, Lines of code: 18, Source file: onnxbert.py
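A common companion to optimized_model_filepath is reloading the serialized graph later with optimization disabled, so the already-optimized model loads faster. A sketch, assuming the optimized graph was previously written to the hypothetical path "model_optimized.onnx":

import onnxruntime as rt

opts = rt.SessionOptions()
# The graph on disk is already optimized, so skip re-optimization at load time.
opts.graph_optimization_level = rt.GraphOptimizationLevel.ORT_DISABLE_ALL
session = rt.InferenceSession(
    "model_optimized.onnx", opts, providers=["CPUExecutionProvider"]
)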

Example 5: _create_session_via_execution_providers_api

# Required import: import onnxruntime [as an alias]
# Or: from onnxruntime import SessionOptions [as an alias]
def _create_session_via_execution_providers_api(self, model):
        session_options = onnx_rt.SessionOptions()
        session = onnx_rt.InferenceSession(model, sess_options=session_options)
        self.execution_providers = self.get_value_from_config('execution_providers')
        available_providers = session.get_providers()
        contains_all(available_providers, self.execution_providers)
        session.set_providers(self.execution_providers)

        return session 
Developer ID: opencv, Project: open_model_zoo, Lines of code: 11, Source file: onnx_launcher.py
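Instead of calling set_providers() after construction, recent onnxruntime versions also accept the provider list directly in the InferenceSession constructor, in priority order. A minimal sketch (the provider choice and model path are illustrative):

import onnxruntime

print(onnxruntime.get_available_providers())
session = onnxruntime.InferenceSession(
    "model.onnx",
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
print(session.get_providers())  # providers actually in use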

Example 6: optimize_by_onnxruntime

# Required import: import onnxruntime [as an alias]
# Or: from onnxruntime import SessionOptions [as an alias]
def optimize_by_onnxruntime(onnx_model_path, use_gpu=False, optimized_model_path=None, opt_level=99):
    """
    Use onnxruntime package to optimize model. It could support models exported by PyTorch.

    Args:
        onnx_model_path (str): th path of input onnx model.
        use_gpu (bool): whether the optimized model is targeted to run in GPU.
        optimized_model_path (str or None): the path of optimized model.

    Returns:
        optimized_model_path: the path of optimized model
    """
    import onnxruntime

    if use_gpu and 'CUDAExecutionProvider' not in onnxruntime.get_available_providers():
        logger.error("There is no GPU for onnxruntime to do optimization.")
        return onnx_model_path

    sess_options = onnxruntime.SessionOptions()
    if opt_level == 1:
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_BASIC
    elif opt_level == 2:
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    else:
        assert opt_level == 99
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL

    if optimized_model_path is None:
        path_prefix = onnx_model_path[:-5]  # remove the .onnx suffix
        optimized_model_path = "{}_ort_{}.onnx".format(path_prefix, "gpu" if use_gpu else "cpu")

    sess_options.optimized_model_filepath = optimized_model_path

    if not use_gpu:
        session = onnxruntime.InferenceSession(onnx_model_path, sess_options, providers=['CPUExecutionProvider'])
    else:
        session = onnxruntime.InferenceSession(onnx_model_path, sess_options)
        assert 'CUDAExecutionProvider' in session.get_providers()  # Make sure there is GPU

    assert os.path.exists(optimized_model_path) and os.path.isfile(optimized_model_path)
    logger.info("Save optimized model by onnxruntime to {}".format(optimized_model_path))
    return optimized_model_path 
Developer ID: deepset-ai, Project: FARM, Lines of code: 44, Source file: bert_model_optimization.py
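For reference, a hedged usage sketch of the helper above; "bert.onnx" is a placeholder input path:

# Given the implementation above, the output path is derived from the input
# name, e.g. "bert_ort_cpu.onnx" for a CPU-targeted optimization.
optimized_path = optimize_by_onnxruntime("bert.onnx", use_gpu=False, opt_level=99)
print(optimized_path)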


Note: The onnxruntime.SessionOptions method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.