This page collects typical usage examples of the Python method onnxruntime.InferenceSession. If you have been wondering what onnxruntime.InferenceSession does and how to use it, the curated code examples below may help. They also serve as further illustrations of the onnxruntime module itself.
The following presents 15 code examples of onnxruntime.InferenceSession, drawn from real projects.
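Before the individual examples, here is a minimal sketch of the pattern they all share. The model path and input shape are placeholders; note that on onnxruntime builds that ship more than one execution provider, recent versions require the providers argument to be passed explicitly.

import numpy as np
import onnxruntime

# Create a session from an .onnx file; serialized model bytes also work.
sess = onnxruntime.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

# Inspect the declared inputs and outputs of the graph.
input_name = sess.get_inputs()[0].name
output_names = [o.name for o in sess.get_outputs()]

# Run inference: the first argument selects the outputs to compute (None means all),
# the second maps input names to numpy arrays.
x = np.random.rand(1, 3, 224, 224).astype(np.float32)  # placeholder input
results = sess.run(output_names, {input_name: x})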
Example 1: main
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_shape', help="model input shape, e.g. 224 224", nargs='+', default=(224, 224))
    parser.add_argument('--img_path', help="test image path", type=str, default="./onnxmodel/airplane.jpg")
    parser.add_argument('--onnx_path', help="onnx model file path", type=str, default="./onnxmodel/resnet50.onnx")
    args = parser.parse_args()
    input_shape = [int(x) for x in args.input_shape]  # model input size
    img_path = args.img_path
    onnx_path = args.onnx_path
    print("image path:", img_path)
    print("onnx model path:", onnx_path)
    data_input = process_image(img_path, input_shape)
    session = onnxruntime.InferenceSession(onnx_path)
    inname = [inp.name for inp in session.get_inputs()]
    outname = [out.name for out in session.get_outputs()]
    print("inputs name:", inname, "|| outputs name:", outname)
    data_output = session.run(outname, {inname[0]: data_input})
    output = data_output[0]
    print("Label predict: ", output.argmax())
Example 2: test_kmeans
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_kmeans(self):
    model = KMeans()
    X, y = make_regression(n_features=4, random_state=42)
    model.fit(X, y)
    initial_types = [('input', FloatTensorType((None, X.shape[1])))]
    with self.assertRaises(RuntimeError):
        convert_sklearn(model, initial_types=initial_types,
                        final_types=[('output4', None)])
    with self.assertRaises(RuntimeError):
        convert_sklearn(model, initial_types=initial_types,
                        final_types=[('dup1', None), ('dup1', None)],
                        target_opset=TARGET_OPSET)
    model_onnx = convert_sklearn(
        model, initial_types=initial_types,
        final_types=[('output4', None), ('output5', None)],
        target_opset=TARGET_OPSET)
    assert model_onnx is not None
    sess = InferenceSession(model_onnx.SerializeToString())
    assert sess.get_outputs()[0].name == 'output4'
    assert sess.get_outputs()[1].name == 'output5'
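In skl2onnx, final_types mirrors initial_types on the output side: it pins the names (and optionally the types) of the converted graph's outputs, which is why the session reports output4 and output5 above. The two assertRaises blocks appear to check that an incomplete or duplicated list of output names is rejected.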
Example 3: load
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def load(cls, load_dir, device, **kwargs):
    import onnxruntime
    sess_options = onnxruntime.SessionOptions()
    # Set graph optimization level to ORT_ENABLE_EXTENDED to enable bert optimization.
    sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    # Use OpenMP optimizations. Only useful for CPU, has little impact for GPUs.
    sess_options.intra_op_num_threads = multiprocessing.cpu_count()
    onnx_session = onnxruntime.InferenceSession(str(load_dir / "model.onnx"), sess_options)
    # Prediction heads
    _, ph_config_files = cls._get_prediction_head_files(load_dir, strict=False)
    prediction_heads = []
    ph_output_type = []
    for config_file in ph_config_files:
        # An ONNX model doesn't need a separate neural network for PredictionHead; it only uses
        # the instance methods of the PredictionHead class, so we load with load_weights=False.
        head = PredictionHead.load(config_file, load_weights=False)
        prediction_heads.append(head)
        ph_output_type.append(head.ph_output_type)
    with open(load_dir / "model_config.json") as f:
        model_config = json.load(f)
        language = model_config["language"]
    return cls(onnx_session, prediction_heads, language, device)
Example 4: load
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def load(self, model_path, inputs=None, outputs=None):
    """Load model and find input/outputs from the model file."""
    opt = rt.SessionOptions()
    # enable level 3 optimizations
    # FIXME: enable below once onnxruntime 0.5 is released
    # opt.set_graph_optimization_level(3)
    self.sess = rt.InferenceSession(model_path, opt)
    # get input and output names
    if not inputs:
        self.inputs = [meta.name for meta in self.sess.get_inputs()]
    else:
        self.inputs = inputs
    if not outputs:
        self.outputs = [meta.name for meta in self.sess.get_outputs()]
    else:
        self.outputs = outputs
    return self
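The FIXME above dates from onnxruntime 0.4. In later releases the optimization level is exposed as an attribute on SessionOptions (the same attribute Example 3 uses), so the commented-out call would nowadays be written roughly as:

opt = rt.SessionOptions()
opt.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL  # enable all optimizations
sess = rt.InferenceSession(model_path, opt)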
Example 5: __init__
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def __init__(self, args):
    self.profile = args.profile
    self.options = onnxruntime.SessionOptions()
    self.options.enable_profiling = args.profile
    print("Loading ONNX model...")
    self.quantized = args.quantized
    if self.quantized:
        model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_large_v1_1_fake_quant.onnx"
    else:
        model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/model.onnx"
    self.sess = onnxruntime.InferenceSession(model_path, self.options)
    print("Constructing SUT...")
    self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
    print("Finished constructing SUT.")
    self.qsl = get_squad_QSL()
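Because enable_profiling is toggled above, the session records a JSON trace whenever profiling is requested; once the run finishes it can be collected with end_profiling(), which returns the name of the file onnxruntime wrote. A short usage sketch:

if self.profile:
    profile_file = self.sess.end_profiling()  # path of the JSON trace written by onnxruntime
    print("Profile written to:", profile_file)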
Example 6: load
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def load(cls, bundle, **kwargs):
    """Load a model from a bundle.

    This can be either a local model or a remote, exported model.

    :returns a Service implementation
    """
    import onnxruntime as ort
    if os.path.isdir(bundle):
        directory = bundle
    else:
        directory = unzip_files(bundle)
    model_basename = find_model_basename(directory)
    model_name = f"{model_basename}.onnx"
    vocabs = load_vocabs(directory)
    vectorizers = load_vectorizers(directory)
    # Currently nothing to do here
    labels = read_json(model_basename + '.labels')
    model = ort.InferenceSession(model_name)
    return cls(vocabs, vectorizers, model, labels)
Example 7: test_pad_opset_10
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_pad_opset_10(self):
    pad = OnnxPad('X', output_names=['Y'],
                  mode='constant', value=1.5,
                  pads=[0, 1, 0, 1],
                  op_version=2)
    X = np.array([[0, 1]], dtype=np.float32)
    model_def = pad.to_onnx({'X': X}, target_opset=10)
    onnx.checker.check_model(model_def)

    def predict_with_onnxruntime(model_def, *inputs):
        sess = ort.InferenceSession(model_def.SerializeToString())
        names = [i.name for i in sess.get_inputs()]
        dinputs = {name: inp for name, inp in zip(names, inputs)}
        res = sess.run(None, dinputs)
        names = [o.name for o in sess.get_outputs()]
        return {name: out for name, out in zip(names, res)}

    Y = predict_with_onnxruntime(model_def, X)
    assert_almost_equal(
        np.array([[1.5, 0., 1., 1.5]], dtype=np.float32), Y['Y'])
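The target_opset=10 pin matters here: from opset 11 onward the ONNX Pad operator takes pads (and the constant fill value) as inputs rather than attributes, so the attribute-based construction above is only valid against opset 10 and earlier.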
Example 8: test_model_tfidf_vectorizer11
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_model_tfidf_vectorizer11(self):
    corpus = numpy.array([
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]).reshape((4, 1))
    vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
    vect.fit(corpus.ravel())
    model_onnx = convert_sklearn(vect, "TfidfVectorizer",
                                 [("input", StringTensorType())],
                                 options=self.get_options())
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        corpus,
        vect,
        model_onnx,
        basename="SklearnTfidfVectorizer11-OneOff-SklCol",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.4.0')",
    )
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': corpus.ravel()})[0]
    assert res.shape == (4, 9)
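The asserted shape (4, 9) is four documents by the nine distinct terms the vectorizer learns from this corpus (and, document, first, is, one, second, the, third, this).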
Example 9: test_model_tfidf_vectorizer11_compose
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_model_tfidf_vectorizer11_compose(self):
    corpus = numpy.array([
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]).reshape((4, 1))
    corpus = numpy.hstack([corpus, corpus])
    y = numpy.array([0, 1, 0, 1])
    model = ColumnTransformer([
        ('a', TfidfVectorizer(), 0),
        ('b', TfidfVectorizer(), 1),
    ])
    model.fit(corpus, y)
    model_onnx = convert_sklearn(model, "TfIdfcomp",
                                 [("input", StringTensorType([4, 2]))],
                                 options=self.get_options())
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': corpus})[0]
    exp = model.transform(corpus)
    assert_almost_equal(res, exp)
Example 10: test_kernel_ker2_def
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_kernel_ker2_def(self):
    ker = Sum(
        C(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                  length_scale_bounds=(1e-3, 1e3)),
        C(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                  length_scale_bounds=(1e-3, 1e3))
    )
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=0)
Example 11: test_kernel_ker2_exp_sine_squared
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_kernel_ker2_exp_sine_squared(self):
    ker = ExpSineSquared()
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=4)
    onx = convert_kernel(ker, 'X', output_names=['Z'],
                         x_train=(Xtest_ * 2).astype(np.float32),
                         dtype=np.float32, op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_, Xtest_ * 2)
    assert_almost_equal(m1, m2, decimal=4)
Example 12: test_kernel_dot_product
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_kernel_dot_product(self):
    ker = DotProduct()
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1 / 1000, m2 / 1000, decimal=5)
    onx = convert_kernel(ker, 'X', output_names=['Z'],
                         x_train=(Xtest_ * 2).astype(np.float32),
                         dtype=np.float32, op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_, Xtest_ * 2)
    assert_almost_equal(m1 / 1000, m2 / 1000, decimal=5)
Example 13: test_algebra_abs
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_algebra_abs(self):
    op = OnnxAbs('I0', op_version=TARGET_OPSET)
    onx = op.to_onnx({'I0': numpy.empty((1, 2), dtype=numpy.float32)})
    assert onx is not None
    import onnxruntime as ort
    try:
        sess = ort.InferenceSession(onx.SerializeToString())
    except RuntimeError as e:
        raise RuntimeError("Unable to read\n{}".format(onx)) from e
    X = numpy.array([[0, 1], [-1, -2]])
    try:
        Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    except RuntimeError as e:
        raise RuntimeError("Unable to run\n{}".format(onx)) from e
    assert_almost_equal(Y, numpy.abs(X))
Example 14: test_algebra_normalizer
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_algebra_normalizer(self):
    op = OnnxNormalizer('I0', norm='L1', op_version=1,
                        output_names=['Y'])
    onx = op.to_onnx({'I0': numpy.ones((1, 2), dtype=numpy.float32)},
                     outputs=[('Y', FloatTensorType())],
                     target_opset={'': 10})
    assert onx is not None
    sonx = str(onx)
    assert "ai.onnx.ml" in sonx
    assert "version: 1" in sonx
    import onnxruntime as ort
    sess = ort.InferenceSession(onx.SerializeToString())
    X = numpy.array([[0, 2], [0, -2]])
    exp = numpy.array([[0, 1], [0, -1]])
    Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    assert_almost_equal(exp, Y)
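The expected values follow directly from L1 normalization, which scales each row by the sum of absolute values in that row: [0, 2] / (|0| + |2|) = [0, 1], and likewise [0, -2] / 2 = [0, -1].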
Example 15: test_algebra_normalizer_shape
# Required import: import onnxruntime [as alias]
# Alternatively: from onnxruntime import InferenceSession [as alias]
def test_algebra_normalizer_shape(self):
    op = OnnxNormalizer('I0', norm='L1', op_version=1, output_names=['O0'])
    onx = op.to_onnx({'I0': numpy.ones((1, 2), dtype=numpy.float32)},
                     outputs=[('O0', FloatTensorType((None, 2)))])
    assert onx is not None
    sonx = str(onx)
    assert "ai.onnx.ml" in sonx
    assert "version: 1" in sonx
    import onnxruntime as ort
    sess = ort.InferenceSession(onx.SerializeToString())
    X = numpy.array([[0, 2], [0, -2]])
    exp = numpy.array([[0, 1], [0, -1]])
    Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    assert_almost_equal(exp, Y)