onnxruntime InferenceSession

 

 

How to extract output tensor from any layer of models · Issue #1455 · microsoft/onnxruntime (github.com)

 

Walk through intermediate outputs — sklearn-onnx 1.11 documentation

 

 

import onnxruntime
import onnx
import numpy as np

# Dummy input tensor in NCHW layout (batch=1, 3 channels, 224x224) —
# assumes the model's "input" expects this shape; confirm against the ONNX graph.
input_data = np.random.rand(1, 3, 224, 224).astype(dtype=np.float32)
# Explicit providers list is required by onnxruntime >= 1.9; CPU keeps this portable.
sess = onnxruntime.InferenceSession(
    "add_model.onnx", providers=["CPUExecutionProvider"]
)
# Run the graph: request the tensor named "output", feed the tensor named "input".
# Returns a list with one ndarray per requested output.
result = sess.run(["output"], {"input": input_data})
print(result)

 

 

import numpy as np
import onnx
import onnxruntime as rt

# Create dummy input data: NCHW, 299x299 — the input size Inception v3 expects.
input_data = np.ones((1, 3, 299, 299), dtype=np.float32)
# Create runtime session. Explicit providers list is required by onnxruntime >= 1.9.
sess = rt.InferenceSession("inception_v3.onnx", providers=["CPUExecutionProvider"])
# Get input name (original comment mislabeled this step as "get output name").
input_name = sess.get_inputs()[0].name
print("input name", input_name)
# Get output name and (possibly symbolic) output shape for the first output.
output_name = sess.get_outputs()[0].name
print("output name", output_name)
output_shape = sess.get_outputs()[0].shape
print("output shape", output_shape)
# Forward pass: run() returns a list of ndarrays, one per requested output.
res = sess.run([output_name], {input_name: input_data})
out = np.array(res)

 

posted @ 2023-01-04 13:12  sinferwu  阅读(672)  评论(0)  编辑  收藏  举报