YOLO入门教程(二)——OpenVINO™部署YOLO目标检测模型与异步推理实现150FPS+实时检测【含教程源码 + 环境配置】

时间:2025-04-07 07:59:20
using OpenCvSharp.Dnn;
using OpenCvSharp;
using OpenVinoSharp;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using OpenVinoSharp.preprocess;
using System.Diagnostics;
using OpenVinoSharp.Extensions;
using OpenVinoSharp.Extensions.model;

namespace DeepLearningDotNet
{
    /// <summary>
    /// Wraps a YOLO-style detection model behind two interchangeable inference
    /// engines: OpenVINO (with pre-processing baked into the model graph) or
    /// OpenCV DNN. Exactly one engine is initialized per instance.
    /// </summary>
    public class Predictor : IDisposable
    {
        /// <summary>
        /// OpenVINO Runtime Core
        /// </summary>
        private Core core;
        /// <summary>
        /// Model with the pre-processing API steps attached
        /// </summary>
        private Model model;
        /// <summary>
        /// Model compiled onto the target device
        /// </summary>
        private CompiledModel compiled;
        /// <summary>
        /// OpenVINO infer request (null when engine == "OpenCv")
        /// </summary>
        public InferRequest openvino_api_infer;
        /// <summary>
        /// OpenCV DNN network (null when engine == "OpenVINO")
        /// </summary>
        public Net opencv_infer;
        /// <summary>
        /// Selected engine name: "OpenVINO" or "OpenCv"
        /// </summary>
        private string engine;
        /// <summary>
        /// Model input shape in NCHW order, e.g. {1, 3, 640, 640}
        /// </summary>
        private int[] input_size;

        public Predictor() { }

        /// <summary>
        /// Creates a predictor and loads the model into the chosen engine.
        /// </summary>
        /// <param name="model_path">Path to the model file (OpenVINO IR or ONNX).</param>
        /// <param name="engine">"OpenVINO" or "OpenCv".</param>
        /// <param name="device">OpenVINO device name, e.g. "AUTO", "CPU", "GPU".</param>
        /// <param name="input_size">NCHW input shape; defaults to {1, 3, 640, 640}.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when the model path is null/empty/nonexistent or the requested
        /// OpenVINO device is not available.
        /// </exception>
        public Predictor(string model_path, string engine = "OpenVINO", string device = "AUTO", int[] input_size = null)
        {
            // Validate the model path before touching any native resources.
            if (string.IsNullOrEmpty(model_path) || !File.Exists(model_path))
            {
                throw new ArgumentNullException(nameof(model_path));
            }
            this.engine = engine;
            this.input_size = input_size ?? new int[] { 1, 3, 640, 640 };

            if (engine == "OpenVINO")
            {
                // -------- Step 1. Initialize OpenVINO Runtime Core --------
                core = new Core();
                // Reject devices the runtime cannot see.
                if (!core.get_available_devices().Contains(device))
                {
                    throw new ArgumentNullException(nameof(device));
                }

                // -------- Step 2. Read inference model --------
                Model tempModel = core.read_model(model_path);
                OvExtensions.printf_model_info(tempModel);
                PrePostProcessor processor = new PrePostProcessor(tempModel);
                // The runtime-facing input is declared as the raw OpenCV Mat
                // layout (NHWC, U8, BGR); the pre-processing graph converts it
                // to the model's NCHW/F32/RGB normalized form.
                Tensor input_tensor_pro = new Tensor(new OvType(ElementType.U8),
                    new Shape(this.input_size[0], this.input_size[2], this.input_size[3], this.input_size[1]));
                InputInfo input_info = processor.input(0);
                InputTensorInfo input_tensor_info = input_info.tensor();
                input_tensor_info.set_from(input_tensor_pro)
                    .set_layout(new Layout("NHWC"))
                    .set_color_format(ColorFormat.BGR);
                PreProcessSteps process_steps = input_info.preprocess();
                process_steps.convert_color(ColorFormat.RGB)
                    .resize(ResizeAlgorithm.RESIZE_LINEAR)
                    .convert_element_type(new OvType(ElementType.F32))
                    .scale(255.0f)
                    .convert_layout(new Layout("NCHW"));
                model = processor.build();

                // -------- Step 3. Loading a model to the device --------
                compiled = core.compile_model(model, device);

                // -------- Step 4. Create an infer request --------
                openvino_api_infer = compiled.create_infer_request();
            }
            if (engine == "OpenCv")
            {
                opencv_infer = CvDnn.ReadNetFromOnnx(model_path);
            }
        }

        /// <summary>
        /// Releases the native resources of whichever engine was initialized.
        /// Null-conditional calls make this safe no matter which constructor
        /// path ran (only one engine's objects ever exist; the parameterless
        /// constructor creates none). The original unconditionally disposed
        /// every field (NRE) and called GC.Collect(), which is an anti-pattern.
        /// </summary>
        public void Dispose()
        {
            openvino_api_infer?.Dispose();
            opencv_infer?.Dispose();
            compiled?.Dispose();
            model?.Dispose();
            core?.Dispose();
        }

        /// <summary>
        /// Runs synchronous OpenVINO inference on a pre-processed image.
        /// </summary>
        /// <param name="img">Pre-processed image (already resized to the model input size).</param>
        /// <param name="input_names">Unused; kept for signature compatibility.</param>
        /// <param name="input_size">Unused; kept for signature compatibility.</param>
        /// <param name="output_names">Unused here; only the first output tensor is read.</param>
        /// <param name="output_sizes">Unused; kept for signature compatibility.</param>
        /// <returns>
        /// A list containing the flattened first output tensor, or an empty
        /// list when loading the input or inference fails (best-effort).
        /// </returns>
        public List<float[]> OpenVinoInfer(Mat img, List<string> input_names, int[] input_size, List<string> output_names, List<int[]> output_sizes)
        {
            List<float[]> returns = new List<float[]>();
            try
            {
                // -------- Step 6. Set up input data --------
                if (set_input_tensor_data(img))
                {
                    // -------- Step 7. Do inference synchronously --------
                    openvino_api_infer.infer();
                    // -------- Step 8. Get infer result data --------
                    Tensor output_tensor = openvino_api_infer.get_output_tensor();
                    int output_length = (int)output_tensor.get_size();
                    float[] output_data = output_tensor.get_data<float>(output_length);
                    returns.Add(output_data);
                }
                return returns;
            }
            catch
            {
                // Deliberate best-effort: an empty list signals failure.
                return returns;
            }
        }

        /// <summary>
        /// Copies a pre-processed image into the OpenVINO input tensor.
        /// NOTE(original author): this method must not be moved into a derived
        /// class — doing so caused a memory leak.
        /// </summary>
        /// <param name="img">Image already resized to the model's H/W.</param>
        /// <returns>true when the data was loaded; false otherwise.</returns>
        public bool set_input_tensor_data(Mat img)
        {
            try
            {
                // Reject images that were not pre-processed: the larger side
                // must equal the model's H (index 2) or W (index 3).
                if (Math.Max(img.Size().Width, img.Size().Height) != input_size[2]
                    && Math.Max(img.Size().Width, img.Size().Height) != input_size[3])
                    return false;

                // Query the tensor's declared shape (NHWC, U8) and size the
                // staging buffer as H * W * C bytes.
                Tensor input_tensor = openvino_api_infer.get_input_tensor();
                Shape input_shape = input_tensor.get_shape();
                byte[] input_data = new byte[input_shape[1] * input_shape[2] * input_shape[3]];
                // Copy pixel bytes out of the Mat's unmanaged buffer...
                Marshal.Copy(img.Ptr(0), input_data, 0, input_data.Length);
                // ...and into the tensor's backing memory.
                IntPtr destination = input_tensor.data();
                Marshal.Copy(input_data, 0, destination, input_data.Length);
                return true;
            }
            catch
            {
                return false;
            }
        }

        /// <summary>
        /// Runs inference through the OpenCV DNN engine.
        /// BUG FIX: the original implementation pushed the input through
        /// <c>openvino_api_infer</c>, which is null when this instance was
        /// constructed with engine == "OpenCv"; it now uses
        /// <c>opencv_infer</c> as intended, replicating the OpenVINO path's
        /// pre-processing (BGR→RGB swap, 1/255 scaling, NCHW blob).
        /// </summary>
        /// <param name="img">Input image (BGR, already letterboxed/resized).</param>
        /// <param name="input_names">Unused; kept for signature compatibility.</param>
        /// <param name="input_size">NCHW shape used for the blob's target size.</param>
        /// <param name="output_names">Names of the output layers to fetch.</param>
        /// <param name="output_sizes">Unused; kept for signature compatibility.</param>
        /// <returns>One flattened float[] per requested output layer.</returns>
        public List<float[]> OpenCvInfer(Mat img, List<string> input_names, int[] input_size, List<string> output_names, List<int[]> output_sizes)
        {
            List<float[]> returns = new List<float[]>();
            // Size takes (width, height); input_size is NCHW so W = [3], H = [2].
            using (Mat blob = CvDnn.BlobFromImage(img, 1.0 / 255.0,
                new Size(input_size[3], input_size[2]), new Scalar(0, 0, 0),
                swapRB: true, crop: false))
            {
                opencv_infer.SetInput(blob);
                foreach (string name in output_names)
                {
                    using (Mat output = opencv_infer.Forward(name))
                    {
                        // Flatten the output Mat via its unmanaged data pointer.
                        int length = (int)output.Total();
                        float[] data = new float[length];
                        Marshal.Copy(output.Data, data, 0, length);
                        returns.Add(data);
                    }
                }
            }
            return returns;
        }
    }
}