openvino yolov9

main
lincaigui 5 months ago
commit 4fe539f4e2

Binary image file added (not shown, 182 KiB).

@@ -0,0 +1,295 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "812a3abf-ca0a-46d6-8c20-b58d4c920f12",
"metadata": {},
"outputs": [],
"source": [
"from openvino.runtime import Core\n",
"import openvino.runtime as ov\n",
"import cv2 as cv\n",
"import numpy as np\n",
"import tensorflow as tf"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1f2c5225-46d4-4d05-8d24-744a65708eec",
"metadata": {},
"outputs": [],
"source": [
"class Predictor:\n",
" \"\"\"\n",
" OpenVINO 模型推理器\n",
" \"\"\"\n",
" def __init__(self, model_path):\n",
" ie_core = Core()\n",
" model = ie_core.read_model(model=model_path)\n",
" self.compiled_model = ie_core.compile_model(model=model, device_name=\"CPU\")\n",
" def get_inputs_name(self, num):\n",
" return self.compiled_model.input(num)\n",
" \n",
" def get_outputs_name(self, num):\n",
" return self.compiled_model.output(num)\n",
" \n",
" def predict(self, input_data):\n",
" return self.compiled_model([input_data])\n",
" \n",
" def get_request(self):\n",
" return self.compiled_model.create_infer_request()\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "956f7097-a0b4-4d69-97fe-969c7490287f",
"metadata": {},
"outputs": [],
"source": [
"def process_image(input_image, size):\n",
" \"\"\"输入图片与处理方法按照PP-Yoloe模型要求预处理图片数据\n",
"\n",
" Args:\n",
" input_image (uint8): 输入图片矩阵\n",
" size (int): 模型输入大小\n",
"\n",
" Returns:\n",
" float32: 返回处理后的图片矩阵数据\n",
" \"\"\"\n",
" max_len = max(input_image.shape)\n",
" img = np.zeros([640,640,3],np.uint8)\n",
" img[0:input_image.shape[0],0:input_image.shape[1]] = input_image # 将图片放到正方形背景中\n",
" img = cv.cvtColor(img,cv.COLOR_BGR2RGB) # BGR转RGB\n",
" img = cv.resize(img, (size, size), cv.INTER_NEAREST) # 缩放图片\n",
" img = np.transpose(img,[2, 0, 1]) # 转换格式\n",
" img = img / 255.0 # 归一化\n",
" img = np.expand_dims(img,0) # 增加维度\n",
" return img.astype(np.float32)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "f61fca39-5c27-4e45-94a2-d683c5a2462e",
"metadata": {},
"outputs": [],
"source": [
"def process_result(box_results, conf_results):\n",
" \"\"\"按照PP-Yolove模型输出要求处理数据非极大值抑制提取预测结果\n",
"\n",
" Args:\n",
" box_results (float32): 预测框预测结果\n",
" conf_results (float32): 置信度预测结果\n",
" Returns:\n",
" float: 预测框\n",
" float: 分数\n",
" int: 类别\n",
" \"\"\"\n",
" conf_results = np.transpose(conf_results,[0, 2, 1]) # 转置\n",
" # 设置输出形状\n",
" box_results =box_results.reshape(8400,4) \n",
" conf_results = conf_results.reshape(8400,3)\n",
" scores = []\n",
" classes = []\n",
" boxes = []\n",
" for i in range(8400):\n",
" conf = conf_results[i,:] # 预测分数\n",
" score = np.max(conf) # 获取类别\n",
" # 筛选较小的预测类别\n",
" if score > 0.5:\n",
" classes.append(np.argmax(conf)) \n",
" scores.append(score) \n",
" boxes.append(box_results[i,:])\n",
" scores = np.array(scores)\n",
" boxes = np.array(boxes)\n",
" \n",
" result_box = []\n",
" result_score = []\n",
" result_class = []\n",
" # 非极大值抑制筛选重复的预测结果\n",
" if len(boxes) != 0:\n",
" # 非极大值抑制结果\n",
" indexs = tf.image.non_max_suppression(boxes,scores,len(scores),0.25,0.35)\n",
" for i, index in enumerate(indexs):\n",
" result_score.append(scores[index])\n",
" result_box.append(boxes[index,:])\n",
" result_class.append(classes[index])\n",
" # 返回结果\n",
" return np.array(result_box),np.array(result_score),np.array(result_class)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "d20c64ea-c5c6-4a5f-9af7-5c34fa1e60d9",
"metadata": {},
"outputs": [],
"source": [
"def draw_box(image, boxes, scores, classes, labels):\n",
" \"\"\"将预测结果绘制到图像上\n",
"\n",
" Args:\n",
" image (uint8): 原图片\n",
" boxes (float32): 预测框\n",
" scores (float32): 分数\n",
" classes (int): 类别\n",
" lables (str): 标签\n",
"\n",
" Returns:\n",
" uint8: 标注好的图片\n",
" \"\"\"\n",
" colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]\n",
" scale = max(image.shape) / 640.0 # 缩放比例\n",
" if len(classes) != 0:\n",
" for i in range(len(classes)):\n",
" box = boxes[i,:]\n",
" x1 = int(box[0] * scale)\n",
" y1 = int(box[1] * scale)\n",
" x2 = int(box[2] * scale)\n",
" y2 = int(box[3] * scale)\n",
" label = labels[classes[i]]\n",
" score = scores[i]\n",
" cv.rectangle(image, (x1, y1), (x2, y2), colors[classes[i]], 2, cv.LINE_8)\n",
" cv.putText(image,label+\":\"+str(score),(x1,y1-10),cv.FONT_HERSHEY_SIMPLEX, 0.55, colors[classes[i]], 2)\n",
" \n",
" return image\n"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "05a749f9-bec4-4dc4-b1e9-db50e7663eef",
"metadata": {},
"outputs": [],
"source": [
"def read_label(label_path):\n",
" with open(label_path, 'r') as f:\n",
" labels = f.read().split()\n",
" return labels"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "f089212d-b1f6-4e6c-bed3-efdf22c77321",
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "27f0b486-1994-4b93-9428-9619310936b9",
"metadata": {},
"outputs": [
{
"ename": "RuntimeError",
"evalue": "Exception from src\\inference\\src\\cpp\\infer_request.cpp:112:\nException from src\\inference\\src\\cpp\\infer_request.cpp:66:\nException from src\\plugins\\intel_cpu\\src\\infer_request.cpp:390:\nCan't set the input tensor with name: images, because the model input (shape=[?,3,?,?]) and the tensor (shape=(640.640.3)) are incompatible\n\n\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[28], line 8\u001b[0m\n\u001b[0;32m 6\u001b[0m image \u001b[38;5;241m=\u001b[39m cv\u001b[38;5;241m.\u001b[39mimread(image_path)\n\u001b[0;32m 7\u001b[0m pimage \u001b[38;5;241m=\u001b[39m letterbox(image)[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m----> 8\u001b[0m results \u001b[38;5;241m=\u001b[39m \u001b[43mpredictor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpredict\u001b[49m\u001b[43m(\u001b[49m\u001b[43minput_data\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpimage\u001b[49m\u001b[43m)\u001b[49m[outputs_name]\n\u001b[0;32m 9\u001b[0m \u001b[38;5;28mprint\u001b[39m()\n\u001b[0;32m 10\u001b[0m boxes_results \u001b[38;5;241m=\u001b[39m results[:, :\u001b[38;5;241m4\u001b[39m, :]\n",
"Cell \u001b[1;32mIn[2], line 16\u001b[0m, in \u001b[0;36mPredictor.predict\u001b[1;34m(self, input_data)\u001b[0m\n\u001b[0;32m 15\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpredict\u001b[39m(\u001b[38;5;28mself\u001b[39m, input_data):\n\u001b[1;32m---> 16\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompiled_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43minput_data\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32mD:\\envs\\openvino\\lib\\site-packages\\openvino\\runtime\\ie_api.py:365\u001b[0m, in \u001b[0;36mCompiledModel.__call__\u001b[1;34m(self, inputs, share_inputs, share_outputs, decode_strings)\u001b[0m\n\u001b[0;32m 362\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_infer_request \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 363\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_infer_request \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcreate_infer_request()\n\u001b[1;32m--> 365\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_infer_request\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minfer\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 366\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 367\u001b[0m \u001b[43m \u001b[49m\u001b[43mshare_inputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mshare_inputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 368\u001b[0m \u001b[43m \u001b[49m\u001b[43mshare_outputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mshare_outputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 369\u001b[0m \u001b[43m \u001b[49m\u001b[43mdecode_strings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecode_strings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 370\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32mD:\\envs\\openvino\\lib\\site-packages\\openvino\\runtime\\ie_api.py:132\u001b[0m, in \u001b[0;36mInferRequest.infer\u001b[1;34m(self, inputs, share_inputs, share_outputs, decode_strings)\u001b[0m\n\u001b[0;32m 55\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minfer\u001b[39m(\n\u001b[0;32m 56\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m 57\u001b[0m inputs: Any \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 61\u001b[0m decode_strings: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[0;32m 62\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m OVDict:\n\u001b[0;32m 63\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Infers specified input(s) in synchronous mode.\u001b[39;00m\n\u001b[0;32m 64\u001b[0m \n\u001b[0;32m 65\u001b[0m \u001b[38;5;124;03m Blocks all methods of InferRequest while request is running.\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 130\u001b[0m \u001b[38;5;124;03m :rtype: OVDict\u001b[39;00m\n\u001b[0;32m 131\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 132\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m OVDict(\u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minfer\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_data_dispatch\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 133\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 134\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 135\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_shared\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mshare_inputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 136\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mshare_outputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mshare_outputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdecode_strings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecode_strings\u001b[49m\u001b[43m)\u001b[49m)\n",
"\u001b[1;31mRuntimeError\u001b[0m: Exception from src\\inference\\src\\cpp\\infer_request.cpp:112:\nException from src\\inference\\src\\cpp\\infer_request.cpp:66:\nException from src\\plugins\\intel_cpu\\src\\infer_request.cpp:390:\nCan't set the input tensor with name: images, because the model input (shape=[?,3,?,?]) and the tensor (shape=(640.640.3)) are incompatible\n\n\n"
]
}
],
"source": [
"label_path = \"labels.txt\"\n",
"image_path = \"test.jpg\"\n",
"yoloe_model_path = \"models/yolov8n_openvino_model/yolov8n.xml\"\n",
"predictor = Predictor(model_path = yoloe_model_path)\n",
"outputs_name = predictor.get_outputs_name(0)\n",
"image = cv.imread(image_path)\n",
"pimage = letterbox(image)[0]\n",
"results = predictor.predict(input_data=pimage)[outputs_name]\n",
"print()\n",
"boxes_results = results[:, :4, :]\n",
"conf_results = results[:, 4: 7, :]\n",
"print(boxes_results.reshape(8400, 4))\n",
"print(conf_results.reshape(8400, 3))\n",
"\n",
"labels = read_label(label_path=label_path)\n",
"boxes, scores, classes = process_result(box_results=boxes_results, conf_results=conf_results)\n",
"result_frame = draw_box(image=image, boxes=boxes, scores=scores, classes=classes, labels=labels)\n",
"print(classes)\n",
"plt.imshow(result_frame)\n",
"plt.show()\n",
"# conf_name = predictor.get_outputs_name(1)\n",
"\n",
"# cap = cv.VideoCapture(0)\n",
"# while cap.isOpened():\n",
"# ret, frame = cap.read()\n",
"# frame = cv.flip(frame, 180)\n",
"# cv.namedWindow(\"MaskDetection\", 0) # 0可调大小注意窗口名必须imshow里面的一窗口名一直\n",
"# cv.resizeWindow(\"MaskDetection\", 640, 480) # 设置长和宽\n",
"# input_frame = process_image(frame, 640)\n",
"# results = predictor.predict(input_data=input_frame)\n",
"# boxes, scores, classes = process_result(box_results=results[boxes_name], conf_results=results[conf_name])\n",
"# result_frame = draw_box(image=frame, boxes=boxes, scores=scores, classes=classes, labels=labels)\n",
"# cv.imshow('MaskDetection', result_frame)\n",
"# key = cv.waitKey(1)\n",
"# if key == 27: #esc退出\n",
"# break\n",
"# cap.release()\n",
"# cv.destroyAllWindows()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c2832b8-97f5-419b-90a1-30c092456112",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "68507c5f-a4f1-4df4-bfc6-97b1cbbb24a8",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f9c5749-fad0-40c4-ab10-93733f325e98",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
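A note on the RuntimeError recorded above: the exported model has a dynamic input ([?,3,?,?]), and the cell originally passed a raw HWC uint8 array of shape (640,640,3). The fix applied in the cell is to preprocess to an NCHW float32 tensor with process_image. Alternatively, the input can be pinned to a static shape before compiling; a minimal sketch, assuming the same model path and the process_image helper defined in this notebook:

import cv2 as cv
from openvino.runtime import Core

core = Core()
model = core.read_model("models/yolov8n_openvino_model/yolov8n.xml")
model.reshape([1, 3, 640, 640])   # pin the dynamic [?,3,?,?] input to a static shape
compiled = core.compile_model(model, "CPU")

image = cv.imread("test.jpg")
pimage = process_image(image, 640)                # NCHW float32, shape (1, 3, 640, 640)
results = compiled([pimage])[compiled.output(0)]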

@@ -0,0 +1,344 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "8d41befc-7e65-4110-9118-35dce6e6ab0c",
"metadata": {},
"outputs": [],
"source": [
"from openvino.runtime import Core\n",
"import openvino.runtime as ov\n",
"import cv2 as cv\n",
"import numpy as np\n",
"from PIL import Image\n",
"from ultralytics.yolo.utils import ops\n",
"import torch\n",
"from ultralytics.yolo.utils.plotting import colors"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cd01038a-fe1a-4b47-ad49-b0641afdaee5",
"metadata": {},
"outputs": [],
"source": [
"def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scale_fill=False, scaleup=False, stride=32):\n",
" \"\"\"\n",
" Resize image and padding for detection. Takes image as input,\n",
" resizes image to fit into new shape with saving original aspect ratio and pads it to meet stride-multiple constraints\n",
"\n",
" Parameters:\n",
" img (np.ndarray): image for preprocessing\n",
" new_shape (Tuple(int, int)): image size after preprocessing in format [height, width]\n",
" color (Tuple(int, int, int)): color for filling padded area\n",
" auto (bool): use dynamic input size, only padding for stride constrins applied\n",
" scale_fill (bool): scale image to fill new_shape\n",
" scaleup (bool): allow scale image if it is lower then desired input size, can affect model accuracy\n",
" stride (int): input padding stride\n",
" Returns:\n",
" img (np.ndarray): image after preprocessing\n",
" ratio (Tuple(float, float)): hight and width scaling ratio\n",
" padding_size (Tuple(int, int)): height and width padding size\n",
"\n",
"\n",
" \"\"\"\n",
" # Resize and pad image while meeting stride-multiple constraints\n",
" shape = img.shape[:2] # current shape [height, width]\n",
" if isinstance(new_shape, int):\n",
" new_shape = (new_shape, new_shape)\n",
"\n",
" # Scale ratio (new / old)\n",
" r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n",
" if not scaleup: # only scale down, do not scale up (for better test mAP)\n",
" r = min(r, 1.0)\n",
"\n",
" # Compute padding\n",
" ratio = r, r # width, height ratios\n",
" new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n",
" dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n",
" if auto: # minimum rectangle\n",
" dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n",
" elif scale_fill: # stretch\n",
" dw, dh = 0.0, 0.0\n",
" new_unpad = (new_shape[1], new_shape[0])\n",
" ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n",
"\n",
" dw /= 2 # divide padding into 2 sides\n",
" dh /= 2\n",
"\n",
" if shape[::-1] != new_unpad: # resize\n",
" img = cv.resize(img, new_unpad, interpolation=cv.INTER_LINEAR)\n",
" top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n",
" left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n",
" img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=color) # add border\n",
" return img, ratio, (dw, dh)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5d01c15e-7dcc-4cec-87b0-a338e41051e4",
"metadata": {},
"outputs": [],
"source": [
"def preprocess_image(img0: np.ndarray):\n",
" \"\"\"\n",
" Preprocess image according to YOLOv8 input requirements.\n",
" Takes image in np.array format, resizes it to specific size using letterbox resize and changes data layout from HWC to CHW.\n",
"\n",
" Parameters:\n",
" img0 (np.ndarray): image for preprocessing\n",
" Returns:\n",
" img (np.ndarray): image after preprocessing\n",
" \"\"\"\n",
" # resize\n",
" img = letterbox(img0)[0]\n",
"\n",
" # Convert HWC to CHW\n",
" img = img.transpose(2, 0, 1)\n",
" img = np.ascontiguousarray(img)\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "5d99be3c-8c3d-4c4c-b82e-17b6724d2258",
"metadata": {},
"outputs": [],
"source": [
"def image_to_tensor(image:np.ndarray):\n",
" \"\"\"\n",
" Preprocess image according to YOLOv8 input requirements.\n",
" Takes image in np.array format, resizes it to specific size using letterbox resize and changes data layout from HWC to CHW.\n",
"\n",
" Parameters:\n",
" img (np.ndarray): image for preprocessing\n",
" Returns:\n",
" input_tensor (np.ndarray): input tensor in NCHW format with float32 values in [0, 1] range\n",
" \"\"\"\n",
" input_tensor = image.astype(np.float32) # uint8 to fp32\n",
" input_tensor /= 255.0 # 0 - 255 to 0.0 - 1.0\n",
"\n",
" # add batch dimension\n",
" if input_tensor.ndim == 3:\n",
" input_tensor = np.expand_dims(input_tensor, 0)\n",
" return input_tensor"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "c1867e4e-1b99-431a-9d07-0389ae47d6db",
"metadata": {},
"outputs": [],
"source": [
"def postprocess(\n",
" pred_boxes,\n",
" input_hw,\n",
" orig_img,\n",
" min_conf_threshold=0.25,\n",
" nms_iou_threshold=0.7,\n",
" agnosting_nms=False,\n",
" max_detections=300,\n",
"):\n",
" \"\"\"\n",
" YOLOv8 model postprocessing function. Applied non maximum supression algorithm to detections and rescale boxes to original image size\n",
" Parameters:\n",
" pred_boxes (np.ndarray): model output prediction boxes\n",
" input_hw (np.ndarray): preprocessed image\n",
" orig_image (np.ndarray): image before preprocessing\n",
" min_conf_threshold (float, *optional*, 0.25): minimal accepted confidence for object filtering\n",
" nms_iou_threshold (float, *optional*, 0.45): minimal overlap score for removing objects duplicates in NMS\n",
" agnostic_nms (bool, *optiona*, False): apply class agnostinc NMS approach or not\n",
" max_detections (int, *optional*, 300): maximum detections after NMS\n",
" Returns:\n",
" pred (List[Dict[str, np.ndarray]]): list of dictionary with det - detected boxes in format [x1, y1, x2, y2, score, label]\n",
" \"\"\"\n",
" nms_kwargs = {\"agnostic\": agnosting_nms, \"max_det\":max_detections}\n",
" preds = ops.non_max_suppression(\n",
" torch.from_numpy(pred_boxes),\n",
" min_conf_threshold,\n",
" nms_iou_threshold,\n",
" nc=3,\n",
" **nms_kwargs\n",
" )\n",
"\n",
" results = []\n",
" for i, pred in enumerate(preds):\n",
" shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape\n",
" if not len(pred):\n",
" results.append({\"det\": [], \"segment\": []})\n",
" continue\n",
" pred[:, :4] = ops.scale_boxes(input_hw, pred[:, :4], shape).round()\n",
" results.append({\"det\": pred})\n",
"\n",
" return results"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1940d393-1e89-46cd-9f12-b965487e1874",
"metadata": {},
"outputs": [],
"source": [
"def draw_results(results, source_image, label_map):\n",
" \"\"\"\n",
" Helper function for drawing bounding boxes on image\n",
" Parameters:\n",
" image_res (np.ndarray): detection predictions in format [x1, y1, x2, y2, score, label_id]\n",
" source_image (np.ndarray): input image for drawing\n",
" label_map; (Dict[int, str]): label_id to class name mapping\n",
" Returns:\n",
" Image with boxes\n",
" \"\"\"\n",
" boxes = results[\"det\"]\n",
" for idx, (*xyxy, conf, lbl) in enumerate(boxes):\n",
" label = f'{label_map[int(lbl)]} {conf:.2f}'\n",
" source_image = plot_one_box(xyxy, source_image, label=label, color=colors(int(lbl)), line_thickness=1)\n",
" return source_image"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c599c49c-52af-4d0e-bad7-20c25fa2c851",
"metadata": {},
"outputs": [],
"source": [
"def plot_one_box(box, img,\n",
" color,\n",
" label, \n",
" line_thickness=5):\n",
" \"\"\"\n",
" Helper function for drawing single bounding box on image\n",
" Parameters:\n",
" x (np.ndarray): bounding box coordinates in format [x1, y1, x2, y2]\n",
" img (no.ndarray): input image\n",
" color (Tuple[int, int, int], *optional*, None): color in BGR format for drawing box, if not specified will be selected randomly\n",
" label (str, *optonal*, None): box label string, if not provided will not be provided as drowing result\n",
" line_thickness (int, *optional*, 5): thickness for box drawing lines\n",
" \"\"\"\n",
" # Plots one bounding box on image img\n",
" tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\n",
" color = color or [random.randint(0, 255) for _ in range(3)]\n",
" c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n",
" cv.rectangle(img, c1, c2, color, thickness=tl, lineType=cv.LINE_AA)\n",
" if label:\n",
" tf = max(tl - 1, 1) # font thickness\n",
" t_size = cv.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n",
" c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n",
" cv.rectangle(img, c1, c2, color, -1, cv.LINE_AA) # filled\n",
" cv.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv.LINE_AA)\n",
"\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ed0b7672-e2d1-47e6-8de8-07ffbb793a53",
"metadata": {},
"outputs": [],
"source": [
"def read_label(label_path):\n",
" with open(label_path, 'r') as f:\n",
" labels = f.read().split()\n",
" return labels"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "9e24f99f-cf0c-4063-aedd-574eea9fe18c",
"metadata": {},
"outputs": [],
"source": [
"label_path = \"labels.txt\"\n",
"image_path = \"test1.jpg\"\n",
"yoloe_model_path = \"yolov8n/best.xml\"\n",
"label_map = read_label(label_path)\n",
"core = ov.Core()\n",
"compiled_model = core.compile_model(yoloe_model_path, \"CPU\")\n",
"cap = cv.VideoCapture(0)\n",
"while cap.isOpened():\n",
" ret, frame = cap.read()\n",
" frame = cv.flip(frame, 180)\n",
" cv.namedWindow(\"MaskDetection\", 0) # 0可调大小注意窗口名必须imshow里面的一窗口名一直\n",
" cv.resizeWindow(\"MaskDetection\", 640, 480) # 设置长和宽\n",
" preprocessed_image = preprocess_image(frame)\n",
" input_tensor = image_to_tensor(preprocessed_image)\n",
" result = compiled_model(input_tensor)\n",
" detections = postprocess(pred_boxes=result[compiled_model.output(0)], input_hw=input_tensor.shape[2:], orig_img=frame)[0]\n",
" image_with_boxes = draw_results(detections, frame, label_map)\n",
" cv.imshow('MaskDetection', image_with_boxes)\n",
" key = cv.waitKey(1)\n",
" if key == 27: #esc退出\n",
" break\n",
"cap.release()\n",
"cv.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7e2a8aab-cf82-4bf8-80d3-0cded72f9493",
"metadata": {},
"outputs": [],
"source": [
"cap = cv.VideoCapture(0)\n",
"ret, frame = cap.read()\n",
"curr_frame = preprocess_image(frame)\n",
"curr_fram = image_to_tensor(curr_frame)\n",
"curr_request.set_tensor(compiled_model.input(0), ov.Tensor(curr_frame))\n",
"curr_request.start_async()\n",
"while cap.isOpened():\n",
" ret, next_frame = cap.read()\n",
" next_frame = cv.flip(next_frame, 180)\n",
" cv.namedWindow(\"MaskDetection\", 0) # 0可调大小注意窗口名必须imshow里面的一窗口名一直\n",
" cv.resizeWindow(\"MaskDetection\", 640, 480) # 设置长和宽\n",
" in_frame = preprocess_image(next_frame)\n",
" in_frame = image_to_tensor(in_frame)\n",
" next_request.set_tensor(input_layer, ov.Tensor(in_frame))\n",
" next_request.start_async()\n",
" if curr_request.wait_for(-1) == 1:\n",
" boxes_name = curr_request.get_output_tensor(0).data\n",
" conf_name = curr_request.get_output_tensor(1).data\n",
" boxes, scores, classes = process_result(box_results=boxes_name, conf_results=conf_name)\n",
" frame = draw_box(image=frame, boxes=boxes, scores=scores, classes=classes, labels=labels)\n",
" cv.imshow('MaskDetection', frame)\n",
" frame = next_frame\n",
" curr_request, next_request = next_request, curr_request\n",
" key = cv.waitKey(1)\n",
" if key == 27: #esc退出\n",
" break\n",
"cap.release()\n",
"cv.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
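The last cell above implements OpenVINO's double-buffered asynchronous pattern: while the result of the current request is being consumed, the next frame is already in flight. Extracted from the notebook JSON for readability, a minimal standalone sketch, assuming the compiled_model, preprocess_image, image_to_tensor, postprocess, draw_results and label_map defined in the cells above:

curr_request = compiled_model.create_infer_request()
next_request = compiled_model.create_infer_request()
input_layer = compiled_model.input(0)

cap = cv.VideoCapture(0)
ret, frame = cap.read()
tensor = image_to_tensor(preprocess_image(frame))        # float32 NCHW
curr_request.set_tensor(input_layer, ov.Tensor(tensor))
curr_request.start_async()

while cap.isOpened():
    ret, next_frame = cap.read()
    if not ret:
        break
    next_tensor = image_to_tensor(preprocess_image(next_frame))
    next_request.set_tensor(input_layer, ov.Tensor(next_tensor))
    next_request.start_async()                           # next frame runs while we read the current result
    curr_request.wait()
    result = curr_request.get_output_tensor(0).data
    detections = postprocess(pred_boxes=result, input_hw=tensor.shape[2:], orig_img=frame)[0]
    cv.imshow("MaskDetection", draw_results(detections, frame, label_map))
    frame, tensor = next_frame, next_tensor
    curr_request, next_request = next_request, curr_request
    if cv.waitKey(1) == 27:                              # Esc to exit
        break

cap.release()
cv.destroyAllWindows()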

@@ -0,0 +1,380 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "8d41befc-7e65-4110-9118-35dce6e6ab0c",
"metadata": {},
"outputs": [],
"source": [
"from openvino.runtime import Core\n",
"import openvino.runtime as ov\n",
"import cv2 as cv\n",
"import numpy as np\n",
"from PIL import Image\n",
"from ultralytics.yolo.utils import ops\n",
"import torch\n",
"from ultralytics.yolo.utils.plotting import colors"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cd01038a-fe1a-4b47-ad49-b0641afdaee5",
"metadata": {},
"outputs": [],
"source": [
"def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scale_fill=False, scaleup=False, stride=32):\n",
" \"\"\"\n",
" Resize image and padding for detection. Takes image as input,\n",
" resizes image to fit into new shape with saving original aspect ratio and pads it to meet stride-multiple constraints\n",
"\n",
" Parameters:\n",
" img (np.ndarray): image for preprocessing\n",
" new_shape (Tuple(int, int)): image size after preprocessing in format [height, width]\n",
" color (Tuple(int, int, int)): color for filling padded area\n",
" auto (bool): use dynamic input size, only padding for stride constrins applied\n",
" scale_fill (bool): scale image to fill new_shape\n",
" scaleup (bool): allow scale image if it is lower then desired input size, can affect model accuracy\n",
" stride (int): input padding stride\n",
" Returns:\n",
" img (np.ndarray): image after preprocessing\n",
" ratio (Tuple(float, float)): hight and width scaling ratio\n",
" padding_size (Tuple(int, int)): height and width padding size\n",
"\n",
"\n",
" \"\"\"\n",
" # Resize and pad image while meeting stride-multiple constraints\n",
" shape = img.shape[:2] # current shape [height, width]\n",
" if isinstance(new_shape, int):\n",
" new_shape = (new_shape, new_shape)\n",
"\n",
" # Scale ratio (new / old)\n",
" r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n",
" if not scaleup: # only scale down, do not scale up (for better test mAP)\n",
" r = min(r, 1.0)\n",
"\n",
" # Compute padding\n",
" ratio = r, r # width, height ratios\n",
" new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n",
" dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n",
" if auto: # minimum rectangle\n",
" dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n",
" elif scale_fill: # stretch\n",
" dw, dh = 0.0, 0.0\n",
" new_unpad = (new_shape[1], new_shape[0])\n",
" ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n",
"\n",
" dw /= 2 # divide padding into 2 sides\n",
" dh /= 2\n",
"\n",
" if shape[::-1] != new_unpad: # resize\n",
" img = cv.resize(img, new_unpad, interpolation=cv.INTER_LINEAR)\n",
" top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n",
" left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n",
" img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=color) # add border\n",
" return img, ratio, (dw, dh)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5d01c15e-7dcc-4cec-87b0-a338e41051e4",
"metadata": {},
"outputs": [],
"source": [
"def preprocess_image(img0: np.ndarray):\n",
" \"\"\"\n",
" Preprocess image according to YOLOv8 input requirements.\n",
" Takes image in np.array format, resizes it to specific size using letterbox resize and changes data layout from HWC to CHW.\n",
"\n",
" Parameters:\n",
" img0 (np.ndarray): image for preprocessing\n",
" Returns:\n",
" img (np.ndarray): image after preprocessing\n",
" \"\"\"\n",
" # resize\n",
" img = letterbox(img0)[0]\n",
"\n",
" # Convert HWC to CHW\n",
" img = img.transpose(2, 0, 1)\n",
" img = np.ascontiguousarray(img)\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "5d99be3c-8c3d-4c4c-b82e-17b6724d2258",
"metadata": {},
"outputs": [],
"source": [
"def image_to_tensor(image:np.ndarray):\n",
" \"\"\"\n",
" Preprocess image according to YOLOv8 input requirements.\n",
" Takes image in np.array format, resizes it to specific size using letterbox resize and changes data layout from HWC to CHW.\n",
"\n",
" Parameters:\n",
" img (np.ndarray): image for preprocessing\n",
" Returns:\n",
" input_tensor (np.ndarray): input tensor in NCHW format with float32 values in [0, 1] range\n",
" \"\"\"\n",
" input_tensor = image.astype(np.float32) # uint8 to fp32\n",
" input_tensor /= 255.0 # 0 - 255 to 0.0 - 1.0\n",
"\n",
" # add batch dimension\n",
" if input_tensor.ndim == 3:\n",
" input_tensor = np.expand_dims(input_tensor, 0)\n",
" return input_tensor"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "c1867e4e-1b99-431a-9d07-0389ae47d6db",
"metadata": {},
"outputs": [],
"source": [
"def postprocess(\n",
" pred_boxes,\n",
" input_hw,\n",
" orig_img,\n",
" min_conf_threshold=0.25,\n",
" nms_iou_threshold=0.7,\n",
" agnosting_nms=False,\n",
" max_detections=300,\n",
"):\n",
" \"\"\"\n",
" YOLOv8 model postprocessing function. Applied non maximum supression algorithm to detections and rescale boxes to original image size\n",
" Parameters:\n",
" pred_boxes (np.ndarray): model output prediction boxes\n",
" input_hw (np.ndarray): preprocessed image\n",
" orig_image (np.ndarray): image before preprocessing\n",
" min_conf_threshold (float, *optional*, 0.25): minimal accepted confidence for object filtering\n",
" nms_iou_threshold (float, *optional*, 0.45): minimal overlap score for removing objects duplicates in NMS\n",
" agnostic_nms (bool, *optiona*, False): apply class agnostinc NMS approach or not\n",
" max_detections (int, *optional*, 300): maximum detections after NMS\n",
" Returns:\n",
" pred (List[Dict[str, np.ndarray]]): list of dictionary with det - detected boxes in format [x1, y1, x2, y2, score, label]\n",
" \"\"\"\n",
" nms_kwargs = {\"agnostic\": agnosting_nms, \"max_det\":max_detections}\n",
" preds = ops.non_max_suppression(\n",
" torch.from_numpy(pred_boxes),\n",
" min_conf_threshold,\n",
" nms_iou_threshold,\n",
" nc=3,\n",
" **nms_kwargs\n",
" )\n",
"\n",
" results = []\n",
" for i, pred in enumerate(preds):\n",
" shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape\n",
" if not len(pred):\n",
" results.append({\"det\": [], \"segment\": []})\n",
" continue\n",
" pred[:, :4] = ops.scale_boxes(input_hw, pred[:, :4], shape).round()\n",
" results.append({\"det\": pred})\n",
"\n",
" return results"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1940d393-1e89-46cd-9f12-b965487e1874",
"metadata": {},
"outputs": [],
"source": [
"def draw_results(results, source_image, label_map):\n",
" \"\"\"\n",
" Helper function for drawing bounding boxes on image\n",
" Parameters:\n",
" image_res (np.ndarray): detection predictions in format [x1, y1, x2, y2, score, label_id]\n",
" source_image (np.ndarray): input image for drawing\n",
" label_map; (Dict[int, str]): label_id to class name mapping\n",
" Returns:\n",
" Image with boxes\n",
" \"\"\"\n",
" boxes = results[\"det\"]\n",
" for idx, (*xyxy, conf, lbl) in enumerate(boxes):\n",
" label = f'{label_map[int(lbl)]} {conf:.2f}'\n",
" source_image = plot_one_box(xyxy, source_image, label=label, color=colors(int(lbl)), line_thickness=1)\n",
" return source_image"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c599c49c-52af-4d0e-bad7-20c25fa2c851",
"metadata": {},
"outputs": [],
"source": [
"def plot_one_box(box, img,\n",
" color,\n",
" label, \n",
" line_thickness=5):\n",
" \"\"\"\n",
" Helper function for drawing single bounding box on image\n",
" Parameters:\n",
" x (np.ndarray): bounding box coordinates in format [x1, y1, x2, y2]\n",
" img (no.ndarray): input image\n",
" color (Tuple[int, int, int], *optional*, None): color in BGR format for drawing box, if not specified will be selected randomly\n",
" label (str, *optonal*, None): box label string, if not provided will not be provided as drowing result\n",
" line_thickness (int, *optional*, 5): thickness for box drawing lines\n",
" \"\"\"\n",
" # Plots one bounding box on image img\n",
" tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\n",
" color = color or [random.randint(0, 255) for _ in range(3)]\n",
" c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n",
" cv.rectangle(img, c1, c2, color, thickness=tl, lineType=cv.LINE_AA)\n",
" if label:\n",
" tf = max(tl - 1, 1) # font thickness\n",
" t_size = cv.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n",
" c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n",
" cv.rectangle(img, c1, c2, color, -1, cv.LINE_AA) # filled\n",
" cv.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv.LINE_AA)\n",
"\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ed0b7672-e2d1-47e6-8de8-07ffbb793a53",
"metadata": {},
"outputs": [],
"source": [
"def read_label(label_path):\n",
" with open(label_path, 'r') as f:\n",
" labels = f.read().split()\n",
" return labels"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "531a9cbd-3613-4c41-b3f2-be900a5de3d3",
"metadata": {},
"outputs": [],
"source": [
"label_path = \"labels.txt\"\n",
"yoloe_model_path = \"yolov9c/best.xml\"\n",
"label_map = read_label(label_path)\n",
"core = ov.Core()\n",
"compiled_model = core.compile_model(yoloe_model_path, \"CPU\")"
]
},
{
"cell_type": "markdown",
"id": "321e9911-30a5-44dd-810b-4b41ed5154b8",
"metadata": {},
"source": [
"label_path = \"labels.txt\"\n",
"yoloe_model_path = \"yolov9c/best.xml\"\n",
"label_map = read_label(label_path)\n",
"core = ov.Core()\n",
"compiled_model = core.compile_model(yoloe_model_path, \"GPU\")\n",
"cap = cv.VideoCapture(0)\n",
"while cap.isOpened():\n",
" ret, frame = cap.read()\n",
" frame = cv.flip(frame, 180)\n",
" cv.namedWindow(\"MaskDetection\", 0) # 0可调大小注意窗口名必须imshow里面的一窗口名一直\n",
" cv.resizeWindow(\"MaskDetection\", 640, 480) # 设置长和宽\n",
" preprocessed_image = preprocess_image(frame)\n",
" input_tensor = image_to_tensor(preprocessed_image)\n",
" result = compiled_model(input_tensor)\n",
" detections = postprocess(pred_boxes=result[compiled_model.output(0)], input_hw=input_tensor.shape[2:], orig_img=frame)[0]\n",
" image_with_boxes = draw_results(detections, frame, label_map)\n",
" cv.imshow('MaskDetection', image_with_boxes)\n",
" key = cv.waitKey(1)\n",
" if key == 27: #esc退出\n",
" break\n",
"cap.release()\n",
"cv.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "7e2a8aab-cf82-4bf8-80d3-0cded72f9493",
"metadata": {},
"outputs": [
{
"ename": "RuntimeError",
"evalue": "Exception from src\\inference\\src\\cpp\\infer_request.cpp:66:\nException from src\\plugins\\intel_cpu\\src\\infer_request.cpp:378:\nParameterMismatch: Failed to set tensor for input with precision: u8, since the model input tensor precision is: f32\n\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[10], line 9\u001b[0m\n\u001b[0;32m 7\u001b[0m input_tensor \u001b[38;5;241m=\u001b[39m compiled_model\u001b[38;5;241m.\u001b[39minput(\u001b[38;5;241m0\u001b[39m)\n\u001b[0;32m 8\u001b[0m input_tensor\u001b[38;5;241m.\u001b[39mprecision \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFP32\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m----> 9\u001b[0m \u001b[43mcurr_request\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mset_tensor\u001b[49m\u001b[43m(\u001b[49m\u001b[43minput_tensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mov\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mTensor\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcurr_frame\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 10\u001b[0m curr_request\u001b[38;5;241m.\u001b[39mstart_async()\n\u001b[0;32m 11\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m cap\u001b[38;5;241m.\u001b[39misOpened():\n",
"\u001b[1;31mRuntimeError\u001b[0m: Exception from src\\inference\\src\\cpp\\infer_request.cpp:66:\nException from src\\plugins\\intel_cpu\\src\\infer_request.cpp:378:\nParameterMismatch: Failed to set tensor for input with precision: u8, since the model input tensor precision is: f32\n\n"
]
}
],
"source": [
"cap = cv.VideoCapture(0)\n",
"ret, frame = cap.read()\n",
"curr_frame = preprocess_image(frame)\n",
"curr_fram = image_to_tensor(curr_frame)\n",
"curr_request = compiled_model.create_infer_request()\n",
"next_request = compiled_model.create_infer_request()\n",
"input_tensor = compiled_model.input(0)\n",
"input_tensor.precision = \"FP32\"\n",
"curr_request.set_tensor(input_tensor, ov.Tensor(curr_frame))\n",
"curr_request.start_async()\n",
"while cap.isOpened():\n",
" ret, next_frame = cap.read()\n",
" next_frame = cv.flip(next_frame, 180)\n",
" cv.namedWindow(\"MaskDetection\", 0) # 0可调大小注意窗口名必须imshow里面的一窗口名一直\n",
" cv.resizeWindow(\"MaskDetection\", 640, 480) # 设置长和宽\n",
" in_frame = preprocess_image(next_frame)\n",
" in_frame = image_to_tensor(in_frame)\n",
" input_tensor = compiled_model.input(0)\n",
" input_tensor.precision = \"FP32\"\n",
" next_request.set_tensor(input_tensor, ov.Tensor(in_frame))\n",
" next_request.start_async()\n",
" if curr_request.wait_for(-1) == 1:\n",
" result = curr_request.get_output_tensor(0).data\n",
" detections = postprocess(pred_boxes=result[compiled_model.output(0)], input_hw=in_frame.shape[2:], orig_img=next_frame)[0]\n",
" image_with_boxes = draw_results(detections, next_frame, label_map)\n",
" cv.imshow('MaskDetection', image_with_boxes)\n",
" frame = next_frame\n",
" curr_request, next_request = next_request, curr_request\n",
" key = cv.waitKey(1)\n",
" if key == 27: #esc退出\n",
" break\n",
"cap.release()\n",
"cv.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4cb86c95-195f-4c01-ba22-f569d1b3b964",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
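The ParameterMismatch recorded in the last cell's output came from the curr_fram typo (fixed above): the uint8 letterboxed frame was handed to an f32 model input, and assigning input_tensor.precision does not change a compiled model. An alternative to converting every frame in Python is to bake the u8-to-f32 conversion into the model graph; a minimal sketch with OpenVINO's PrePostProcessor API, assuming the same yolov9c/best.xml path:

import numpy as np
import openvino.runtime as ov
from openvino.preprocess import PrePostProcessor

core = ov.Core()
model = core.read_model("yolov9c/best.xml")

ppp = PrePostProcessor(model)
ppp.input().tensor().set_element_type(ov.Type.u8).set_layout(ov.Layout("NHWC"))
ppp.input().model().set_layout(ov.Layout("NCHW"))
ppp.input().preprocess().convert_element_type(ov.Type.f32).scale(255.0)
model = ppp.build()

compiled_model = core.compile_model(model, "CPU")
# a letterboxed BGR frame now only needs a batch dimension:
# result = compiled_model(np.expand_dims(letterbox(frame)[0], 0))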

@@ -0,0 +1,3 @@
helmet
no_helmet
two_wheeler

Binary image file added (not shown, 264 KiB).

Binary image file added (not shown, 27 KiB).

Binary file not shown.

File diff suppressed because it is too large.

@@ -0,0 +1,16 @@
description: Ultralytics best model trained on /usr/local/lib/python3.10/dist-packages/ultralytics/cfg/datasets/VOC.yaml
author: Ultralytics
date: '2024-03-12T05:17:45.067881'
version: 8.1.27
license: AGPL-3.0 License (https://ultralytics.com/license)
docs: https://docs.ultralytics.com
stride: 32
task: detect
batch: 1
imgsz:
- 640
- 640
names:
0: helmet
1: no_helmet
2: two_wheeler

Binary file not shown.

File diff suppressed because it is too large.

@@ -0,0 +1,16 @@
description: Ultralytics YOLOv9c model trained on /usr/local/lib/python3.10/dist-packages/ultralytics/cfg/datasets/VOC.yaml
author: Ultralytics
date: '2024-03-14T10:12:52.971785'
version: 8.1.27
license: AGPL-3.0 License (https://ultralytics.com/license)
docs: https://docs.ultralytics.com
stride: 32
task: detect
batch: 1
imgsz:
- 640
- 640
names:
0: helmet
1: no_helmet
2: two_wheeler
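The two 16-line blocks above are the metadata.yaml files that Ultralytics writes next to each exported OpenVINO model; the names mapping makes the hand-maintained labels.txt redundant. A minimal loading sketch (the path is illustrative, since the diff does not show the file locations):

import yaml

with open("yolov9c/metadata.yaml") as f:   # assumed location, next to best.xml
    meta = yaml.safe_load(f)

label_map = meta["names"]    # {0: 'helmet', 1: 'no_helmet', 2: 'two_wheeler'}
imgsz = meta["imgsz"]        # [640, 640]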