compressed_oxygen_detect.py 4.3 KB

import torch
import cv2
import threading
from datetime import datetime
from ultralytics import YOLO
from globals import stop_event, redis_client, steps, hand_box, head_box
from config import VIDEO_SOURCE, MODEL_PATH
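# NOTE (assumption, not part of the original module): `globals` is expected to
# expose a threading.Event named `stop_event`, a Redis client created with
# decode_responses=True (so get() returns str and the == 'False' comparisons
# below work), a list `steps` of six booleans, and the shared `hand_box` /
# `head_box` lists. `config` is expected to expose VIDEO_SOURCE (a file path or
# camera index) and MODEL_PATH (path to the YOLO weights). A minimal globals.py
# consistent with this file might look like:
#
#     import threading, redis
#     stop_event = threading.Event()
#     redis_client = redis.Redis(decode_responses=True)
#     steps = [False] * 6
#     hand_box = [0, 0, 0, 0]
#     head_box = [0, 0, 0, 0]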


def init_compressed_oxygen_detection():
    for i, step in enumerate(steps):
        redis_client.set(f"compressed_oxygen_step_{i+1}", 'False')


def start_compressed_oxygen_detection(start_events):
    event = threading.Event()
    start_events.append(event)
    thread = threading.Thread(target=process_video, args=(MODEL_PATH, VIDEO_SOURCE, event))
    thread.daemon = True
    thread.start()
    thread.join()
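    # Note: thread.join() blocks this call until process_video returns
    # (end of video reached or stop_event is set).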


def IoU(box1, box2):
    '''
    Compute the intersection over union (IoU) of two boxes.
    :param box1: list, top-left and bottom-right coordinates of the first box
    :param box2: list, top-left and bottom-right coordinates of the second box
    :return: the IoU of the two boxes
    '''
    x1 = max(box1[0], box2[0])  # intersection top-left x
    x2 = min(box1[2], box2[2])  # intersection bottom-right x
    y1 = max(box1[1], box2[1])  # intersection top-left y
    y2 = min(box1[3], box2[3])  # intersection bottom-right y
    overlap = max(0., x2 - x1) * max(0., y2 - y1)
    union = (box1[2] - box1[0]) * (box1[3] - box1[1]) \
        + (box2[2] - box2[0]) * (box2[3] - box2[1]) \
        - overlap
    return overlap / union
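

# Illustrative check of IoU (not in the original file): for two 2x2 boxes
# offset by one unit, IoU([0, 0, 2, 2], [1, 0, 3, 2]) gives an overlap of
# (2-1)*(2-0) = 2 and a union of 4 + 4 - 2 = 6, i.e. roughly 0.333.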


def process_video(model_path, video_source, start_event):
    global steps, hand_box, head_box  # hand_box/head_box are rebound below
    model = YOLO(model_path)
    cap = cv2.VideoCapture(video_source)
    while cap.isOpened():
        # Read a frame from the video
        success, frame = cap.read()
        if stop_event.is_set():  # external request to stop inference
            break
        if success:
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 10 != 0:  # skip frames: only run detection on every 10th frame
                continue
            results = model.predict(frame, conf=0.6, verbose=False)
            for r in results:
                boxes = r.boxes.xyxy        # bounding-box coordinates of all detections
                confidences = r.boxes.conf  # confidence scores of all detections
                classes = r.boxes.cls       # class indices of all detections
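                # Mapping from detected labels to step indices, as encoded in
                # the branches below (steps[i] corresponds to Redis key
                # compressed_oxygen_step_{i+1}):
                #   aerostat_gasbag            -> steps[0]; overlapping head also sets steps[2]
                #   neckband overlapping head  -> steps[1]
                #   valve overlapping hand     -> steps[2]
                #   air make-up                -> steps[0]; overlapping hand also sets steps[4]
                #   nose clip overlapping head -> steps[5]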
                head_box = [0, 0, 0, 0]
                hand_box = [0, 0, 0, 0]
                for i in range(len(boxes)):
                    x1, y1, x2, y2 = boxes[i].tolist()
                    confidence = confidences[i].item()
                    cls = int(classes[i].item())
                    label = model.names[cls]
                    # print("label:", label)
                    if label == 'head':
                        head_box = [x1, y1, x2, y2]
                    if label == 'hand':
                        hand_box = [x1, y1, x2, y2]
                    if label == 'aerostat_gasbag':
                        steps[0] = True
                        print("steps[0]:", steps[0])
                        if IoU(head_box, [x1, y1, x2, y2]) > 0.1:
                            steps[2] = True
                            print("steps[2]:", steps[2])
                    if label == 'neckband':
                        if IoU(head_box, [x1, y1, x2, y2]) > 0.1:
                            steps[1] = True
                            print("steps[1]:", steps[1])
                    if label == 'valve':
                        if IoU(hand_box, [x1, y1, x2, y2]) > 0.1:
                            steps[2] = True
                            print("steps[2]:", steps[2])
                    if label == 'air make-up':
                        steps[0] = True
                        if IoU(hand_box, [x1, y1, x2, y2]) > 0.1:
                            steps[4] = True
                            print("steps[4]:", steps[4])
                    if label == 'nose clip':
                        if IoU(head_box, [x1, y1, x2, y2]) > 0.1:
                            steps[5] = True
                            print("steps[5]:", steps[5])
            # Push newly completed steps to Redis in the order they were observed
            for i, step in enumerate(steps):
                if step and redis_client.get(f"compressed_oxygen_step_{i+1}") == 'False':
                    redis_client.rpush("compressed_oxygen_order", f"{i+1}")
                    redis_client.set(f"compressed_oxygen_step_{i+1}", 'True')
            start_event.set()
        else:
            # Break the loop if the end of the video is reached
            break
    # Release the video capture object and close the display window
    cap.release()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model
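

# Usage sketch (illustrative, not part of the original module): a caller would
# typically reset the step flags and then launch detection, e.g.
#
#     init_compressed_oxygen_detection()               # reset all step flags in Redis
#     start_events = []
#     start_compressed_oxygen_detection(start_events)  # blocks until the video ends or stop_event is set
#     # start_events[0] is set once the first processed frame has been handled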