# compressed_oxygen_detect.py
  1. import torch
  2. import cv2
  3. import threading
  4. from datetime import datetime
  5. from ultralytics import YOLO
  6. from globals import stop_event,redis_client,steps,hand_box,head_box
  7. from config import VIDEO_SOURCE,MODEL_PATH
  8. def init_compressed_oxygen_detection():
  9. for i, step in enumerate(steps):
  10. redis_client.set(f"compressed_oxygen_step_{i+1}",'False')
  11. redis_client.delete("compressed_oxygen_order")
  12. def start_compressed_oxygen_detection(start_events):
  13. event = threading.Event()
  14. start_events.append(event)
  15. thread = threading.Thread(target=process_video, args=(MODEL_PATH,VIDEO_SOURCE,event))
  16. thread.daemon=True
  17. thread.start()
  18. thread.join()
  19. def IoU(box1, box2):
  20. '''
  21. 计算两个矩形框的交并比
  22. :param box1: list,第一个矩形框的左上角和右下角坐标
  23. :param box2: list,第二个矩形框的左上角和右下角坐标
  24. :return: 两个矩形框的交并比iou
  25. '''
  26. x1 = max(box1[0], box2[0]) # 交集左上角x
  27. x2 = min(box1[2], box2[2]) # 交集右下角x
  28. y1 = max(box1[1], box2[1]) # 交集左上角y
  29. y2 = min(box1[3], box2[3]) # 交集右下角y
  30. overlap = max(0., x2-x1) * max(0., y2-y1)
  31. union = (box1[2]-box1[0]) * (box1[3]-box1[1]) \
  32. + (box2[2]-box2[0]) * (box2[3]-box2[1]) \
  33. - overlap
  34. return overlap/union
def process_video(model_path, video_source, start_event):
    """Run YOLO inference over a video stream and update the step checklist.

    Detected object classes drive a six-step checklist held in the shared
    ``steps`` list; newly completed steps are mirrored into Redis together
    with the order in which they were first observed.

    :param model_path: path to the YOLO weights file
    :param video_source: anything ``cv2.VideoCapture`` accepts (path/index/URL)
    :param start_event: ``threading.Event`` set after each processed frame
    """
    model = YOLO(model_path)
    cap = cv2.VideoCapture(video_source)
    while cap.isOpened():
        # Read a frame from the video
        success, frame = cap.read()
        if stop_event.is_set():  # external request to stop inference
            break
        if success:
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 10 != 0:  # frame skipping: analyse only every 10th frame
                continue
            # NOTE(review): 'step' is declared global but the mutations below
            # target elements of the 'steps' list; also, rebinding head_box /
            # hand_box here shadows the names imported from globals, so other
            # modules may not see the updates — confirm intended sharing.
            global step,hand_box,head_box
            results = model.predict(frame,conf=0.2,verbose=False)
            for r in results:
                boxes = r.boxes.xyxy  # bounding-box coordinates of every detection
                confidences = r.boxes.conf  # confidence score of every detection
                classes = r.boxes.cls  # class index of every detection
                # Per-frame reset; [0,0,0,0] means "not seen in this frame".
                head_box=[0,0,0,0]
                hand_box=[0,0,0,0]
                step4_flag=False
                air_make_up_flag=False
                for i in range(len(boxes)):
                    x1, y1, x2, y2 = boxes[i].tolist()
                    confidence = confidences[i].item()
                    cls = int(classes[i].item())
                    label = model.names[cls]
                    #print("label:",label)
                    if(label=='head'):head_box=[x1,y1,x2,y2]
                    if(label=='hand'):hand_box=[x1,y1,x2,y2]
                    if(label=='aerostat_gasbag'):
                        # Step 1: gasbag visible at all.
                        if steps[0]==False:
                            steps[0]=True
                            print("steps[0]:",steps[0])
                        # Step 3: gasbag overlapping the head.
                        if(IoU(head_box,[x1,y1,x2,y2])>0 and steps[2]==False):
                            steps[2]=True
                            print("steps[2]:",steps[2])
                        # Candidate for step 5: hand touching the gasbag.
                        # Confirmed after the loop only when no 'air make-up'
                        # was seen in the same frame and step 4 is done.
                        if(IoU(hand_box,[x1,y1,x2,y2])>0 and steps[4]==False):
                            step4_flag=True
                            #steps[4]=True
                            #print("steps[4]:",steps[4])
                    if(label=='neckband'):
                        # Step 2: neckband at the head, only after step 1.
                        if(IoU(head_box,[x1,y1,x2,y2])>0 and steps[1]==False and steps[0]==True):
                            steps[1]=True
                            print("steps[1]:",steps[1])
                    if(label=='valve'):
                        # Step 4: hand on the valve.
                        if(IoU(hand_box,[x1,y1,x2,y2])>0 and steps[3]==False):
                            steps[3]=True
                            print("steps[3]:",steps[3])
                    if(label=='air make-up'):
                        air_make_up_flag=True
                        # An 'air make-up' detection also implies step 1.
                        if steps[0]==False:
                            steps[0]=True
                            print("steps[0]:",steps[0])
                    if(label=='nose clip' and steps[5]==False):
                        # Step 6: nose clip detected directly.
                        steps[5]=True
                        print("steps[5]:",steps[5])
                # Confirm step 5: hand on gasbag, no air make-up this frame,
                # and step 4 already completed.
                if step4_flag and not air_make_up_flag and steps[3]:
                    steps[4]=True
                    print("steps[4]:",steps[4])
                # Step 6 fallback: hand overlapping the head in this frame.
                if head_box != [0, 0, 0, 0] and hand_box != [0, 0, 0, 0] and IoU(head_box, hand_box) > 0 and steps[5] == False:
                    steps[5]=True
                    print("steps[5]:",steps[5])
                # Mirror newly completed steps into Redis and record their order.
                # NOTE(review): comparing against the str 'False' assumes the
                # Redis client decodes responses (decode_responses=True);
                # otherwise get() returns bytes and this never matches — verify.
                for i, step in enumerate(steps):
                    if step and redis_client.get(f"compressed_oxygen_step_{i+1}")=='False':
                        redis_client.rpush("compressed_oxygen_order", f"{i+1}")
                        redis_client.set(f"compressed_oxygen_step_{i+1}",'True')
            start_event.set()
        else:
            # Break the loop if the end of the video is reached
            break
    # Release the video capture object and close the display window
    cap.release()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model