welding_wearing_detect.py

import torch
import cv2
import threading
from datetime import datetime
from ultralytics import YOLO
from globals import stop_event, redis_client
from config import SAVE_IMG_PATH, POST_IMG_PATH1, WELDING_WEARING_MODEL, WELDING_WEARING_VIDEO_SOURCES


def init_wearing_detection():
    # Reset the Redis keys used by the wearing-detection threads.
    redis_client.set("welding_wearing_human_in_postion", 'False')
    redis_client.delete("welding_wearing_items_nums")
    redis_client.delete("welding_wearing_detection_img")
    redis_client.set("welding_wearing_detection_img_flag", 'False')


def start_wearing_detection(start_events):
    # Create one detection thread per model; every thread reads the same video source.
    threads = []
    for model_path in WELDING_WEARING_MODEL:
        event = threading.Event()
        start_events.append(event)
        thread = threading.Thread(target=process_video, args=(model_path, WELDING_WEARING_VIDEO_SOURCES, event))
        threads.append(thread)
        thread.daemon = True
        thread.start()
    # Wait for all threads to complete
    for thread in threads:
        thread.join()


def process_video(model_path, video_source, start_event):
    model = YOLO(model_path)
    cap = cv2.VideoCapture(video_source)
    while cap.isOpened():
        # Read a frame from the video
        success, frame = cap.read()
        if stop_event.is_set():  # stop inference when requested
            break
        if success:
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 10 != 0:  # frame skipping: only process every 10th frame
                continue
            # Crop the central region of interest out of the frame
            x, y, w, h = 786, 0, 385, 1000
            cropped_frame = frame[y:y + h, x:x + w]
            # Run YOLOv8 inference on the cropped frame
            if model_path == WELDING_WEARING_MODEL[0]:  # yolov8s model, used only to detect persons
                # classes=[0] restricts inference to the single "person" class
                results = model.predict(cropped_frame, conf=0.6, verbose=False, classes=[0])
            else:
                results = model.predict(cropped_frame, conf=0.6, verbose=False)
            for r in results:  # predict() returns a list of Results objects
                # The following attributes are all tensors.
                boxes = r.boxes.xyxy        # bounding-box coordinates of every detection
                confidences = r.boxes.conf  # confidence score of every detection
                classes = r.boxes.cls       # class index of every detection
                # PPE counters; re-initialised here because they must be reset for every frame.
                wearing_items = {
                    "pants": 0,
                    "jacket": 0,
                    "helmet": 0,
                    "gloves": 0,
                    "shoes": 0,
                }
                for i in range(len(boxes)):
                    x1, y1, x2, y2 = boxes[i].tolist()
                    confidence = confidences[i].item()
                    cls = int(classes[i].item())
                    label = model.names[cls]
                    # if x1 < WEAR_DETECTION_AREA[0] or y1 < WEAR_DETECTION_AREA[1] or x2 > WEAR_DETECTION_AREA[2] or y2 > WEAR_DETECTION_AREA[3]:
                    #     continue  # skip boxes outside the detection area
                    if model_path == WELDING_WEARING_MODEL[0]:  # yolov8s model, person detection only
                        if label == "person" and redis_client.get("welding_wearing_human_in_postion") == 'False':
                            redis_client.set("welding_wearing_human_in_postion", 'True')
                    else:
                        wearing_items[label] += 1
                if model_path == WELDING_WEARING_MODEL[1]:
                    # Publish this frame's per-item counts as a Redis list.
                    welding_wearing_items_nums = [
                        wearing_items["pants"],
                        wearing_items["jacket"],
                        wearing_items["helmet"],
                        wearing_items["gloves"],
                        wearing_items["shoes"],
                    ]
                    if redis_client.exists("welding_wearing_items_nums"):
                        redis_client.delete("welding_wearing_items_nums")
                    redis_client.rpush("welding_wearing_items_nums", *welding_wearing_items_nums)
                if redis_client.get("welding_wearing_detection_img_flag") == 'True' and not redis_client.exists("welding_wearing_detection_img"):
                    # Save an annotated snapshot once, then publish its path via Redis.
                    save_time = datetime.now().strftime('%Y%m%d_%H%M')
                    imgpath = f"{SAVE_IMG_PATH}/welding_wearing_detection_{save_time}.jpg"
                    post_path = f"{POST_IMG_PATH1}/welding_wearing_detection_{save_time}.jpg"
                    annotated_frame = results[0].plot()
                    cv2.imwrite(imgpath, annotated_frame)
                    redis_client.set("welding_wearing_detection_img", post_path)
            start_event.set()  # signal that this thread has produced its first results
        else:
            # Break the loop if the end of the video is reached
            break
    # Release the video capture object and free any cached GPU memory.
    cap.release()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model
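

# A minimal driver sketch, assuming this module is run directly and that
# `globals.stop_event` / `globals.redis_client` are already configured by the
# rest of the project (both come from the imports above); illustrative only.
if __name__ == "__main__":
    start_events = []                       # one threading.Event per model thread
    init_wearing_detection()                # reset the Redis keys used by the detectors
    start_wearing_detection(start_events)   # blocks until every detector thread exits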