# person_speed.py — person tracking and per-track speed estimation (YOLO11 + OpenCV)
  1. import cv2
  2. import numpy as np
  3. from ultralytics import YOLO
  4. from collections import defaultdict, deque
  5. import datetime
  6. import time
  7. import math
  8. # Load the YOLO11 model
  9. model = YOLO("yolo11m.pt")
  10. # Open the video file
  11. video_path = r"E:\desktop_file\速度标定\run.mp4"
  12. # video_path = r"E:\wx_file\WeChat Files\wxid_1lcmt2w2jdwl22\FileStorage\File\2024-11\3.4-13时胶乳包装.mp4"
  13. cap = cv2.VideoCapture(video_path)
  14. # 存储最近的200帧用于回溯
  15. frame_buffer = deque(maxlen=200) # 新增帧缓冲区
  16. # Store the track history
  17. track_history = defaultdict(lambda: [])
  18. # 用于存储每个 track_id 最近的时间戳
  19. time_stamps = defaultdict(lambda: deque(maxlen=200)) # 固定长度为 50
  20. # 用于存储瞬时速度
  21. instantaneous_velocities = defaultdict(lambda: deque(maxlen=100))
  22. def apply_bias(position):
  23. """
  24. 偏置函数:使用 x/ln(1+x) 计算偏置
  25. 已弃用
  26. """
  27. x, y = position
  28. bias_x = np.log1p(x) if x > 0 else 0
  29. bias_y = np.log1p(y) if y > 0 else 0
  30. return np.array([bias_x, bias_y])
  31. def save_high_speed_video(buffer, trigger_time):
  32. """将缓冲区中的帧保存为MP4文件"""
  33. if len(buffer) < 1:
  34. return
  35. # 生成唯一文件名
  36. timestamp = trigger_time.strftime("%Y%m%d%H%M%S%f")
  37. output_path = f"high_speed_{timestamp}.mp4"
  38. # 使用MP4编码(需确保OpenCV支持)
  39. fourcc_mp4 = cv2.VideoWriter_fourcc(*'x264')
  40. writer = cv2.VideoWriter(output_path, fourcc_mp4, fps, (frame_width, frame_height))
  41. for frame in buffer:
  42. writer.write(frame)
  43. writer.release()
  44. def map_to_ellipse(position):
  45. x, y = position
  46. center_x = 640
  47. center_y = 360
  48. a = 580
  49. b = 280
  50. x_norm = x / 1280
  51. y_norm = y / 720
  52. d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
  53. theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
  54. f = d_norm
  55. a_new = a * f
  56. b_new = b * f
  57. bias_x = center_x + a_new * math.cos(theta_norm)
  58. bias_y = center_y + b_new * math.sin(theta_norm)
  59. return np.array([bias_x, bias_y])
  60. # 创建 VideoWriter 对象以保存输出视频
  61. fourcc = cv2.VideoWriter_fourcc(*'XVID') # 视频编码格式
  62. output_file = "output_video.avi" # 输出文件名
  63. fps = 25 # 帧率
  64. frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
  65. frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
  66. out = cv2.VideoWriter(output_file, fourcc, fps, (frame_width, frame_height))
  67. speed_threshold = 30 # 速度阈值
  68. high_velocity_count_threshold = 20 # 高速度计数阈值
  69. # Loop through the video frames
  70. while cap.isOpened():
  71. # 记录当前时间
  72. current_time = time.time()
  73. # Read a frame from the video
  74. success, frame = cap.read()
  75. if success:
  76. # 将当前帧加入缓冲区(深拷贝避免覆盖)
  77. frame_buffer.append(frame.copy()) # 新增
  78. # Run YOLO11 tracking on the frame, persisting tracks between frames
  79. results = model.track(frame, persist=True, classes=0, conf=0.6)
  80. if results[0].boxes and results[0].boxes.id is not None:
  81. # Get the boxes and track IDs
  82. boxes = results[0].boxes.xywh.cpu()
  83. track_ids = results[0].boxes.id.int().cpu().tolist()
  84. for box, track_id in zip(boxes, track_ids):
  85. x, y, w, h = box
  86. # 绘制边界框
  87. cv2.rectangle(frame, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), (0, 255, 0), 2)
  88. # 计算左下角坐标
  89. bottom_left_x = int(x - w / 2)
  90. bottom_left_y = int(y + h / 2)
  91. # 计算中心点
  92. center_x = int(x)
  93. center_y = int(y)
  94. # 绘制中心点
  95. cv2.circle(frame, (center_x, center_y), 5, (255, 0, 0), -1) # 红色中心点,半径为 5
  96. # 记录位置
  97. track_history[track_id].append((bottom_left_x, bottom_left_y))
  98. if len(track_history[track_id]) > 100:
  99. del track_history[track_id][:-50] # 维持历史长度
  100. # 记录每一帧的时间
  101. time_stamps[track_id].append(current_time)
  102. # 计算时间间隔
  103. if len(time_stamps[track_id]) > 1:
  104. delta_time = time_stamps[track_id][-1] - time_stamps[track_id][-2] # 最近两帧的时间差
  105. else:
  106. delta_time = 0
  107. instantaneous_velocity = 0
  108. # 计算二维瞬时速度
  109. if len(track_history[track_id]) >= 2:
  110. pos1 = np.array(track_history[track_id][-1]) # 最新位置
  111. pos2 = np.array(track_history[track_id][-2]) # 前一个位置
  112. pos1 = map_to_ellipse(pos1)
  113. pos2 = map_to_ellipse(pos2)
  114. distance = np.linalg.norm(pos1 - pos2)
  115. # 使用时间间隔进行速度计算
  116. instantaneous_velocity = distance / delta_time if delta_time > 0 else np.zeros(2)
  117. instantaneous_velocity_magnitude = round(np.linalg.norm(instantaneous_velocity), 1)
  118. instantaneous_velocities[track_id].append(instantaneous_velocity_magnitude)
  119. else:
  120. instantaneous_velocity_magnitude = 0
  121. # 判断是否有足够数量的高速度
  122. high_velocity_count = sum(1 for velocity in instantaneous_velocities[track_id] if velocity > speed_threshold)
  123. if high_velocity_count >= high_velocity_count_threshold:
  124. # 原逻辑:截图,标红
  125. # cv2.putText(frame, str(instantaneous_velocity_magnitude), (int(x), int(y)),
  126. # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
  127. # data_time = str(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
  128. # file_name = "high_speed_" + data_time + ".jpg"
  129. # cv2.imwrite(file_name, frame)
  130. # 新增逻辑:删除超过 speed_threshold 的瞬时速度
  131. instantaneous_velocities[track_id] = deque(
  132. [velocity for velocity in instantaneous_velocities[track_id] if velocity <= speed_threshold],
  133. maxlen=100
  134. )
  135. # 新增保存视频逻辑
  136. data_time = datetime.datetime.now()
  137. save_high_speed_video(frame_buffer, data_time)
  138. else:
  139. cv2.putText(frame, str(instantaneous_velocity_magnitude), (int(x), int(y)),
  140. cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 121, 23), 2)
  141. # Save the annotated frame to the output video
  142. out.write(frame) # 将处理后的视频帧写入文件
  143. # Display the annotated frame
  144. cv2.imshow("YOLO11 Tracking", frame)
  145. # Break the loop if 'q' is pressed
  146. if cv2.waitKey(1) & 0xFF == ord("q"):
  147. break
  148. else:
  149. # Break the loop if the end of the video is reached
  150. break
  151. # Release the video capture object and close the display window
  152. cap.release()
  153. out.release() # 释放 VideoWriter 对象
  154. cv2.destroyAllWindows()