# runthrbig.py

# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
                                                     img.jpg                         # image
                                                     vid.mp4                         # video
                                                     screen                          # screenshot
                                                     path/                           # directory
                                                     list.txt                        # list of images
                                                     list.streams                    # list of streams
                                                     'path/*.jpg'                    # glob
                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle
"""
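# Pipeline overview (as implemented below): one `runtracker` process per camera
# channel runs YOLO person tracking and flags tracks with sustained high speed;
# each flagged event buffers frames into `videoqueue`; a single `postvideo`
# worker writes the clip, asks Qwen2.5-VL whether the person is running or
# walking, and POSTs confirmed running events to the upload API.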
import argparse
import asyncio
import math
import multiprocessing
import os
import pathlib
import platform
import signal
import sqlite3
import sys
import threading
import time
from collections import defaultdict, deque
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from datetime import datetime
from multiprocessing import Manager, Process, Queue, Value, set_start_method
from pathlib import Path

import matplotlib.path as mat
import numpy as np
import requests
import torch
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

# import websockets
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
from models.common import DetectMultiBackend
from utils.dataloaders import (IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams, LoadStreamsSQLNEWN, LoadStreamsSQL,
                               LoadStreamsSQLNRERT, LoadStreamsVEight, LoadStreamsSQLTN)
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           cv2, increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer,
                           xyxy2xywh, strtolst, strtolstl, apply_classifier1, task)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
# from testpool import func1, TestA
from ultralytics import YOLO
from ultralytics.trackers.bot_sort import BOTSORT
from ultralytics.utils.checks import check_yaml
from ultralytics.utils import IterableSimpleNamespace, yaml_load, ops
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils.ops import non_max_suppression  # note: shadows the YOLOv5 non_max_suppression imported above
from ultralytics.engine.results import Results
# def my_handler(signum, frame):
#     exit(0)

# url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
plt = platform.system()
if plt != 'Windows':
    pathlib.WindowsPath = pathlib.PosixPath  # let Windows-pickled checkpoints load on POSIX hosts
urlhead = "http://172.19.152.231"
url = f"{urlhead}/open/api/operate/upload"
urlele = f"{urlhead}/open/api/operate/fence"
urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
urlt = f"{urlhead}/open/api/operate/taskList"
urla = f"{urlhead}/open/api/operate/algorithmList"
weburl = "ws://36.7.84.146:28801/websocket/device"
urlrtsp = f"{urlhead}/open/api/operate/previewURLs"
personcountdir = {}
algdir = {'0': 'helmet', '8': 'danager', '10': 'uniform', '14': 'smoke', '16': 'fire', '21': 'cross', '25': 'fall',
          '29': 'occupancy', '30': 'liquid', '31': 'pressure', '32': 'sleep', '33': 'conveyor', '34': 'personcount',
          '35': 'gloves', '36': 'sit', '37': 'other', '38': 'duty', '98': 'face', '51': 'run'}
modellabeldir = {'0': 'head', '8': 'person', '10': 'other', '14': 'smoke', '16': 'fire', '21': 'cross', '25': 'fall',
                 '29': 'car', '30': 'liquid', '31': 'pressure', '32': 'sleep', '33': 'conveyor', '34': 'personcount',
                 '35': 'gloves', '36': 'sit', '37': 'other', '38': 'person', '98': 'face', '51': 'person'}
modelalgdir = {'helmet': '0', 'danager': '8', 'uniform': '10', 'smoke': '14', 'fire': '16', 'cross': '21',
               'fall': '25', 'occupancy': '29', 'liquid': '30', 'pressure': '31', 'sleep': '32', 'conveyor': '33',
               'personcount': '34', 'gloves': '35', 'sit': '36', 'other': '37', 'duty': '38', 'face': '98', 'run': '51'}
algmodel = {}
for key, value in algdir.items():
    algmodel[value] = key
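# The dicts above map the platform's algorithm indices to model names and
# detection labels; `algmodel` is simply `algdir` inverted, e.g.
# algdir['51'] == 'run' and algmodel['run'] == '51' (the classIndex that
# `postvideo` reports for running events).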
def map_to_ellipse(position):
    """Remap a pixel position in a 1280x720 frame onto an ellipse centered at (640, 360) with semi-axes 580 and 280,
    scaled by the point's normalized distance from the frame center."""
    x, y = position
    center_x = 640
    center_y = 360
    a = 580
    b = 280
    x_norm = x / 1280
    y_norm = y / 720
    d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
    theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
    f = d_norm
    a_new = a * f
    b_new = b * f
    bias_x = center_x + a_new * math.cos(theta_norm)
    bias_y = center_y + b_new * math.sin(theta_norm)
    return np.array([bias_x, bias_y])
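# Worked example: for the bottom-right corner, map_to_ellipse((1280, 720)) gives
# d_norm = sqrt(0.5) and theta_norm = pi/4, so bias = (640 + 580 * 0.5, 360 + 280 * 0.5)
# = (930.0, 500.0); points nearer the frame center are displaced proportionally less.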
def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
    opt = parser.parse_args()
    return opt
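# Note: only --weights survives from the original YOLOv5 CLI; `opt` is parsed in
# __main__ below, but the tracker processes are launched with the hard-coded
# checkpoint "run.pt".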
def runtracker(modelname, channel, source, videoqueue):
    track_history = defaultdict(lambda: [])
    # most recent timestamps per track_id
    time_stamps = defaultdict(lambda: deque(maxlen=50))  # fixed length of 50
    # instantaneous speeds per track_id
    instantaneous_velocities = defaultdict(lambda: deque(maxlen=10))
    speed_threshold = 10  # speed threshold
    high_velocity_count_threshold = 30  # how many recent high-speed samples trigger an event
    dirmodel = {"channel": channel, "postpretime": 0, "classIndex": '', "VideoUrl": source}
    dirvideo = {}
    framelist = []
    cap = cv2.VideoCapture(source)
    print(f'source = {source} cap = {cap.isOpened()}')
    model = YOLO(modelname)
    ida = 0
    while cap.isOpened():
        # record the current time
        flag = False
        current_time = time.time()
        # Read a frame from the video
        success, frame = cap.read()
        if success:
            oriimg = frame.copy()
            # keep a rolling buffer of the most recent 100 raw frames
            if len(framelist) >= 100:
                framelist.pop(0)
            framelist.append(oriimg)
            # feed every pending event buffer; a distinct loop variable keeps the
            # event counter `ida` from being clobbered
            idtmp = list(dirvideo.keys())
            for vid in idtmp:
                if dirvideo[vid]['count'] <= 100:
                    dirvideo[vid]['framel'].append(oriimg)
                    dirvideo[vid]['count'] += 1
                    print('addimg')
                if dirvideo[vid]['count'] == 100:
                    print("appendvideo")
                    videoqueue.put([dirvideo[vid]['framel'], dirvideo[vid]['postim'],
                                    dirvideo[vid]['oriim'], dirvideo[vid]['posttime'], channel])
                    dirvideo.pop(vid)
        if success:
            # Run YOLO tracking on the frame, persisting tracks between frames
            results = model.track(frame, persist=True, classes=0, conf=0.6)
            if results[0].boxes and results[0].boxes.id is not None:
                # Get the boxes and track IDs
                boxes = results[0].boxes.xywh.cpu()
                track_ids = results[0].boxes.id.int().cpu().tolist()
                for box, track_id in zip(boxes, track_ids):
                    x, y, w, h = box
                    # draw the bounding box
                    cv2.rectangle(frame, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)),
                                  (0, 255, 0), 2)
                    # bottom-left corner of the box
                    bottom_left_x = int(x - w / 2)
                    bottom_left_y = int(y + h / 2)
                    # center point
                    center_x = int(x)
                    center_y = int(y)
                    # draw the center point
                    cv2.circle(frame, (center_x, center_y), 5, (255, 0, 0), -1)  # center dot, radius 5
                    # record the position
                    track_history[track_id].append((bottom_left_x, bottom_left_y))
                    if len(track_history[track_id]) > 100:
                        del track_history[track_id][:-50]  # keep the history bounded
                    # record the timestamp of every frame
                    time_stamps[track_id].append(current_time)
                    # time elapsed between the two most recent frames
                    if len(time_stamps[track_id]) > 1:
                        delta_time = time_stamps[track_id][-1] - time_stamps[track_id][-2]
                    else:
                        delta_time = 0
                    instantaneous_velocity = 0
                    # compute the 2D instantaneous speed
                    if len(track_history[track_id]) >= 2:
                        pos1 = np.array(track_history[track_id][-1])  # latest position
                        pos2 = np.array(track_history[track_id][-2])  # previous position
                        pos1 = map_to_ellipse(pos1)
                        pos2 = map_to_ellipse(pos2)
                        distance = np.linalg.norm(pos1 - pos2)
                        # speed = distance over the elapsed time
                        instantaneous_velocity = distance / delta_time if delta_time > 0 else 0.0
                        instantaneous_velocity_magnitude = round(instantaneous_velocity, 1)
                        instantaneous_velocities[track_id].append(instantaneous_velocity_magnitude)
                    else:
                        instantaneous_velocity_magnitude = 0
                    # count how many recent samples exceed the speed threshold
                    high_velocity_count = sum(
                        1 for velocity in instantaneous_velocities[track_id] if velocity > speed_threshold)
                    if high_velocity_count >= high_velocity_count_threshold and time.time() - dirmodel["postpretime"] > 30:
                        cv2.putText(frame, str(instantaneous_velocity_magnitude), (int(x), int(y)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        flag = True
                        # open a new event buffer seeded with the rolling pre-trigger frames
                        dirvideo.setdefault(ida, {})
                        dirvideo[ida]['framel'] = framelist.copy()
                        dirvideo[ida]['count'] = 0
                        dirmodel["postpretime"] = time.time()  # refresh the 30 s cooldown
                        dirvideo[ida]['postim'] = frame
                        dirvideo[ida]['oriim'] = oriimg
                        # dirmodel['channel'] = channel
                        dirvideo[ida]['posttime'] = time.localtime(time.time())
                        ida = ida + 1
        # if flag and time.time() - dirmodel["postpretime"] > 30:
        #     timesave = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time()))
        #     year = time.strftime('%Y', time.localtime(time.time()))
        #     month = time.strftime('%m', time.localtime(time.time()))
        #     day = time.strftime('%d', time.localtime(time.time()))
        #     savefold = f'/mnt/project/images/run/{year}/{month}/{day}'
        #     savefold = Path(savefold)
        #     savefold.mkdir(parents=True, exist_ok=True)
        #     detsavefold = f'/mnt/project/detimages/run/{year}/{month}/{day}'
        #     detsavefold = Path(detsavefold)
        #     detsavefold.mkdir(parents=True, exist_ok=True)
        #     cv2.imwrite(f'{savefold}/{timesave}.jpg', oriimg)
        #     cv2.imwrite(f'{detsavefold}/{timesave}det.jpg', frame)
        #     success, encoded_image = cv2.imencode('.jpg', frame)
        #     content = encoded_image.tobytes()
        #     successori, encoded_imageori = cv2.imencode('.jpg', oriimg)
        #     contentori = encoded_imageori.tobytes()
        #     filename = f'{channel}_{int(time.time())}.jpg'
        #     filenameori = f'ori{channel}_{int(time.time())}.jpg'
        #     payload = {'channel': dirmodel['channel'],
        #                'classIndex': dirmodel['classindex'],
        #                'ip': dirmodel['algip'],
        #                'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
        #                'videoUrl': dirmodel["VideoUrl"]}
        #     files = [
        #         ('file', (filename, content, 'image/jpeg')),
        #         ('oldFile', (filenameori, contentori, 'image/jpeg')),
        #     ]
        #     try:
        #         result = requests.post(url, data=payload, files=files)
        #         print(result)
        #     except Exception:
        #         print('posterror')
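# Queue protocol: each item placed on `videoqueue` by `runtracker` is the
# 5-element list [framel, postim, oriim, posttime, channel] — the frames
# buffered before the trigger plus up to 100 collected after it, the annotated
# trigger frame, the raw trigger frame, the trigger's time.struct_time, and the
# camera channel. `postvideo` unpacks it in the same order.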
def postvideo(videoqueue):
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        "/root/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct-AWQ",
        # "/liq/liq_weight/Qwen2.5vl7b/",
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
        device_map="auto",
    )
    processor = AutoProcessor.from_pretrained("/root/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct-AWQ")
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MPEG-4 codec to match the .mp4 container ('XVID' pairs with .avi)
    while True:
        timesave = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time()))
        year = time.strftime('%Y', time.localtime(time.time()))
        month = time.strftime('%m', time.localtime(time.time()))
        day = time.strftime('%d', time.localtime(time.time()))
        savefold = Path(f'/mnt/project/images/run/{year}/{month}/{day}')
        savefold.mkdir(parents=True, exist_ok=True)
        detsavefold = Path(f'/mnt/project/detimages/run/{year}/{month}/{day}')
        detsavefold.mkdir(parents=True, exist_ok=True)
        vidsavefold = Path(f'/mnt/project/video/run/{year}/{month}/{day}')
        vidsavefold.mkdir(parents=True, exist_ok=True)
        if videoqueue.qsize() > 0:
            print('---------------------------------')
            framelist, postim, oriim, posttime, channel = videoqueue.get()
            posttimestr = time.strftime('%Y-%m-%d-%H:%M:%S', posttime)  # struct_time -> filename-safe string
            fps = 25  # frame rate
            frame_height, frame_width, _ = oriim.shape
            out = cv2.VideoWriter(f'{vidsavefold}/{posttimestr}.mp4', fourcc, fps, (frame_width, frame_height))
            for frame in framelist:
                out.write(frame)
            out.release()
            cv2.imwrite(f'{savefold}/{posttimestr}.jpg', oriim)
            cv2.imwrite(f'{detsavefold}/{posttimestr}det.jpg', postim)
            success, encoded_image = cv2.imencode('.jpg', postim)
            content = encoded_image.tobytes()
            successori, encoded_imageori = cv2.imencode('.jpg', oriim)
            contentori = encoded_imageori.tobytes()
            filename = f'{channel}_{posttimestr}.jpg'
            filenameori = f'ori{channel}_{posttimestr}.jpg'
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "video",
                            "video": f'{vidsavefold}/{posttimestr}.mp4',
                            "max_pixels": 360 * 420,
                            "fps": 1.0,
                        },
                        {"type": "text",
                         "text": "Please help me determine whether the person in the video is running or walking. If they are running, reply 'running'; if they are walking, reply 'walking'"},
                    ],
                }
            ]
            text = processor.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
            image_inputs, video_inputs = process_vision_info(messages)
            inputs = processor(
                text=[text],
                images=image_inputs,
                videos=video_inputs,
                padding=True,
                return_tensors="pt",
            )
            inputs = inputs.to("cuda")
            generated_ids = model.generate(**inputs, max_new_tokens=15)
            generated_ids_trimmed = [
                out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
            ]
            output_text = processor.batch_decode(
                generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
            )
            # assistant_response = output_text[0].split("Assistant:")[-1].strip()
            print(f'output_text = {output_text[0].lower()}')
            if 'run' in output_text[0].lower():
                payload = {'channel': channel,
                           'classIndex': '51',
                           'ip': "172.19.152.231",
                           'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
                           'videoUrl': f'{posttimestr}.mp4'}
                files = [
                    ('file', (filename, content, 'image/jpeg')),
                    ('oldFile', (filenameori, contentori, 'image/jpeg')),
                ]
                try:
                    result = requests.post(url, data=payload, files=files)
                    print(f'result = {result.json()}')
                except Exception:
                    print('posterror')
        else:
            print('videozero')
            time.sleep(3)
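# Upload contract used above (and in the commented-out block in `runtracker`):
# multipart POST to `url` with `file` (annotated jpg) and `oldFile` (raw jpg)
# plus channel/classIndex/ip/videoTime/videoUrl form fields; the server's JSON
# reply is only printed, never inspected.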
if __name__ == '__main__':
    torch.multiprocessing.set_start_method('spawn')
    # set_start_method('spawn')
    # multiprocessing.set_start_method('spawn')
    torch.cuda.set_per_process_memory_fraction(0.6)
    opt = parse_opt()
    dbpath = 'project.db'
    conn = sqlite3.connect(dbpath)
    # print("database opened successfully")
    c = conn.cursor()
    task(c, conn, urlt, urla)
    cursor = c.execute('select channel,algip from stream where modelname = "run"')
    result = cursor.fetchall()
    for channel, algip in result:
        data = {
            "channel": channel,
            "ip": algip
        }
        # personcountdir[channel] = num
        # ask the platform for the current preview/RTSP address of this channel
        address = requests.post(url=urlrtsp, data=data).json()['msg']
        c.execute('UPDATE STREAM set address= (?) where channel =(?)', (address, channel))
        conn.commit()
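    # Assumed schema, inferred from the queries in this block: STREAM carries at
    # least (channel, algip, address, modelname) and CHANGESTREAM at least
    # (modelname, conf, cla); `task` (from utils.general) is expected to refresh
    # both tables from the taskList/algorithmList endpoints before they are read.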
    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'run'")
    # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
    content = cursor.fetchall()
    contentlist = []
    for con in content:
        contentlist.append(con[0])
    # cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
    cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'run'")
    contenta = cursor.fetchall()
    source = []
    modellist = []
    addcha = []
    channellist = []
    for i in contenta:
        addcha.append((i[0], i[2]))
        modellist.append(i[1])
    addcha = set(addcha)  # de-duplicate (address, channel) pairs
    addcha = sorted(addcha, key=lambda x: x[1])
    for a, cha in addcha:
        source.append(a)
        channellist.append(cha)
    print(addcha)
    # source = set(source)
    source = list(source)
    # source.sort()
    modellist = set(modellist)
    n = len(content)
    print(f'modelname={n}')
    print(content)
    # content.reverse()
    print(content)
    # main(opt)
    # processes = []
    streamqueue = Queue(maxsize=4)
    numworkv = Value('i', 0)
    manager = Manager()
    lock = multiprocessing.Lock()
    streamlist = manager.list()
    numworks = 2
    # queuelist = getmutpro(channellist, source, streamlist, numworkv, lock, numworks)
    # thread.start()
    # videoqueue = Queue(maxsize=20)
    # thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
    # thread1.start()
    # pool = ThreadPoolExecutor(max_workers=n)
    # runmodel = manager.dict()
    modeladir = {}
    for modelname in modellist:
        if modelname in contentlist:
            # if modelname not in runmodel:
            # print(i)
            # detectdemo = Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
            # if modelname in ['fall','helmet','bag','arm']:
            print(f'weights ={modelname}.pt')
            c.execute('select conf,cla from changestream where modelname = (?)', (modelname,))
            rea = c.fetchall()
            # print(f'rea')
            modeladir.setdefault(modelname, {})
            modeladir[modelname]['conf'] = rea[0][0]
            modeladir[modelname]['cla'] = rea[0][1]
    tracker_threads = []
    videoqueue = Queue(maxsize=20)
    thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
    thread1.start()
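    # Design note: each camera gets its own spawned Process (the 'spawn' start
    # method set above is what CUDA requires in child processes), while the
    # single postvideo thread in the parent owns the Qwen2.5-VL model so the 7B
    # checkpoint is loaded only once.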
    for channel, address in zip(channellist, source):
        thread = Process(target=runtracker, args=("run.pt", channel, address, videoqueue))
        tracker_threads.append(thread)
        thread.start()
    for thread in tracker_threads:
        thread.join()