
Add large model to running detection

xtj 1 month ago
parent
commit
761c3d84a7
1 file changed with 464 additions and 0 deletions

+ 464 - 0
runthrbig.py

@@ -0,0 +1,464 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+from datetime import datetime
+from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
+from qwen_vl_utils import process_vision_info
+import matplotlib.path as mat
+import requests
+import argparse
+import os
+import platform
+import sqlite3
+import sys
+import threading
+import time
+from pathlib import Path
+import signal
+import torch
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import Process,Manager,Value
+from multiprocessing import Queue
+from multiprocessing import set_start_method
+import multiprocessing
+import numpy as np
+import pathlib
+from collections import defaultdict, deque
+#import websockets
+import asyncio
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+import math
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLTN
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,strtolstl,apply_classifier1,task)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import select_device, smart_inference_mode
+#from testpool import func1,TestA
+from ultralytics import YOLO
+from ultralytics.trackers.bot_sort import BOTSORT
+from ultralytics.utils.checks import check_yaml
+from ultralytics.utils import IterableSimpleNamespace, yaml_load, ops
+from ultralytics.nn.autobackend import AutoBackend
+from ultralytics.utils.ops import non_max_suppression  # note: shadows the utils.general import above; the ultralytics version is the one in effect
+from ultralytics.engine.results import Results
+# def my_handler(signum, frame):
+#     exit(0)
+#url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+plt = platform.system()
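+# on non-Windows hosts, alias WindowsPath to PosixPath so checkpoints pickled
+# on Windows can still be unpickled (assumption: some weights were exported on Windows)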
+if plt != 'Windows':
+  pathlib.WindowsPath = pathlib.PosixPath
+urlhead = "http://172.19.152.231"
+url = f"{urlhead}/open/api/operate/upload"
+urlele = f"{urlhead}/open/api/operate/fence"
+urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
+urlt = f"{urlhead}/open/api/operate/taskList"
+urla = f"{urlhead}/open/api/operate/algorithmList"
+weburl = f"ws://36.7.84.146:28801/websocket/device"
+urlrtsp = f"{urlhead}/open/api/operate/previewURLs"
+personcountdir = {}
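+# mappings between the platform's algorithm indices and model / label names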
+algdir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'duty','98':'face','51':'run'}
+modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
+modelalgdir = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
+algmodel = {}
+for key,value in algdir.items():
+    algmodel[value] = key
+
+
+
+def map_to_ellipse(position):
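+    """Project a pixel position onto an ellipse centred on the frame.
+
+    The normalised radial distance from the frame centre (640, 360) is
+    re-scaled along an ellipse with semi-axes a=580 and b=280, presumably to
+    make per-frame displacements comparable across the image; runtracker()
+    applies it to both positions before computing a speed.
+    """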
+    x, y = position
+    center_x = 640
+    center_y = 360
+    a = 580
+    b = 280
+
+    x_norm = x / 1280
+    y_norm = y / 720
+
+    d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
+    theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
+    f = d_norm
+    a_new = a * f
+    b_new = b * f
+
+    bias_x = center_x + a_new * math.cos(theta_norm)
+    bias_y = center_y + b_new * math.sin(theta_norm)
+
+    return np.array([bias_x, bias_y])
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+    opt = parser.parse_args()
+    return opt
+
+def runtracker(modelname,channel,source,videoqueue):
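+    """Track people on one camera stream and queue suspected running clips.
+
+    Runs YOLO tracking on `source`, estimates each track's instantaneous speed
+    from the bottom-left box corner (mapped through map_to_ellipse), and once a
+    track accumulates enough high-speed samples, copies the last 100 buffered
+    frames plus the next 100 frames into `videoqueue` for VLM verification.
+    """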
+    track_history = defaultdict(lambda: [])
+    # most recent timestamps per track_id (fixed length 50)
+    time_stamps = defaultdict(lambda: deque(maxlen=50))
+    # recent instantaneous speeds per track_id
+    instantaneous_velocities = defaultdict(lambda: deque(maxlen=10))
+    speed_threshold = 10  # speed threshold
+    high_velocity_count_threshold = 30  # how many high-speed samples trigger an event
+    dirmodel = {"channel":channel,"postpretime":0,"classIndex":'',"VideoUrl":source}
+    dirvideo = {}
+    framelist = []
+    cap = cv2.VideoCapture(source)
+    print(f'source = {source} cap = {cap.isOpened()}')
+    model = YOLO(modelname)
+    ida = 0  # monotonically increasing id for candidate running events
+    while cap.isOpened():
+        flag = False
+        # record the current time
+        current_time = time.time()
+        # Read a frame from the video
+        success, frame = cap.read()
+        if not success:
+            # skip failed reads; the original code could touch an undefined
+            # oriimg if the very first read failed
+            continue
+        oriimg = frame.copy()
+        # rolling buffer of the most recent 100 raw frames (pre-event context)
+        if len(framelist) >= 100:
+            framelist.pop(0)
+        framelist.append(oriimg)
+        # append this frame to every in-flight event buffer; use a name other
+        # than `ida` so the event counter is not clobbered (bug in the original)
+        for vid in list(dirvideo.keys()):
+            if dirvideo[vid]['count'] <= 100:
+                dirvideo[vid]['framel'].append(oriimg)
+                dirvideo[vid]['count'] += 1
+            if dirvideo[vid]['count'] == 100:
+                # clip complete: hand the buffered frames to the VLM worker
+                videoqueue.put([dirvideo[vid]['framel'], dirvideo[vid]['postim'],
+                                dirvideo[vid]['oriim'], dirvideo[vid]['posttime'], channel])
+                dirvideo.pop(vid)
+        if success:
+            # Run YOLO tracking on the frame, persisting track IDs between frames
+            # (classes=0 restricts detection to the person class)
+            results = model.track(frame, persist=True, classes=0, conf=0.6)
+            if results[0].boxes and results[0].boxes.id is not None:
+                # Get the boxes and track IDs
+                boxes = results[0].boxes.xywh.cpu()
+                track_ids = results[0].boxes.id.int().cpu().tolist()
+                for box, track_id in zip(boxes, track_ids):
+                    x, y, w, h = box
+                    # draw the bounding box
+                    cv2.rectangle(frame, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)),
+                                  (0, 255, 0), 2)
+                    # bottom-left corner, used as the ground-contact point
+                    bottom_left_x = int(x - w / 2)
+                    bottom_left_y = int(y + h / 2)
+                    # box centre
+                    center_x = int(x)
+                    center_y = int(y)
+                    # draw the centre point (BGR (255, 0, 0) is blue), radius 5
+                    cv2.circle(frame, (center_x, center_y), 5, (255, 0, 0), -1)
+                    # record the position
+                    track_history[track_id].append((bottom_left_x, bottom_left_y))
+                    if len(track_history[track_id]) > 100:
+                        del track_history[track_id][:-50]  # cap history, keep the newest 50
+                    # record the timestamp of this frame
+                    time_stamps[track_id].append(current_time)
+                    # time interval between the two most recent frames
+                    if len(time_stamps[track_id]) > 1:
+                        delta_time = time_stamps[track_id][-1] - time_stamps[track_id][-2]
+                    else:
+                        delta_time = 0
+                    # 2-D instantaneous speed from the last two (ellipse-mapped) positions
+                    if len(track_history[track_id]) >= 2:
+                        pos1 = np.array(track_history[track_id][-1])  # latest position
+                        pos2 = np.array(track_history[track_id][-2])  # previous position
+
+                        pos1 = map_to_ellipse(pos1)
+                        pos2 = map_to_ellipse(pos2)
+                        distance = np.linalg.norm(pos1 - pos2)
+
+                        # divide the distance by the frame interval; the speed is a
+                        # scalar, so fall back to 0.0 (not np.zeros(2)) when delta_time is 0
+                        instantaneous_velocity = distance / delta_time if delta_time > 0 else 0.0
+
+                        instantaneous_velocity_magnitude = round(instantaneous_velocity, 1)
+                        instantaneous_velocities[track_id].append(instantaneous_velocity_magnitude)
+                    else:
+                        instantaneous_velocity_magnitude = 0
+
+                    # count recent samples above the speed threshold
+                    high_velocity_count = sum(
+                        1 for velocity in instantaneous_velocities[track_id] if velocity > speed_threshold)
+
+                    if high_velocity_count >= high_velocity_count_threshold and time.time() - dirmodel["postpretime"] > 30:
+                        cv2.putText(frame, str(instantaneous_velocity_magnitude), (int(x), int(y)),
+                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+                        flag = True
+                        # start a new candidate event: keep a copy of the pre-event
+                        # buffer and collect the next 100 frames in the main loop
+                        dirvideo.setdefault(ida, {})
+                        dirvideo[ida]['framel'] = framelist.copy()
+                        dirvideo[ida]['count'] = 0
+                        dirvideo[ida]['postim'] = frame
+                        dirvideo[ida]['oriim'] = oriimg
+                        dirvideo[ida]['posttime'] = time.localtime(time.time())
+                        # reset the 30 s cooldown; the original stored this on the
+                        # per-event dict, so the cooldown never took effect
+                        dirmodel["postpretime"] = time.time()
+                        ida = ida + 1
+
+
+def postvideo(videoqueue):
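+    """Verify queued clips with Qwen2.5-VL and report confirmed running events.
+
+    For each clip: write it to disk as an .mp4, save the raw and annotated key
+    frames, ask the VLM whether the person is running or walking, and on a
+    'running' answer post an alarm (classIndex '51') with both images to the
+    platform upload endpoint.
+    """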
+    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+        "/root/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct-AWQ",
+        # "/liq/liq_weight/Qwen2.5vl7b/",
+        torch_dtype=torch.bfloat16,
+        attn_implementation="flash_attention_2",
+        device_map="auto",
+    )
+    processor = AutoProcessor.from_pretrained("/root/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct-AWQ")
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4v matches the .mp4 container (XVID targets AVI)
+    while True:
+        year = time.strftime('%Y', time.localtime(time.time()))
+        month = time.strftime('%m', time.localtime(time.time()))
+        day = time.strftime('%d', time.localtime(time.time()))
+        savefold = f'/mnt/project/images/run/{year}/{month}/{day}'
+        savefold = Path(savefold)
+        savefold.mkdir(parents=True, exist_ok=True)
+        detsavefold = f'/mnt/project/detimages/run/{year}/{month}/{day}'
+        detsavefold = Path(detsavefold)
+        detsavefold.mkdir(parents=True, exist_ok=True)
+        vidsavefold = f'/mnt/project/video/run/{year}/{month}/{day}'
+        vidsavefold = Path(vidsavefold)
+        vidsavefold.mkdir(parents=True, exist_ok=True)
+        if videoqueue.qsize() > 0:
+            print('---------------------------------')
+            framelist, postim, oriim, posttime, channel = videoqueue.get()
+            fps = 25  # frame rate of the exported clip
+            frame_height, frame_width, _ = oriim.shape
+            # format the event struct_time once; interpolating it directly into an
+            # f-string (as the original did) embeds the raw struct_time repr in file names
+            posttimestr = time.strftime('%Y-%m-%d-%H:%M:%S', posttime)
+            videopath = f'{vidsavefold}/{posttimestr}.mp4'
+            out = cv2.VideoWriter(videopath, fourcc, fps, (frame_width, frame_height))
+            for frame in framelist:
+                out.write(frame)
+            out.release()
+            cv2.imwrite(f'{savefold}/{posttimestr}.jpg', oriim)
+            cv2.imwrite(f'{detsavefold}/{posttimestr}det.jpg', postim)
+            success, encoded_image = cv2.imencode('.jpg', postim)
+            content = encoded_image.tobytes()
+            successori, encoded_imageori = cv2.imencode('.jpg', oriim)
+            contentori = encoded_imageori.tobytes()
+            filename = f'{channel}_{posttimestr}.jpg'
+            filenameori = f'ori{channel}_{posttimestr}.jpg'
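+            # Qwen2.5-VL chat message: attach the exported clip, sampled at 1 fps
+            # with a max_pixels budget of 360*420, and ask a binary running/walking question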
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "video",
+                            "video": f'{vidsavefold}/{time.strftime("%Y-%m-%d-%H:%M:%S",posttime)}.mp4',
+                            "max_pixels": 360 * 420,
+                            "fps": 1.0,
+                        },
+                        {"type": "text",
+                         "text": "Please help me determine whether the person in the video is running or walking. If they are running, reply 'running'; if they are walking, reply 'walking'"},
+                    ],
+                }
+            ]
+            text = processor.apply_chat_template(
+                messages, tokenize=False, add_generation_prompt=True
+            )
+            image_inputs, video_inputs = process_vision_info(messages)
+            inputs = processor(
+                text=[text],
+                images=image_inputs,
+                videos=video_inputs,
+                padding=True,
+                return_tensors="pt",
+            )
+            inputs = inputs.to("cuda")
+            generated_ids = model.generate(**inputs, max_new_tokens=15)
+            generated_ids_trimmed = [
+                out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+            ]
+            output_text = processor.batch_decode(
+                generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+            )
+            #assistant_response = output_text[0].split("Assistant:")[-1].strip()
+            print(f'output_text = {output_text[0].lower()}')
+            if 'run' in output_text[0].lower():
+                payload = {'channel': channel,
+                           'classIndex': '51',
+                           'ip': "172.19.152.231",
+                           'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
+                           'videoUrl': f'{posttimestr}.mp4'}
+                files = [
+                    ('file', (filename, content, 'image/jpeg')),
+                    ('oldFile', (filenameori, contentori, 'image/jpeg')),
+                ]
+                try:
+                    result = requests.post(url, data=payload, files=files)
+                    print(f'result = {result.json()}')
+                except Exception:
+                    print('posterror')
+        else:
+            print('videozero')
+            time.sleep(3)
+
+
+
+
+
+if __name__ == '__main__':
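+    # CUDA cannot be re-initialised in forked children; the tracker processes need 'spawn'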
+    torch.multiprocessing.set_start_method('spawn')
+    torch.cuda.set_per_process_memory_fraction(0.6)
+    opt = parse_opt()
+    dbpath = 'project.db'
+    conn = sqlite3.connect(dbpath)
+    # print("database opened successfully")
+    c = conn.cursor()
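+    # refresh the local task/algorithm tables from the platform endpoints
+    # (task() comes from utils.general; exact behaviour assumed from its arguments)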
+    task(c,conn,urlt,urla)
+    cursor = c.execute('select channel,algip  from stream where modelname = "run"')
+    result = cursor.fetchall()
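+    # resolve a preview stream URL for every 'run' channel and cache it in STREAM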
+    for channel, algip in result:
+        data = {"channel": channel, "ip": algip}
+        address = requests.post(url=urlrtsp, data=data).json()['msg']
+        c.execute('UPDATE STREAM set address = (?) where channel = (?)', (address, channel))
+    conn.commit()
+    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'run'")
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+    content = cursor.fetchall()
+    contentlist = []
+    for con in content:
+        contentlist.append(con[0])
+    #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+    cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'run'")
+    contenta = cursor.fetchall()
+    source = []
+    modellist = []
+    addcha = []
+    channellist = []
+    for i in contenta:
+        addcha.append((i[0],i[2]))
+        modellist.append(i[1])
+    addcha = set(addcha)
+    addcha = sorted(addcha,key=lambda x:x[1])
+    for a,cha in addcha:
+        source.append(a)
+        channellist.append(cha)
+    print(addcha)
+    #source = set(source)
+    source = list(source)
+    #source.sort()
+    modellist = set(modellist)
+    n = len(content)
+    print(f'modelname={n}')
+    print(content)
+    #content.reverse()
+    print(content)
+    # load per-model confidence and class settings from the changestream table
+    modeladir = {}
+    for modelname in modellist:
+        if modelname in contentlist:
+            print(f'weights ={modelname}.pt')
+            c.execute('select conf,cla from changestream where modelname = (?)', (modelname,))
+            rea = c.fetchall()
+            modeladir.setdefault(modelname, {})
+            modeladir[modelname]['conf'] = rea[0][0]
+            modeladir[modelname]['cla'] = rea[0][1]
+    # one VLM post-processing thread consumes clips from all tracker processes
+    videoqueue = Queue(maxsize=20)
+    thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
+    thread1.start()
+    # one tracker process per camera channel (these are Processes, despite the
+    # original 'tracker_threads' name)
+    tracker_procs = []
+    for channel, address in zip(channellist, source):
+        proc = Process(target=runtracker, args=("run.pt", channel, address, videoqueue))
+        tracker_procs.append(proc)
+        proc.start()
+    for proc in tracker_procs:
+        proc.join()
+