
Update detectopencvmut03autalq.py

xtj committed 1 week ago
parent commit 2df612873e
1 file changed, 935 insertions, 0 deletions
      detectopencvmut03autalq.py


@@ -0,0 +1,935 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+import argparse
+import multiprocessing
+import os
+import platform
+import sqlite3
+import sys
+import threading
+import time
+from multiprocessing import Manager, Process, Queue, Value
+from pathlib import Path
+
+import numpy as np
+import requests
+import torch
+import torch.nn.functional as F  # used by the secondary classifier in clapre()
+from torchvision import transforms
+
+
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadStreamsSQLTNP, preprocess
+from utils.general import (LOGGER, Profile, check_file, check_requirements, cv2, increment_path,
+                           non_max_suppression, scale_boxes, apply_classifier1, apply_classifierarm,
+                           task, numpy_image_to_base64)
+from utils.plots import Annotator
+from utils.torch_utils import select_device, smart_inference_mode
+from utils.renwu import newHelmet, newUniform, newFall, Personcount, Arm, Bag, Cross, Extinguisher, newPersontre, Danager, CarHelmetBelt, Clear
+from loguru import logger
+# from testpool import func1,TestA
+# def my_handler(signum, frame):
+#     exit(0)
+# url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+url = "http://172.19.152.231/open/api/operate/upload"
+urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
+urlt = "http://172.19.152.231/open/api/operate/taskList"
+urla = "http://172.19.152.231/open/api/operate/algorithmList"
+urlele = "http://172.19.152.231/open/api/operate/fence"
+urltime = "http://172.19.152.231/open/api/operate/getTime"
+urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
+urlque = "http://172.19.152.231:8001/analyze/"
+questiondir = {
+            "uniform": "图片中的所有人都穿工作服了吗,请回答是或否",  # "Is everyone in the picture wearing work clothes? Answer yes or no."
+            "fall": "图片中的人摔倒了吗,请回答是或否",  # "Has the person in the picture fallen? Answer yes or no."
+            "persontre": "图片中的人坐踩手推车了吗,请回答是或否",  # "Is the person sitting or standing on the cart? Answer yes or no."
+            "helmet": "图片中的人戴工作帽了吗,请回答是或否"  # "Is the person wearing a work helmet? Answer yes or no."
+        }
+
+# modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
+# algmodel = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
+modelnamedir = {'0': 'helmet', '8': 'danager', '10': 'uniform', '14': 'smoke', '16': 'fire', '21': 'cross',
+                '25': 'fall', '29': 'occupancy', '30': 'liquid', '31': 'pressure', '32': 'sleep', '34': 'personcount',
+                '37': 'other', '38': 'duty', '98': 'face', '55': 'oil', '52': 'jingdian', '53': 'rope',
+                '54': 'personcar', '39': 'inspection', '11': 'reflective', '12': 'phone', '66': 'extinguisher',
+                '67': 'belt', '68': 'menjin', '35': 'arm', '36': 'persontre', '33': 'bag','62':'clear'}
+modellabeldir = {'0': 'head,person', '8': 'person', '10': 'black_work_clothes,blue_work_clothes,person', '14': 'smoke',
+                 '16': 'fire', '21': 'cross', '25': 'fall', '29': 'car', '30': 'liquid', '31': 'pressure',
+                 '32': 'sleep', '34': 'personcount', '37': 'other', '38': 'person', '98': 'face', '55': 'oil',
+                 '52': 'person,hand,ball', '53': 'rope', '54': 'person', '39': 'person',
+                 '11': 'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other', '12': 'phone',
+                 '66': 'extinguisher', '67': 'person,head,helmet,belt', '68': 'person', '35': 'barearm,arm',
+                 '36': 'person,foot,cart,bag,box', '33': 'handbox,handbag','62':'hand'}
+modelalgdir = {value: key for key, value in modelnamedir.items()}
+personcountdir = {}
+taskmap = {'helmet': newHelmet, 'uniform': newUniform, 'fall': newFall, 'personcount': Personcount, 'arm': Arm,
+           'bag': Bag, 'cross': Cross, 'extinguisher': Extinguisher, 'persontre': newPersontre, 'danager': Danager,
+           'belt': CarHelmetBelt, 'clear': Clear}
+mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
+test = transforms.Compose([transforms.Resize((224, 224)),
+                           # transforms.CenterCrop(224),
+                           transforms.ToTensor(),
+                           transforms.Normalize(mean=mean, std=std)
+                           ])
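+# clapre: run the secondary classifier on a batch of cropped images and return the
+# crop coordinates whose predicted class index is below 5, or None if none qualify.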
+def clapre(modelcla, claimg, clapoint):
+    imgten = torch.stack(claimg, dim=0)
+    clapoint = torch.stack(clapoint, dim=0)
+    imgten = imgten.to(0)
+    result = modelcla(imgten)
+    result = F.softmax(result, dim=1)
+    print(result)
+    index = result.argmax(1)
+    index = index.cpu().numpy()
+    index = np.argwhere(index < 5)
+    index = index.reshape(-1)
+    print(index)
+    if len(index) > 0:
+        print(clapoint[index])
+        return clapoint[index]
+    else:
+        return None
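+# YoloOpt: a plain settings object mirroring the CLI options of the stock detect.py.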
+class YoloOpt:
+    def __init__(self, weights=ROOT / 'yolov5s.pt', source=ROOT / 'data/images', data=ROOT / 'data/coco128.yaml',
+                 imgsz=(640, 640),
+                 conf_thres=0.25,
+                 iou_thres=0.45,
+                 max_det=1000,
+                 device='',
+                 view_img=False,
+                 save_txt=False,
+                 save_conf=False,
+                 save_crop=False,
+                 nosave=True,
+                 classes=None,
+                 agnostic_nms=False,
+                 augment=False,
+                 visualize=False,
+                 update=False,
+                 project=ROOT / 'runs/detect',
+                 name='exp',
+                 exist_ok=False,
+                 line_thickness=1,
+                 hide_labels=False,
+                 hide_conf=False,
+                 half=False,
+                 dnn=False,
+                 vid_stride=10,
+                 classify=False,
+                 v8=False):
+        self.weights = weights  # path to the weights file
+        self.source = source  # input source (image / video / stream / .db task list)
+        self.data = data
+        if imgsz is None:
+            imgsz = (640, 640)
+        self.imgsz = imgsz  # inference image size, default (640, 640)
+        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
+        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
+        self.device = device  # device to run on, e.g. '' (auto), 'cpu', or a CUDA index
+        self.view_img = view_img  # whether to display images/videos after prediction, default False
+        self.classes = classes  # keep only a subset of classes; default keeps all
+        self.agnostic_nms = agnostic_nms  # class-agnostic NMS, default False
+        self.augment = augment  # augmented inference (TTA), can improve accuracy
+        self.update = update  # if True, run strip_optimizer to remove optimizer state from .pt files, default False
+        self.exist_ok = exist_ok  # if True, reuse an existing project/name directory instead of incrementing, default False
+        self.project = project  # directory for run logs; not used by this program
+        self.name = name  # experiment name; not used by this program
+        self.max_det = max_det
+        self.save_txt = save_txt
+        self.save_conf = save_conf
+        self.save_crop = save_crop
+        self.nosave = nosave
+        self.visualize = visualize
+        self.line_thickness = line_thickness
+        self.hide_labels = hide_labels
+        self.hide_conf = hide_conf
+        self.half = half
+        self.dnn = dnn
+        self.vid_stride = vid_stride
+        self.classify = classify
+        self.v8 = v8
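+# Detect wraps one model (YOLOv5 via DetectMultiBackend, or YOLOv8/11 via AutoBackend
+# when v8=True), plus an optional person model and secondary classifier, and holds
+# the per-channel task state (self.dirmodel) loaded from the platform API in readpoint().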
+class Detect:
+    def __init__(self, weights=ROOT / 'yolov5s.pt', imgsz=(640, 640), source="changshusql1103.db", classes=None,
+                 device=None, classify=False, conf_thres=0.25, v8=False):
+        print(f'detectweights = {weights}')
+        if v8:
+            from ultralytics.nn.autobackend import AutoBackend
+            from ultralytics.utils.ops import non_max_suppression
+        else:
+            from utils.general import non_max_suppression
+        self.opt = YoloOpt(weights=weights, imgsz=imgsz, source=source, classes=classes, device=device,
+                           classify=classify, conf_thres=conf_thres, v8=v8)
+        self.source = str(self.opt.source)
+        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
+        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
+        screenshot = self.source.lower().startswith('screen')
+        if is_url and is_file:
+            self.source = check_file(self.source)  # download
+        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name,
+                                       exist_ok=self.opt.exist_ok)  # increment run
+        # self.save_dir = self.save_dir / Path(self.opt.weights).stem
+        # self.save_dir.mkdir(parents=True, exist_ok=True)
+        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True,
+                                                                                 exist_ok=True)  # make dir
+        print(f'device = {self.opt.device}')
+        device = select_device(self.opt.device)
+        if v8:
+            self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
+                                     fp16=self.opt.half)
+            if Path(weights).stem in ['arm', 'uniform','fall','persontre','belt']:
+                if Path(weights).stem == 'arm':
+                    self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data,
+                                                   fp16=self.opt.half)
+                elif Path(weights).stem in ['uniform','fall','persontre','belt']:
+                    self.personmodel = AutoBackend('yolo11m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data,
+                                                   fp16=self.opt.half)
+        else:
+            self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
+                                            fp16=self.opt.half)
+            if Path(weights).stem in ['helmet', 'arm']:
+                self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn,
+                                                      data=self.opt.data, fp16=self.opt.half)
+        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+        self.classify = classify
+        if self.classify:
+            if Path(weights).stem != "arm":
+                classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
+                self.classifier_model = classifier_model.to(device)
+                self.classifier_model.eval()
+            else:
+                self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt", device=device, dnn=self.opt.dnn,
+                                                    data=self.opt.data, fp16=self.opt.half)
+        #self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
+        if Path(weights).stem in ['helmet', 'uniform']:
+            self.imgsz = [960, 960]
+        elif Path(weights).stem in ['persontre']:
+            self.imgsz = [1280, 1280]
+        else:
+            self.imgsz = [640, 640]
+        self.model.warmup(imgsz=(1, 3, *self.imgsz))
+        self.readpoint()
+        savefold = Path(f'/home/h3c/yolo/log/{Path(weights).stem}')
+        savefold.mkdir(parents=True, exist_ok=True)
+        logger.remove()
+        self.logger = logger.bind(process_log=f'/home/h3c/yolo/log/{Path(weights).stem}/file.log')
+        self.logger.add(f'./log/{Path(weights).stem}/file.log', rotation="1 GB",retention="3 days")
+        #self.logger.remove()
+        print(self.imgsz)
+        self.updatetime = time.time()
+        self.updatemtime = time.time()
+        self.filetime = os.path.getmtime(self.opt.weights)
+        self.taskname = taskmap[Path(self.opt.weights).stem]()
+        t1 = threading.Thread(target=self.load, daemon=True)
+        t1.start()
+
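+    # infer: main worker loop. Pulls batched frames from the per-model queues, keeps
+    # only the channels assigned to this model, preprocesses them, runs the forward
+    # pass, and hands predictions to postprocess(). Rereads the channel list every
+    # 300 s and exits (removing itself from runmodel) if the model is unassigned.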
+    @smart_inference_mode()
+    def infer(self, queue, runmodel):
+        pretime = time.time()
+        seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
+        # print("database opened successfully")
+        while True:
+            if time.localtime().tm_hour not in range(7, 20):
+                time.sleep(30)
+                continue
+            if time.time() - pretime > 300:
+                ret = self.readpoint()
+                pretime = time.time()
+                if not ret:
+                    print(f'{Path(self.opt.weights).stem} {runmodel}')
+                    runmodel.pop(Path(self.opt.weights).stem)
+                    print(f'{Path(self.opt.weights).stem} {runmodel}')
+                    break
+            print(f'queuelen = {len(queue)}')
+            for que in queue:
+                if que.qsize() == 0:
+                    print('queuezero')
+                    time.sleep(0.01)
+                if que.qsize() > 0:
+                    setframe = que.get()
+                    path, im0s, vid_cap, s, videotime, channels = setframe
+                    algchannel = list(self.dirmodel.keys())
+                    print(algchannel)
+                    print(path)
+                    algchannel = np.array(algchannel)
+                    channelsnp = np.array(channels)
+                    algindex = np.where(np.isin(channelsnp, algchannel))[0]
+                    algindex = list(algindex)
+                    path = np.array(path)
+                    path = path[algindex]
+                    path = path.tolist()
+                    channels = np.array(channels)
+                    channels = channels[algindex]
+                    channels = channels.tolist()
+                    # print(algindex)
+                    if len(algindex) == 0:
+                        continue
+                    # for ia in algindex:
+                    #    print(type(im0s[ia]))
+                    #    print(im0s[ia].shape)
+                    #im = im[algindex]
+                    # for ia in algindex:
+                    #    print(type(ia))
+                    try:
+                        im0s = np.asarray(im0s)
+                    except Exception:
+                        im0s = np.asarray(im0s, dtype=object)
+                    print(im0s.shape)
+                    im0s = im0s[algindex]
+                    if '61' in channels:
+                        self.pt = False
+                    im = preprocess(im0s, self.pt, self.imgsz, self.stride)
+                    # im0s = im0s.tolist()
+                    print(f'algindex = {algindex}')
+                    print(f'im0s ={im0s[0].shape}')
+                    videotime = np.array(videotime)
+                    videotime = videotime[algindex]
+                    videotime = tuple(map(tuple, videotime))
+
+                    with self.dt[0]:
+                        im = torch.from_numpy(im).to(self.model.device)
+                        im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
+                        im /= 255  # 0 - 255 to 0.0 - 1.0
+                        if len(im.shape) == 3:
+                            im = im[None]  # expand for batch dim
+                    # Inference
+                    with self.dt[1]:
+                        visualize = increment_path(self.save_dir / Path(path).stem,
+                                                   mkdir=True) if self.opt.visualize else False
+                        # print('error')
+                        # print(self.model)
+                        pred = self.model(im, augment=self.opt.augment, visualize=visualize)
+                    self.postprocess(pred, path, im0s, im, s, videotime, channels)
+
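+    # postprocess: NMS (plus optional second-stage classifier), per-channel task logic
+    # via taskname.getflag(), debounced alarm posting with annotated/original JPEGs,
+    # and periodic refresh of thresholds, fences and duration limits from the platform API.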
+    def postprocess(self, pred, path, im0s, im, s, videotime, channels):
+        if time.time() - self.updatemtime > 300:
+            if self.filetime != os.path.getmtime(self.opt.weights):
+                try:
+                    device = select_device(self.opt.device)
+                    print("load new load")
+                    self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
+                                                fp16=self.opt.half)
+                    self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+                    self.filetime = os.path.getmtime(self.opt.weights)
+                except Exception:
+                    print("load error")
+            # try:
+            # if  modelalgdir[Path(self.opt.weights).stem]!='0':
+            print(modelalgdir[Path(self.opt.weights).stem])
+            try:
+                rea = requests.post(url=urla, data={'algorithmCode': modelalgdir[Path(self.opt.weights).stem]}).json()[
+                    'data']
+                con = rea[0]['confidence']
+                self.opt.conf_thres = con
+            except Exception:
+                print('error')
+
+            self.updatemtime = time.time()
+        seen = 0
+        # dt = (Profile(), Profile(), Profile())
+        print(f'seen = {seen}')
+        windows = []
+        labelnamelist = []
+        with self.dt[2]:
+            # print(f'cropshape={pred.shape}')
+            if self.opt.v8:
+                from ultralytics.utils.ops import non_max_suppression
+            else:
+                from utils.general import non_max_suppression
+            if Path(self.opt.weights).stem == "fall":
+                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
+                                           self.opt.agnostic_nms, max_det=self.opt.max_det,nc=1)
+            else:
+                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
+                                       self.opt.agnostic_nms, max_det=self.opt.max_det)
+            # Second-stage classifier (optional)
+            # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+            if self.classify and Path(self.opt.weights).stem != 'persontre':
+                if Path(self.opt.weights).stem == 'arm':
+                    pred = apply_classifierarm(pred, self.classifier_model, im, im0s, Path(self.opt.weights).stem)
+                else:
+                    pred = apply_classifier1(pred, self.classifier_model, im, im0s, Path(self.opt.weights).stem)
+            # Process predictions
+            # print(f'predshape={pred.shape}')
+        for i, det in enumerate(pred):  # per image
+            if time.time() - self.updatetime > 300:
+                dataele = {
+                    "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+                    "algorithmIp": self.dirmodel[channels[i]]['algip'],
+                    "channel": self.dirmodel[channels[i]]['channel']
+                }
+                try:
+                    resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
+                    resultele = resultele.split(',||')
+                    resultele = tuple(resultele)
+                    point = '%s:' * len(resultele) % resultele
+                    if len(point[:-2]) > 1:
+                        self.dirmodel[channels[i]]['point'] = point[:-2]
+                except Exception:
+                    print('post error')
+                if Path(self.opt.weights).stem == 'personcount':
+                    try:
+                        resultper = requests.post(url=urlperson, data=dataele).json()['data']
+                        personcountdir[channels[i]] = int(resultper)
+                    except Exception:
+                        print('urlpersonerror')
+                if Path(self.opt.weights).stem in ('sleep', 'duty'):
+                    datatime = {
+                        "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+                        "algorithmIp": self.dirmodel[channels[i]]['algip'],
+                        "channel": self.dirmodel[channels[i]]['channel']
+                    }
+                    try:
+                        resulttime = requests.post(url=urltime, data=datatime).json()['data']
+                        self.dirmodel[channels[i]]['durtime'] = int(resulttime)
+                    except Exception:
+                        print('posttime error')
+                self.updatetime = time.time()
+            seen += 1
+            if self.webcam:  # batch_size >= 1
+                p, im0 = path[i], im0s[i].copy()
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), 0  # non-stream fallback; this pipeline always runs with webcam=True
+            p = Path(p)  # to Path
+            save_path = str(self.save_dir / p.name)  # im.jpg
+            # txt_path = str(self.save_dir / 'labels' / p.stem) + (
+            # '' #if dataset.mode == 'image' else f'_{frame}')  # im.txt
+            s += '%gx%g ' % im.shape[2:]  # print string
+            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+            imc = im0.copy()  # for save_crop
+            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
+            flag = False
+            if len(det) and Path(self.opt.weights).stem != 'duty':
+                # flag = True
+                # Rescale boxes from img_size to im0 size
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+                # Print results
+                for c in det[:, 5].unique():
+                    n = (det[:, 5] == c).sum()  # detections per class
+                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+                # Write results
+                if Path(self.opt.weights).stem in ['arm', 'uniform','fall','persontre','belt']:
+                    personpred = self.personmodel(im[i][None], None, None)
+                    personpred = non_max_suppression(personpred, 0.7, self.opt.iou_thres, 0,
+                                                     self.opt.agnostic_nms, max_det=self.opt.max_det)
+                    if len(personpred[0]) == 0:
+                        flag = False
+                    elif Path(self.opt.weights).stem == 'other':
+                        persondet = []
+                        personpred = personpred[0]
+                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+                        for *perxyxy, conf, cls in reversed(personpred):
+                            print(perxyxy)
+                            x1, y1, x3, y3 = perxyxy
+                            x1, y1, x3, y3 = int(x1), int(y1), int(x3), int(y3)
+                            x2, y2 = x3, y1
+                            x4, y4 = x1, y3
+                        flag = self.taskname.getflag(det, persondet, annotator, self.dirmodel[channels[i]]['fence'],
+                                                     self.dirmodel[channels[i]]['point'], self.names,
+                                                     self.dirmodel[channels[i]]['label'], channel=channels[i])
+                    else:
+                        persondet = []
+                        personpred = personpred[0]
+                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+                        for *perxyxy, conf, cls in reversed(personpred):
+                            print(perxyxy)
+                            if conf < 0.8:
+                                continue
+                            x1, y1, x3, y3 = perxyxy
+                            x1, y1, x3, y3 = int(x1), int(y1), int(x3), int(y3)
+                            x2, y2 = x3, y1
+                            x4, y4 = x1, y3
+                            persondet.append([x1, y1, x2, y2, x3, y3, x4, y4])
+                        if Path(self.opt.weights).stem == "fall":
+                            flag = self.taskname.getflag(det, persondet, annotator, self.dirmodel[channels[i]]['fence'],
+                                                         self.dirmodel[channels[i]]['point'], self.names,
+                                                         self.dirmodel[channels[i]]['label'],imshape=im.shape[2:],channel=channels[i])
+                        else:
+                            flag = self.taskname.getflag(det, persondet, annotator, self.dirmodel[channels[i]]['fence'],
+                                                         self.dirmodel[channels[i]]['point'], self.names,
+                                                         self.dirmodel[channels[i]]['label'],channel=channels[i])
+                else:
+                    if Path(self.opt.weights).stem in ['personcount']:
+                        flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
+                                                     self.dirmodel[channels[i]]['point'], self.names,
+                                                     self.dirmodel[channels[i]]['label'], personcountdir[channels[i]],channel=channels[i])
+                    elif Path(self.opt.weights).stem in ['persontre']:
+                        flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
+                                                     self.dirmodel[channels[i]]['point'], self.names,
+                                                     self.dirmodel[channels[i]]['label'], 1, imc,channel=channels[i])
+                    else:
+                        flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
+                                                     self.dirmodel[channels[i]]['point'], self.names,
+                                                     self.dirmodel[channels[i]]['label'],channel=channels[i])
+            if flag:
+                # if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+                self.dirmodel[channels[i]]['detframe'].append(1)
+                self.dirmodel[channels[i]]['preim'] = annotator.result()
+                self.dirmodel[channels[i]]['oripreim'] = imc
+                self.dirmodel[channels[i]]['posttime'] = videotime[i]
+                print(self.dirmodel[channels[i]]['detframe'])
+                # self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+            else:
+                # print(f'deti= {i}')
+                # print(detframe[i])
+                # if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+                self.dirmodel[channels[i]]['detframe'].append(0)
+                print(self.dirmodel[channels[i]]['detframe'])
+
+            if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1) >= 1:
+                self.dirmodel[channels[i]]['detflag'] = True
+                self.dirmodel[channels[i]]['detpretime'] = time.time()
+            elif self.dirmodel[channels[i]]['detframe'].count(1) == 0:
+                self.dirmodel[channels[i]]['detflag'] = False
+                self.dirmodel[channels[i]]['detpretime'] = float('inf')
+            # Stream results
+            # im0 = annotator.result()
+            if time.time() - self.dirmodel[channels[i]]['postpretime'] > 30 and time.time() - \
+                    self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime'] and \
+                    self.dirmodel[channels[i]]['detflag']:
+
+                print('post-------------------------------------------------------------------------')
+                # time.sleep(30)
+                # print(time.time() - postpretime[i])
+                # print(dirmodel[channels[i]]['preim'].shape)
+                success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
+                content = encoded_image.tobytes()
+                successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
+                contentori = encoded_imageori.tobytes()
+                filename = f'{p.stem}_{int(time.time())}.jpg'
+                filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
+                print(f'str(p) {p.name}')
+                print(channels[i])
+
+                # double-check the alarm with the multimodal large model before posting
+                if Path(self.opt.weights).stem in ['uniform', 'fall', 'persontre', 'helmet']:
+                    postdir = {'image_base64': numpy_image_to_base64(self.dirmodel[channels[i]]['oripreim']),
+                               'question': questiondir[Path(self.opt.weights).stem]}
+                    response = requests.post(urlque, json=postdir).json()['data']['answer']
+                    # print(f'response = {response}')
+                    # "是" = yes, "否" = no: skip posting when the QA answer contradicts the detection
+                    if "是" in response and Path(self.opt.weights).stem in ("uniform", "helmet"):
+                        continue
+                    if "否" in response and Path(self.opt.weights).stem in ("fall", "persontre"):
+                        continue
+                payload = {'channel': self.dirmodel[channels[i]]['channel'],
+                           'classIndex': self.dirmodel[channels[i]]['classindex'],
+                           'ip': self.dirmodel[channels[i]]['algip'],
+                           'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
+                           'videoUrl': channels[i]}
+                files = [
+                    ('file', (filename, content, 'image/jpeg')),
+                    ('oldFile', (filenameori, contentori, 'image/jpeg')),
+                ]
+                try:
+                    result = requests.post(url, data=payload, files=files)
+                    print(result)
+                except Exception:
+                    print('posterror')
+                # time.sleep(3000)
+                self.dirmodel[channels[i]]['postpretime'] = time.time()
+                self.dirmodel[channels[i]]['detflag'] = False
+                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
+                year = time.strftime('%Y', time.localtime(time.time()))
+                month = time.strftime('%m', time.localtime(time.time()))
+                day = time.strftime('%d', time.localtime(time.time()))
+                savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+                savefold = Path(savefold)
+                savefold.mkdir(parents=True, exist_ok=True)
+                detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+                detsavefold = Path(detsavefold)
+                detsavefold.mkdir(parents=True, exist_ok=True)
+                cv2.imwrite(f'{savefold}/{timesave}.jpg', self.dirmodel[channels[i]]['oripreim'])
+                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg', self.dirmodel[channels[i]]['preim'])
+            # if self.dirmodel[channels[i]]['detframe'].count(1)==0:
+            #    self.dirmodel[channels[i]]['detflag'] = False
+            # time.sleep(1)
+            self.view_img = False
+            if self.view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
+                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
+                im1 = cv2.resize(im0, (1280, 720))
+                cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
+                cv2.waitKey(1)  # 1 millisecond
+            # Save results (image with detections)
+            # Print time (inference-only)
+            print(f'channels[i]={channels[i]}')
+            #LOGGER.info(
+            #    f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
+            self.logger.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
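+    # load: background thread that polls the CHANGESTREAM table every 3 s for this
+    # model's add/delete/streaming flags (currently only logged).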
+    def load(self):
+        conn = sqlite3.connect(self.source)
+        c = conn.cursor()
+        while True:
+            # print("database opened successfully")
+            cursor = c.execute(
+                "SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= (?)",
+                (Path(self.opt.weights).stem,))
+
+            self.contentid = cursor.fetchall()
+            # global tag
+            # tag = Value('i', self.contentid[0][3])
+            # print(tag.value==1)
+            print(f'loadcontent={self.contentid[0][3]}')
+            time.sleep(3)
+        c.close()
+        conn.close()
+
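+    # readpoint: fetch this model's channel assignments from the task-list API and
+    # (re)build self.dirmodel with fences, points, labels, person limits and duration
+    # settings per channel; returns the sorted channel list (empty if unassigned).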
+    def readpoint(self):
+        data = {
+            "algorithmCode": modelalgdir[Path(self.opt.weights).stem],
+            "deviceIp": None,
+            'fwqCode': None
+        }
+        self.dirmodel = {}
+        result = requests.post(url=urlt, data=data).json()['data']
+        channell = []
+        for info in result:
+            # content = cursor.fetchall()
+            # self.dirmodel = {}
+            # for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
+            # address = f'{address[:-1]}0'
+            channel = info["deviceChannel"]
+            if Path(self.opt.weights).stem == "danager" and channel == '45':
+                continue
+            channell.append(channel)
+            self.dirmodel[channel] = {}
+            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"]) > 0 else 0
+            if Path(self.opt.weights).stem == "uniform":
+                self.dirmodel[channel]['fence'] = 1
+            # self.dirmodel[channel]['point'] = point
+            self.dirmodel[channel]['channel'] = info['deviceChannel']
+            self.dirmodel[channel]['classindex'] = info['algorithmCode']
+            self.dirmodel[channel]['ip'] = info['deviceIp']
+            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
+            dataele = {
+                "algorithmCode": self.dirmodel[channel]['classindex'],
+                "algorithmIp": self.dirmodel[channel]['algip'],
+                "channel": self.dirmodel[channel]['channel']
+            }
+            resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
+            resultele = resultele.split(',||')
+            resultele = tuple(resultele)
+            point = '%s:' * len(resultele) % resultele
+            if Path(self.opt.weights).stem == 'personcount':
+                resultper = requests.post(url=urlperson, data=dataele).json()['data']
+                personcountdir[channel] = int(resultper)
+            if Path(self.opt.weights).stem == "uniform" and len(point[:-2]) <= 1:
+                self.dirmodel[channel]['point'] = "150#144,1100#144,1100#550,150#550"
+            else:
+                self.dirmodel[channel]['point'] = point[:-2]
+            self.dirmodel[channel]['preim'] = None
+            self.dirmodel[channel]['oripreim'] = None
+            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
+            self.dirmodel[channel]['postpretime'] = 0
+            self.dirmodel[channel]['detflag'] = False
+            self.dirmodel[channel]['detpretime'] = float('inf')
+            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
+            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty':
+                datatime = {
+                    "algorithmCode": self.dirmodel[channel]['classindex'],
+                    "algorithmIp": self.dirmodel[channel]['algip'],
+                    "channel": self.dirmodel[channel]['channel']
+                }
+                resulttime = requests.post(url=urltime, data=datatime).json()['data']
+                self.dirmodel[channel]['durtime'] = int(resulttime)
+            else:
+                self.dirmodel[channel]['durtime'] = 0
+            self.dirmodel[channel]['posttime'] = 0
+        return sorted(channell)
+
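+# getframe: producer loop for one worker process. Reads batched frames from
+# LoadStreamsSQLTNP and fans them out to every model's queue during working hours;
+# every 300 s it re-queries the platform for the channel list and restarts the
+# dataset if the assignment changed.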
+def getframe(queuelist, channelsl, source, tt, numworks, lock, numworkv):
+    while True:
+        print("dataloader")
+        imgsz = [768, 768]
+        print(f'source = {source}')
+        dataset = LoadStreamsSQLTNP(channelsl, source, img_size=896,
+                                   auto=True, vid_stride=20, tt=tt, numworks=numworks)
+        bs = len(dataset)
+        vid_path, vid_writer = [None] * bs, [None] * bs
+        # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
+        # self.postpretime = [0]*bs
+        # Run inference
+        # imgsz = (1 , 3, *self.imgsz)
+        print(imgsz)
+        # self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
+        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+        # print("database opened successfully")
+        pretime = time.time()
+        tag = 0
+        sourcebase = 'project0117.db'
+        for path, im0s, vid_cap, s, videotime, channels in dataset:
+            if time.time() - pretime > 300:
+                channellist = []
+                pretime = time.time()
+                data = {
+                    "algorithmCode": None,
+                    "deviceIp": None,
+                    "fwqCode": None
+                }
+                try:
+                    result = requests.post(url=urlt, data=data).json()['data']
+                except Exception:
+                    result = []
+                for info in result:
+                    data = {
+                        "channel": info["deviceChannel"],
+                        "ip": info["deviceAlgorithmIp"]
+                    }
+                    chaflag = any(info["deviceChannel"] in t for t in channellist)
+                    # personcountdir[channel] = num
+                    if not chaflag:
+                        address = requests.post(url=urlrtsp, data=data).json()['msg']
+                        channellist.append((info['deviceChannel'], address))
+                channelsa = []
+                sourcea = []
+                channellist = set(channellist)
+                channellist = sorted(channellist, key=lambda x: x[0])
+                # channellist = set(channellist)
+                for cha, add in channellist:
+                    channelsa.append(cha)
+                    sourcea.append(add)
+                channelsl = sorted(channelsl)
+                # channelsa = sorted(channelsa)
+                if channelsa != channelsl and len(channelsa) > 0:
+                    print(f'channelsa = {channelsa}')
+                    print(f'channelsl = {channelsl}')
+                    dataset.close()
+                    channelsl = channelsa
+                    source = sourcea
+                    break
+            for key, value in queuelist.items():
+                hour = time.localtime(time.time()).tm_hour
+                if hour in range(7, 19):
+                    value[-1].put((path, im0s, vid_cap, s, videotime, channels))
+                    if value[-1].qsize() == 10:
+                        value[-1].get()  # queue full: drop the oldest frame
+                    else:
+                        time.sleep(0.001)
+
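+# getmutpro: spawn numworks producer processes, each feeding one bounded queue
+# (maxsize 10) per model; returns the {model: [queues]} mapping for the consumers.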
+def getmutpro(channels, source, streamlist, numworkv, lock, numworks=1, modellen=None):
+    processlist = []
+    queuelist = {}
+    for i in range(numworks):
+        for model in modellen:
+            queue = Queue(maxsize=10)
+            queuelist.setdefault(model, [])
+            queuelist[model].append(queue)
+        process = Process(target=getframe,
+                          args=(queuelist, channels, source, i, numworks, lock, numworkv))
+        processlist.append(process)
+        process.start()
+        # queuelist.append(queue)
+    return queuelist
+
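+# modelfun: consumer entry point for one model process; builds a Detect instance
+# and runs its infer() loop against the queues produced by getmutpro().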
+def modelfun(queue, weights, sourcedb, classes, device, classify, conf_thres, runmodel, v8=False):
+    print(weights)
+    detectdemo = Detect(weights=weights, source=sourcedb, classes=classes, device=device, classify=classify,
+                        conf_thres=conf_thres, v8=v8)
+    detectdemo.infer(queue, runmodel)
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+    opt = parser.parse_args()
+    return opt
+
+def main(opt):
+    # vestigial detect.py entry point; never called here (see the __main__ block below),
+    # and run() is not defined in this file
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
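+# Orchestrator: sync tasks and stream addresses into the SQLite database, start the
+# frame producer processes, then launch one consumer process per assigned model;
+# every 600 s the task tables are re-read so newly assigned models are started.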
+if __name__ == '__main__':
+    # torch.multiprocessing.set_start_method('spawn')
+    # set_start_method('spawn')
+    opt = parse_opt()
+    dbpath = 'projectnew.db'
+    conn = sqlite3.connect(dbpath)
+    # print("database opened successfully")
+    c = conn.cursor()
+    task(c, conn, urlt, urla)
+    cursor = c.execute('select channel,algip  from stream ')
+    result = cursor.fetchall()
+    for channel, algip in result:
+        data = {
+            "channel": channel,
+            "ip": algip
+        }
+        # personcountdir[channel] = num
+        address = requests.post(url=urlrtsp, data=data).json()['msg']
+        c.execute('UPDATE STREAM set address= (?) where channel =(?)', (address, channel))
+    conn.commit()
+    cursor = c.execute(
+        "SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname= 'arm' or modelname = 'persontre' or modelname = 'bag' or modelname = 'fall' or modelname = 'belt' or modelname = 'clear'")
+    # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+    content = cursor.fetchall()
+    cursor = c.execute("SELECT address,channel from STREAM ")
+    # cursor = c.execute("SELECT address from STREAM where modelname = 'helmet'")
+    contenta = cursor.fetchall()
+    source = []
+    modellist = []
+    addcha = []
+    channellist = []
+    for i in contenta:
+        addcha.append((i[0], i[1]))
+        # modellist.append(i[1])
+    addcha = set(addcha)
+    addcha = sorted(addcha, key=lambda x: x[1])
+    for add, cha in addcha:
+        source.append(add)
+        channellist.append(cha)
+    # source = set(source)
+    print(addcha)
+    source = list(source)
+    cursor = c.execute(
+        "SELECT modelname from STREAM where (modelname ='helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty'  or modelname = 'sleep'  or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag' or modelname = 'fall' or modelname = 'belt' or modelname = 'clear')")
+    contentm = cursor.fetchall()
+    for m in contentm:
+        modellist.append(m[0])
+    modellist = set(modellist)
+    modellist = list(modellist)
+    contentlist = []
+    for i in content:
+        contentlist.append(i[0])
+    # source.sort()
+    n = len(content)
+    print(f'modelname={n}')
+    print(content)
+    # content.reverse()
+    print(content)
+    print(source)
+    # main(opt)
+    # processes = []
+    streamqueue = Queue(maxsize=4)
+    numworkv = Value('i', 0)
+    manager = Manager()
+    lock = multiprocessing.Lock()
+    streamlist = manager.list()
+    numworks = 7
+    modellen = []
+    for i in modellist:
+        if i in contentlist:
+            modellen.append(i)
+    queuelist = getmutpro(channellist, source, streamlist, numworkv, lock, numworks, modellen)
+    deid = 0
+    # pool = ThreadPoolExecutor(max_workers=n)
+    runmodel = manager.dict()
+    while True:
+        for i in modellist:
+            if i in contentlist:
+                if i not in runmodel:
+                    # print(i)
+                    # detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+                    c.execute('select conf,cla from changestream where modelname = (?)', (i,))
+                    rea = c.fetchall()
+                    print(f'weights = {i}.pt')
+                    if i in ['duty', 'danager', 'inspection', 'cross', 'personcount']:
+                        # person-only models: restrict detection to class 0
+                        process = Process(target=modelfun, args=(
+                            queuelist[i], f'{i}.pt', dbpath, [0], 0, rea[0][1], rea[0][0], runmodel, True))
+                    else:
+                        process = Process(target=modelfun, args=(
+                            queuelist[i], f'{i}.pt', dbpath, None, 0, rea[0][1], rea[0][0], runmodel, True))
+                    time.sleep(3)
+                    process.start()
+                    deid = deid + 1
+                    runmodel[i] = 1
+        time.sleep(600)
+        task(c, conn, urlt, urla)
+        cursor = c.execute(
+            "SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag' or modelname = 'fall' or modelname = 'belt' or modelname = 'clear'")
+        content = cursor.fetchall()
+        contentlist = []
+        for con in content:
+            contentlist.append(con[0])
+        cursor = c.execute("SELECT address,channel from STREAM ")
+        contenta = cursor.fetchall()
+        source = []
+        modellist = []
+        addcha = []
+        channellist = []
+        for i in contenta:
+            addcha.append((i[0], i[1]))
+            # modellist.append(i[1])
+        addcha = set(addcha)
+        addcha = sorted(addcha)
+        for a, cha in addcha:
+            source.append(a)
+            channellist.append(cha)
+        print(addcha)
+        source = list(source)
+        # source.sort()
+        cursor = c.execute(
+            "SELECT modelname from STREAM where (modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag' or modelname = 'fall' or modelname = 'belt' or modelname = 'clear')")
+        contentm = cursor.fetchall()
+        for m in contentm:
+            modellist.append(m[0])
+        modellist = set(modellist)
+        n = len(content)
+        print(f'modelname={n}')
+        print(content)
+        # content.reverse()
+        print(content)