# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
                                                     img.jpg                         # image
                                                     vid.mp4                         # video
                                                     screen                          # screenshot
                                                     path/                           # directory
                                                     list.txt                        # list of images
                                                     list.streams                    # list of streams
                                                     'path/*.jpg'                    # glob
                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle
"""

import argparse
import asyncio
import math
import multiprocessing
import os
import pathlib
import platform
import signal
import sqlite3
import sys
import threading
import time
from collections import defaultdict, deque
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import Manager, Process, Queue, Value, set_start_method
from pathlib import Path

import matplotlib.path as mat
import numpy as np
import requests
import torch
# import websockets

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import (IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams,
                               LoadStreamsSQL, LoadStreamsSQLNEWN, LoadStreamsSQLNRERT, LoadStreamsSQLTN,
                               LoadStreamsVEight)
# non_max_suppression deliberately comes from ultralytics.utils.ops below
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements,
                           colorstr, cv2, increment_path, print_args, scale_boxes, strip_optimizer,
                           xyxy2xywh, strtolst, strtolstl, apply_classifier1, task)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
# from testpool import func1, TestA

from ultralytics import YOLO
from ultralytics.trackers.bot_sort import BOTSORT
from ultralytics.utils.checks import check_yaml
from ultralytics.utils import IterableSimpleNamespace, yaml_load, ops
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils.ops import non_max_suppression
from ultralytics.engine.results import Results

# def my_handler(signum, frame):
#     exit(0)

# url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
plt = platform.system()
if plt != 'Windows':
    pathlib.WindowsPath = pathlib.PosixPath  # allow Windows-trained checkpoints to load on POSIX

urlhead = "http://172.19.152.231"
url = f"{urlhead}/open/api/operate/upload"
urlele = f"{urlhead}/open/api/operate/fence"
urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
urlt = f"{urlhead}/open/api/operate/taskList"
urla = f"{urlhead}/open/api/operate/algorithmList"
weburl = "ws://36.7.84.146:28801/websocket/device"
urlrtsp = f"{urlhead}/open/api/operate/previewURLs"
personcountdir = {}
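# Mapping tables between platform algorithm codes and local model/label names:
#   algdir        algorithmCode -> model (weight file) stem
#   modellabeldir algorithmCode -> detection label drawn/filtered for that model
#   modelalgdir   model stem    -> algorithmCode (hand-written inverse of algdir)
#   algmodel      model stem    -> algorithmCode, built programmatically below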
algdir = {'0': 'helmet', '8': 'danager', '10': 'uniform', '14': 'smoke', '16': 'fire', '21': 'cross',
          '25': 'fall', '29': 'occupancy', '30': 'liquid', '31': 'pressure', '32': 'sleep', '33': 'conveyor',
          '34': 'personcount', '35': 'gloves', '36': 'sit', '37': 'other', '38': 'duty', '98': 'face', '51': 'run'}
modellabeldir = {'0': 'head', '8': 'person', '10': 'other', '14': 'smoke', '16': 'fire', '21': 'cross',
                 '25': 'fall', '29': 'car', '30': 'liquid', '31': 'pressure', '32': 'sleep', '33': 'conveyor',
                 '34': 'personcount', '35': 'gloves', '36': 'sit', '37': 'other', '38': 'person', '98': 'face', '51': 'person'}
modelalgdir = {'helmet': '0', 'danager': '8', 'uniform': '10', 'smoke': '14', 'fire': '16', 'cross': '21',
               'fall': '25', 'occupancy': '29', 'liquid': '30', 'pressure': '31', 'sleep': '32', 'conveyor': '33',
               'personcount': '34', 'gloves': '35', 'sit': '36', 'other': '37', 'duty': '38', 'face': '98', 'run': '51'}
algmodel = {}
for key, value in algdir.items():
    algmodel[value] = key


def on_predict_postprocess_end(predictor: object, persist: bool = False, im0s=None) -> None:
    """
    Postprocess detected boxes and update with object tracking.

    Args:
        predictor (object): The predictor object containing the predictions.
        persist (bool): Whether to persist the trackers if they already exist.

    Examples:
        Postprocess predictions and update with tracking
        >>> predictor = YourPredictorClass()
        >>> on_predict_postprocess_end(predictor, persist=True)
    """
    for i in range(len(im0s)):
        tracker = predictor.trackers[i]
        det = predictor.results[i].boxes.cpu().numpy()
        if len(det) == 0:
            continue
        tracks = tracker.update(det, im0s[i])
        if len(tracks) == 0:
            continue
        idx = tracks[:, -1].astype(int)
        predictor.results[i] = predictor.results[i][idx]
        update_args = {"boxes": torch.as_tensor(tracks[:, :-1])}
        predictor.results[i].update(**update_args)


def map_to_ellipse(position):
    """Remap a pixel position in a 1280x720 frame onto an ellipse-scaled space.

    The distance from the frame centre is rescaled onto an ellipse with
    semi-axes a=580 and b=280, which compensates for perspective distortion
    before track speeds are computed.
    """
    x, y = position
    center_x = 640
    center_y = 360
    a = 580
    b = 280

    x_norm = x / 1280
    y_norm = y / 720

    d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
    theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
    f = d_norm
    a_new = a * f
    b_new = b * f

    bias_x = center_x + a_new * math.cos(theta_norm)
    bias_y = center_y + b_new * math.sin(theta_norm)

    return np.array([bias_x, bias_y])
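# Illustrative sanity check for map_to_ellipse (hypothetical values, not used by
# the pipeline): the frame centre maps to itself, while a corner point is pulled
# onto the scaled ellipse at the same angle.
# >>> map_to_ellipse((640, 360))     # centre of a 1280x720 frame
# array([640., 360.])
# >>> map_to_ellipse((1280, 720))    # bottom-right corner, d_norm ~ 0.707
# array([930., 500.])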
class YoloOpt:
    def __init__(self, weights=ROOT / 'yolov5s.pt', source=ROOT / 'data/images', data=ROOT / 'data/coco128.yaml',
                 imgsz=(640, 640), conf_thres=0.80, iou_thres=0.45, max_det=1000, device='', view_img=False,
                 save_txt=False, save_conf=False, save_crop=False, nosave=True, classes=None, agnostic_nms=False,
                 augment=False, visualize=False, update=False, project=ROOT / 'runs/detect', name='exp',
                 exist_ok=False, line_thickness=1, hide_labels=False, hide_conf=False, half=False, dnn=False,
                 vid_stride=10, classify=False):
        self.weights = weights  # path to the weights file
        self.source = source  # source to run inference on
        self.data = data
        if imgsz is None:
            imgsz = (640, 640)
        self.imgsz = imgsz  # input image size, default (640, 640)
        self.conf_thres = conf_thres  # object confidence threshold used in NMS (default 0.25)
        self.iou_thres = iou_thres  # IoU threshold used in NMS (default 0.45)
        self.device = device  # device to run on; this project originally wrapped CPU only
        self.view_img = view_img  # whether to display images/videos after prediction, default False
        self.classes = classes  # keep only a subset of classes; default keeps all
        self.agnostic_nms = agnostic_nms  # class-agnostic NMS across classes, default False
        self.augment = augment  # augmented inference (TTA / multi-scale), can improve accuracy
        self.update = update  # if True, strip_optimizer is applied to drop optimizer state from .pt files, default False
        self.exist_ok = exist_ok  # if True, reuse an existing project/name dir instead of incrementing
        self.project = project  # where to save run logs; unused in this program
        self.name = name  # name of each run; also unused in this program
        self.max_det = max_det
        self.save_txt = save_txt
        self.save_conf = save_conf
        self.save_crop = save_crop
        self.nosave = nosave
        self.visualize = visualize
        self.line_thickness = line_thickness
        self.hide_labels = hide_labels
        self.hide_conf = hide_conf
        self.half = half
        self.dnn = dnn
        self.vid_stride = vid_stride
        self.classify = classify


class Detect:
    def __init__(self, weights=ROOT / 'yolov5s.pt', imgsz=(640, 640), source="changshusql1103.db", classify=False,
                 conf_thres=0.80, device='', channelsl=''):
        print(f'detectweights = {weights}')
        self.opt = YoloOpt(weights=weights, imgsz=imgsz, source=source, classify=classify, conf_thres=conf_thres,
                           device=device)
        self.source = str(self.opt.source)
        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
        self.webcam = True  # stream sources are always treated as webcam-style here
        screenshot = self.source.lower().startswith('screen')
        if is_url and is_file:
            self.source = check_file(self.source)  # download
        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
        # self.save_dir = self.save_dir / Path(self.opt.weights).stem
        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
        self.updatetime = time.time()
        # self.filetime = os.path.getmtime(self.opt.weights)
        bs = 1  # batch_size
        if self.webcam:
            # self.view_img = check_imshow(warn=True)
            self.view_img = False
            tt = 0
            numworks = 1
            self.dataset = LoadStreamsSQLTN(channelsl, source, img_size=640, auto=True, vid_stride=10, tt=tt,
                                            numworks=numworks)
            # bs = len(dataset)
        elif screenshot:
            # dead branch: self.webcam is forced True above; stride/pt would have to come from the model
            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
        else:
            self.dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt,
                                      vid_stride=self.opt.vid_stride)
        self.speed_threshold = 60
        self.high_velocity_count_threshold = 20
        # t1 = threading.Thread(target=self.load, daemon=True)
        # t1.start()

    @smart_inference_mode()
    def infer(self, weights, classify, conf_thres, device, runmodel):
        tracker = check_yaml(r'/home/h3c/.local/lib/python3.8/site-packages/ultralytics/cfg/trackers/botsort.yaml')
        cfg = IterableSimpleNamespace(**yaml_load(tracker))
        device = select_device(device)
        print(f'loadmodel device {device}')
        model = AutoBackend(weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        stride, names, pt = model.stride, model.names, model.pt
        imgsz = check_img_size(self.opt.imgsz, s=stride)
        model.warmup(imgsz=(1, 3, *imgsz))
        # one BoT-SORT tracker per subscribed stream
        trackers = []
        for _ in range(len(self.dataset)):
            tracker = BOTSORT(args=cfg, frame_rate=30)
            trackers.append(tracker)
        model.trackers = trackers
        if classify:
            classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
            classifier_model = classifier_model.to(device)
            classifier_model.eval()
            print('classify--------------------------------------------------------------------')
        readchannel, dirmodel = self.readpoint(weights)
        updatetime = time.time()
        filetime = os.path.getmtime(weights)
        pretime = time.time()
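        # Each iteration of the loader below yields one synchronized batch across
        # the subscribed streams: image paths, the batched letterboxed array `im`
        # (converted to a tensor further down), the original frames `im0s`, a
        # capture handle, a log string, per-frame capture times, and the channel
        # id of each stream.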
        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
        # print("database opened successfully")
        # async with websockets.connect(uri) as websocket:
        # for key in dirmodel.keys():
        #     dirmodel[key]['websoc'] = await websockets.connect(dirmodel[key]['web'])
        for path, im, im0s, vid_cap, s, videotime, channels in self.dataset:
            # only run between 07:00 and 17:59
            hour = time.localtime(time.time()).tm_hour
            if hour not in range(7, 18):
                time.sleep(30)
                continue
            # re-read the channel/task list from the platform every 5 minutes
            if time.time() - pretime > 300:
                ret, _ = self.readpoint(weights)
                pretime = time.time()
                if not ret:
                    print(f'{Path(weights).stem} {runmodel}')
                    runmodel.pop(Path(weights).stem)
                    print(f'{Path(weights).stem} {runmodel}')
                    break
            # keep only the streams that this model is assigned to
            algchannel = list(dirmodel.keys())
            print(path)
            algchannel = np.array(algchannel)
            channelsnp = np.array(channels)
            algindex = np.where(np.in1d(channelsnp, algchannel))[0]
            algindex = list(algindex)
            if len(algindex) == 0:
                continue
            path = np.array(path)
            path = path[algindex]
            path = path.tolist()
            channels = np.array(channels)
            channels = channels[algindex]
            channels = channels.tolist()
            im = im[algindex]
            if len(im0s) == 1:
                im0tmp = np.asarray(im0s)
            else:
                im0tmp = np.asarray(im0s, dtype=object)
            im0s = im0tmp
            im0s = im0s[algindex]
            print(f'algindex = {algindex}')
            print(f'im0s = {im0s[0].shape}')
            videotime = np.array(videotime)
            videotime = videotime[algindex]
            videotime = tuple(map(tuple, videotime))
            # if self.contentid[0][3] == 1:
            #     dataset.close()
            #     print('newstreaming=', self.contentid[0][3])
            #     conn = sqlite3.connect(self.source)
            #     c = conn.cursor()
            #     c.execute("UPDATE CHANGESTREAM set streamimg = 0 , addstream=0,delstream=0 where modelname= (?)",
            #               (Path(self.opt.weights).stem,))
            #     conn.commit()
            #     c.close()
            #     conn.close()
            #     cv2.destroyAllWindows()
            #     break
            # else:
            #     print('nonewstreaming=', self.contentid[0][3])
            with dt[0]:
                im = torch.from_numpy(im).to(model.device)
                im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                if len(im.shape) == 3:
                    im = im[None]  # expand for batch dim

            # Inference
            with dt[1]:
                visualize = False
                print(im.shape)
                pred = model(im, augment=self.opt.augment, visualize=visualize)
            if classify:
                self.postprocess(pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime,
                                 dirmodel, weights, filetime, classifier_model, model)
            else:
                self.postprocess(pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime,
                                 dirmodel, weights, filetime, None, model)
            # processlist = []
            # for i in range(3):
            #     process = Process(target=self.postprocess,
            #                       args=(pred[i::3], path[i::3], im0s[i::3], dataset, im[i::3], s))
            #     process.start()
            #     processlist.append(process)
            # for j in processlist:
            #     j.join()
            # with ProcessPoolExecutor(3) as ppool:
            #     for i in range(3):
            #         ppool.submit(self.postprocess, pred[i::3], path[i::3], im0s[i::3], dataset, im[i::3], s)

    # def preprocess(self):
    #     print('preprocess-----------------------------------------------')

    def postprocess(self, pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime, dirmodel,
                    weights, filetime, classifier_model, model):
        seen = 0
        print(f'seen = {seen}')
        windows = []
        with dt[2]:
            # keep only class 0 at a fixed 0.6 confidence for the first NMS pass
            pred = non_max_suppression(pred, 0.6, self.opt.iou_thres, 0, self.opt.agnostic_nms,
                                       max_det=self.opt.max_det)
        if classify:
            pred = apply_classifier1(pred, classifier_model, im, im0s, Path(weights).stem)
        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        model.results = []
        for i, det in enumerate(pred):  # per image
            current_time = time.time()
            # every 5 minutes: reload the model if the weights file changed and
            # refresh fence points / thresholds from the platform
            if time.time() - updatetime > 300:
                if filetime != os.path.getmtime(weights):
                    device = select_device(self.opt.device)
                    print('load new model')
                    # reload with AutoBackend, matching the initial load in infer()
                    model = AutoBackend(weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
                                        fp16=self.opt.half)
                    stride, names, pt = model.stride, model.names, model.pt
                dataele = {
                    "algorithmCode": dirmodel[channels[i]]['classindex'],
                    "algorithmIp": dirmodel[channels[i]]['algip'],
                    "channel": dirmodel[channels[i]]['channel']
                }
                try:
                    resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
                    resultele = resultele.split(',||')
                    resultele = tuple(resultele)
                    point = '%s:' * len(resultele) % resultele
                    dirmodel[channels[i]]['point'] = point[:-2]
                except Exception:
                    print('post error')
                if Path(weights).stem == 'personcount':
                    try:
                        resultper = requests.post(url=urlperson, data=dataele).json()['data']
                        personcountdir[channels[i]] = resultper
                    except Exception:
                        print('urlpersonerror')
                if modelalgdir[Path(weights).stem] != '0':
                    print(modelalgdir[Path(weights).stem])
                    rea = requests.post(url=urla,
                                        data={'algorithmCode': modelalgdir[Path(weights).stem]}).json()['data']
                    con = rea[0]['confidence']
                    conf_thres = con
                else:
                    conf_thres = 0.25
                updatetime = time.time()
            seen += 1
            if self.webcam:  # batch_size >= 1
                p, im0 = path[i], im0s[i].copy()
                im0 = np.uint8(im0)
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(self.dataset, 'frame', 0)
            p = Path(p)  # to Path
            # save_path = str(self.save_dir / p.name)  # im.jpg
            # txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy()  # for save_crop
            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(names))
            flag = False
            for c in det[:, 5].unique():
                n = (det[:, 5] == c).sum()  # detections per class
                s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
            det[:, :4] = ops.scale_boxes(im.shape[2:], det[:, :4], im0s[i].shape)
            model.results.append(Results(im0, path=path[i], names={0: 'person'}, boxes=det))
        on_predict_postprocess_end(model, True, im0s)
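        # Two-pass flow: the loop above wraps each image's NMS output in a
        # Results object; on_predict_postprocess_end then runs one BoT-SORT
        # tracker per stream and rewrites model.results with track ids attached.
        # The loop below consumes those tracked results.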
        for i, det in enumerate(model.results):
            flag = False
            print(f'++++++++++++++++++++++{det.boxes}')
            if det.boxes and det.boxes.id is not None:
                print('-----------------------------------')
                if Path(weights).stem == 'personcount':
                    personnum = 0
                if Path(weights).stem == 'persontre':
                    tredir = {}
                # Boxes are already rescaled from img_size to im0 size above
                boxes = det.boxes.xywh.cpu()
                track_ids = det.boxes.id.int().cpu().tolist()
                clss = det.boxes.cls.int().cpu().tolist()
                xyxys = det.boxes.xyxy.cpu()
                for box, track_id, cls, xyxy in zip(boxes, track_ids, clss, xyxys):
                    x, y, w, h = box
                    if dirmodel[channels[i]]['fence'] == 1:
                        c = int(cls)
                        labelname = names[c]
                        if labelname in dirmodel[channels[i]]['label']:
                            point = strtolstl(dirmodel[channels[i]]['point'])
                            for poi in point:
                                # two probe points on the bottom edge at 1/3 and 2/3 of the box width
                                p1 = (int(xyxy[0].cpu().item() + (xyxy[2].cpu().item() - xyxy[0].cpu().item()) / 3),
                                      xyxy[3].cpu().item())
                                p2 = (int(xyxy[0].cpu().item() + (xyxy[2].cpu().item() - xyxy[0].cpu().item()) / 3 * 2),
                                      xyxy[3].cpu().item())
                                # box centre, used for the 'danager' model instead
                                p3 = (int(xyxy[0]) + (int(xyxy[2]) - int(xyxy[0])) / 2,
                                      int(xyxy[1]) + (int(xyxy[3]) - int(xyxy[1])) / 2)
                                pt = [p1, p2]
                                if Path(weights).stem == 'danager':
                                    pt = [p3]
                                inflag = mat.Path(poi).contains_points(pt)
                                if inflag.any():
                                    x, y, x2, y2 = xyxy
                                    w = x2 - x
                                    h = y2 - y
                                    flag = True
                                    label = None
                                    annotator.box_label(xyxy, label, color=colors(c, True))
                    else:
                        im0 = model.results[i].orig_img.copy()
                        imc = im0.copy()
                        cv2.rectangle(im0, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)),
                                      (0, 255, 0), 2)
                        bottom_left_x = int(x - w / 2)
                        bottom_left_y = int(y + h / 2)
                        # compute the centre point
                        center_x = int(x)
                        center_y = int(y)
                        # draw the centre point (blue in BGR), radius 5
                        cv2.circle(im0, (center_x, center_y), 5, (255, 0, 0), -1)
                        dirmodel[channels[i]]['track_history'][track_id].append((bottom_left_x, bottom_left_y))
                        if len(dirmodel[channels[i]]['track_history'][track_id]) > 100:
                            del dirmodel[channels[i]]['track_history'][track_id][:-50]  # cap history length
                        # record the timestamp of every frame
                        dirmodel[channels[i]]['time_stamps'][track_id].append(current_time)
                        # time delta between the two most recent frames
                        if len(dirmodel[channels[i]]['time_stamps'][track_id]) > 1:
                            delta_time = (dirmodel[channels[i]]['time_stamps'][track_id][-1] -
                                          dirmodel[channels[i]]['time_stamps'][track_id][-2])
                        else:
                            delta_time = 0
                        instantaneous_velocity = 0
                        # instantaneous 2-D speed in the ellipse-corrected space
                        if len(dirmodel[channels[i]]['track_history'][track_id]) >= 2:
                            pos1 = np.array(dirmodel[channels[i]]['track_history'][track_id][-1])  # latest position
                            pos2 = np.array(dirmodel[channels[i]]['track_history'][track_id][-2])  # previous position
                            pos1 = map_to_ellipse(pos1)
                            pos2 = map_to_ellipse(pos2)
                            distance = np.linalg.norm(pos1 - pos2)
                            # speed = distance over elapsed time
                            instantaneous_velocity = distance / delta_time if delta_time > 0 else 0.0
                            instantaneous_velocity_magnitude = round(np.linalg.norm(instantaneous_velocity), 1)
                            dirmodel[channels[i]]['instantaneous_velocities'][track_id].append(
                                instantaneous_velocity_magnitude)
                        else:
                            instantaneous_velocity_magnitude = 0
                        # flag only when enough recent samples exceed the speed threshold
                        high_velocity_count = sum(
                            1 for velocity in dirmodel[channels[i]]['instantaneous_velocities'][track_id]
                            if velocity > self.speed_threshold)
                        if high_velocity_count >= self.high_velocity_count_threshold:
                            cv2.putText(im0, str(instantaneous_velocity_magnitude), (int(x), int(y)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                            flag = True
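                        # Running-detection heuristic: speeds are measured in the
                        # ellipse-corrected space; a track is flagged once at least
                        # high_velocity_count_threshold (20) of its last 30 speed
                        # samples (the deque's maxlen, set in readpoint) exceed
                        # speed_threshold (60).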
                    # if self.opt.save_txt:  # Write to file
                    #     xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    #     line = (cls, *xywh, conf) if self.opt.save_conf else (cls, *xywh)  # label format
                    #     with open(f'{txt_path}.txt', 'a') as f:
                    #         f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    # if self.save_img or self.opt.save_crop or self.view_img:  # Add bbox to image
                    #     c = int(cls)  # integer class
                    #     labelname = names[c]
                    #     if labelname in dirmodel[channels[i]]['label']:
                    #         if labelname != 'arm':
                    #             label = None
                    #             annotator.box_label(xyxy, label, color=colors(c, True))
                    #             flag = True
                    #         x, y, x2, y2 = xyxy
                    #         w = x2 - x
                    #         h = y2 - y
                    #         if Path(weights).stem == 'personcount':
                    #             personnum = personnum + 1
                    #         if Path(weights).stem == 'arm':
                    #             if labelname == 'arm':
                    #                 flag = False
                    #         if Path(weights).stem == 'persontre':
                    #             if c == 4:
                    #                 tredir.setdefault(0, [])
                    #                 tredir[0].append(xyxy)
                    #             elif c in [0, 1, 2]:
                    #                 tredir.setdefault(1, [])
                    #                 tredir[1].append(xyxy)
                # if Path(weights).stem == 'personcount':
                #     if personnum < int(personcountdir[channels[i]]):
                #         flag = False
                # if Path(weights).stem == 'persontre':
                #     if len(list(tredir.keys())) == 2:
                #         flag = judgeiou(tredir)
                #     else:
                #         flag = False
            # if len(self.dirmodel[channels[i]]['framelist']) >= 50:
            #     self.dirmodel[channels[i]]['framelist'].pop(0)
            # self.dirmodel[channels[i]]['framelist'].append(annotator.result())
            if flag:
                dirmodel[channels[i]]['detframe'].pop(0)
                dirmodel[channels[i]]['detframe'].append(1)
                dirmodel[channels[i]]['preim'] = im0
                dirmodel[channels[i]]['oripreim'] = imc
                dirmodel[channels[i]]['posttime'] = videotime[i]
                print(dirmodel[channels[i]]['detframe'])
            else:
                dirmodel[channels[i]]['detframe'].pop(0)
                dirmodel[channels[i]]['detframe'].append(0)
                print(dirmodel[channels[i]]['detframe'])
            if not dirmodel[channels[i]]['detflag'] and dirmodel[channels[i]]['detframe'].count(1) >= 1:
                dirmodel[channels[i]]['detflag'] = True
                dirmodel[channels[i]]['detpretime'] = time.time()
            elif dirmodel[channels[i]]['detframe'].count(1) == 0:
                dirmodel[channels[i]]['detflag'] = False
                dirmodel[channels[i]]['detpretime'] = float('inf')
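            # Alarm gating: a channel posts at most once every 30 s, and only
            # after a detection has persisted longer than the channel's
            # `durtime` (0 by default) since `detpretime` was set.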
            if (time.time() - dirmodel[channels[i]]['postpretime'] > 30 and
                    time.time() - dirmodel[channels[i]]['detpretime'] > dirmodel[channels[i]]['durtime'] and
                    dirmodel[channels[i]]['detflag']):
                success, encoded_image = cv2.imencode('.jpg', dirmodel[channels[i]]['preim'])
                content = encoded_image.tobytes()
                successori, encoded_imageori = cv2.imencode('.jpg', dirmodel[channels[i]]['oripreim'])
                contentori = encoded_imageori.tobytes()
                filename = f'{p.stem}_{int(time.time())}.jpg'
                filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
                print(f'str(p) {p.name}')
                print(channels[i])
                payload = {'channel': dirmodel[channels[i]]['channel'],
                           'classIndex': dirmodel[channels[i]]['classindex'],
                           'ip': dirmodel[channels[i]]['algip'],
                           'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', dirmodel[channels[i]]['posttime']),
                           'videoUrl': channels[i]}
                fourcc = cv2.VideoWriter_fourcc(*'MP4V')
                fps = 6
                height, width, _ = dirmodel[channels[i]]['preim'].shape
                year = time.strftime('%Y', time.localtime(time.time()))
                month = time.strftime('%m', time.localtime(time.time()))
                day = time.strftime('%d', time.localtime(time.time()))
                vidsavefold = f'/mnt/yolo/videos/{Path(weights).stem}/{year}/{month}/{day}'
                vidsaveflod = Path(vidsavefold)
                vidsaveflod.mkdir(parents=True, exist_ok=True)
                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', dirmodel[channels[i]]['posttime'])
                # out = cv2.VideoWriter(f'{vidsaveflod}/{timesave}.mp4', fourcc, fps, (width, height))
                # for imgframe in self.dirmodel[channels[i]]['framelist']:
                #     out.write(imgframe)
                # out.release()
                # the alarm POST uploads the annotated frame plus the raw frame
                files = [
                    ('file', (filename, content, 'image/jpeg')),
                    ('oldFile', (filenameori, contentori, 'image/jpeg')),
                    # ('videoFile', open(f'{vidsaveflod}/{timesave}.mp4', 'rb'))
                ]
                print(f'{Path(weights).stem}post----'
                      f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))}')
                try:
                    resulttext = requests.post(url, data=payload, files=files)
                    print(f'resulttext = {resulttext.json()["data"]}')
                except Exception:
                    print('posterror')
                dirmodel[channels[i]]['postpretime'] = time.time()
                dirmodel[channels[i]]['detflag'] = False
                savefold = f'/mnt/yolo/images/{Path(weights).stem}/{year}/{month}/{day}'
                saveflod = Path(savefold)
                detsavefold = f'/mnt/yolo/detimages/{Path(weights).stem}/{year}/{month}/{day}'
                detsavefold = Path(detsavefold)
                saveflod.mkdir(parents=True, exist_ok=True)
                detsavefold.mkdir(parents=True, exist_ok=True)
                cv2.imwrite(f'{savefold}/{timesave}.jpg', dirmodel[channels[i]]['oripreim'])
                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg', dirmodel[channels[i]]['preim'])
            self.view_img = False
            if self.view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(f'{str(p)}-{Path(weights).stem}',
                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(f'{str(p)}-{Path(weights).stem}', im0.shape[1], im0.shape[0])
                im1 = cv2.resize(im0, (1280, 720))
                cv2.imshow(f'{str(p)}-{Path(weights).stem}', im1)
                cv2.waitKey(1)  # 1 millisecond

            # Print time (inference-only)
            print(f'channels[i]={channels[i]}')
            LOGGER.info(
                f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms "
                f"{time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time()))} {Path(weights).stem}")

    # def load(self):
    #     conn = sqlite3.connect(self.source)
    #     c = conn.cursor()
    #     while True:
    #         print("database opened successfully")
    #         cursor = c.execute(
    #             "SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= (?)",
    #             (Path(self.opt.weights).stem,))
    #         self.contentid = cursor.fetchall()
    #         print(f'loadcontent={self.contentid[0][3]}')
    #         time.sleep(3)
    #     c.close()
    #     conn.close()

    def readpoint(self, weights):
        data = {
            "algorithmCode": algmodel[Path(weights).stem],
            "deviceIp": None
        }
        dirmodel = {}
        result = requests.post(url=urlt, data=data).json()['data']
        channell = []
        for info in result:
            channel = info["deviceChannel"]
            channell.append(channel)
            dirmodel[channel] = {}
            dirmodel[channel]['fence'] = 1 if len(info["electricFence"]) > 0 else 0
            dirmodel[channel]['channel'] = info['deviceChannel']
            dirmodel[channel]['classindex'] = info['algorithmCode']
            dirmodel[channel]['ip'] = info['deviceIp']
            dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
            dataele = {
                "algorithmCode": dirmodel[channel]['classindex'],
                "algorithmIp": dirmodel[channel]['algip'],
                "channel": dirmodel[channel]['channel']
            }
            resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
            resultele = resultele.split(',||')
            resultele = tuple(resultele)
            point = '%s:' * len(resultele) % resultele
            if Path(weights).stem == 'personcount':
                resultper = requests.post(url=urlperson, data=dataele).json()['data']
                personcountdir[channel] = resultper
            dirmodel[channel]['point'] = point[:-2]
            dirmodel[channel]['preim'] = None
            dirmodel[channel]['oripreim'] = None
            dirmodel[channel]['detframe'] = [0 for _ in range(2)]
            dirmodel[channel]['postpretime'] = 0
            dirmodel[channel]['detflag'] = False
            dirmodel[channel]['detpretime'] = float('inf')
            dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
            dirmodel[channel]['durtime'] = 0
            dirmodel[channel]['posttime'] = 0
            dirmodel[channel]['track_history'] = defaultdict(lambda: [])
            dirmodel[channel]['time_stamps'] = defaultdict(lambda: deque(maxlen=50))
            dirmodel[channel]['instantaneous_velocities'] = defaultdict(lambda: deque(maxlen=30))
            # dirmodel[channel]['web'] = f'{weburl}/{info["deviceId"]}/{info["algorithmCode"]}'
        print(dirmodel)
        return sorted(channell), dirmodel
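    # For reference, the taskList response consumed above is assumed to look
    # roughly like this (illustrative, reconstructed from the fields used):
    # {"data": [{"deviceChannel": "...", "electricFence": "x1,y1,x2,y2,||...",
    #            "algorithmCode": "51", "deviceIp": "...",
    #            "deviceAlgorithmIp": "...", "deviceId": "..."}]}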
    # def strtolst(self, str):
    #     str = str.split(":")
    #     lista = []
    #     for liststr in str:
    #         if len(liststr) > 0:
    #             li = liststr.split(',')
    #             listpoint = []
    #             for i, j in zip(li[::2], li[1::2]):
    #                 listpoint.append((i, j))
    #             lista.append(listpoint)
    #     return listpoint


# def preprocess():
#     print('preprocess-----------------------------------------------')


# def getframe(queue, channelsl, source, tt, numworks, lock, numworkv):
#     while True:
#         print("dataloader")
#         imgsz = [640, 640]
#         print(f'source = {source}')
#         print(f'inchannel ={channelsl}')
#         seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
#         sourcebase = 'project.db'
#         pretime = time.time()
#         tag = 0
#         for path, im, im0s, vid_cap, s, videotime, channels in dataset:
#             if time.time() - pretime > 30:
#                 channellist = []
#                 pretime = time.time()
#                 data = {
#                     "algorithmCode": None,
#                     "deviceIp": None
#                 }
#                 result = requests.post(url=urlt, data=data).json()['data']
#                 for info in result:
#                     channellist.append((info['deviceChannel'], info['playbackAddress']))
#                 channelsa = []
#                 sourcea = []
#                 channellist = set(channellist)
#                 channellist = sorted(channellist, key=lambda x: x[0])
#                 for cha, add in channellist:
#                     channelsa.append(cha)
#                     sourcea.append(add)
#                 channelsl = sorted(channelsl)
#                 if channelsa != channelsl:
#                     print(f'channelsa = {channelsa}')
#                     print(f'channelsl = {channelsl}')
#                     dataset.close()
#                     channelsl = channelsa
#                     source = sourcea
#                     break
#             # conn = sqlite3.connect(sourcebase)
#             # c = conn.cursor()
#             # cursor = c.execute("SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname = 'stream'")
#             # contentid = cursor.fetchall()
#             # tag = contentid[0][3]
#             queue.put((path, im, im0s, vid_cap, s, videotime, channels))
#             queue.get() if queue.qsize() > 3 else time.sleep(0.001)


# def getmutpro(channels, source, streamlist, numworkv, lock, numworks=1):
#     processlist = []
#     queuelist = []
#     for i in range(numworks):
#         queue = Queue(maxsize=4)
#         process = Process(target=getframe,
#                           args=(queue, channels, source, i, numworks, lock, numworkv))
#         processlist.append(process)
#         process.start()
#         queuelist.append(queue)
#     return queuelist
#     # path = []
#     # im0s = []
#     # vid_cap = None
#     # s = ''
#     # videotime = []
#     # while True:
#     #     imlist = []
#     #     pathlist = []
#     #     im0slist = []
#     #     channelslist = []
#     #     vid_cap = None
#     #     s = ''
#     #     videotimelist = []
#     #     for q in queuelist:
#     #         if q.qsize() > 0:
#     #             setframe = q.get()
#     #             path, im, im0s, vid_cap, s, videotime, channels = setframe
#     #             channelslist += channels
#     #             pathlist += path
#     #             im0slist += im0s
#     #             videotimelist += videotime
#     #             imlist.append(im)
#     #     if len(imlist) > 0:
#     #         im = np.concatenate(imlist)
#     #     if len(pathlist) > 0:
#     #         print(len(path), im.shape, len(im0s))
#     #         streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist, channelslist))
#     #     streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)


def modelfun(detectdemo, weights, classify=False, conf_thres=0.80, device='', runmodel=None):
    print(weights)
    # detectdemo = Detect(weights=weights, source=sourcedb, classify=classify, conf_thres=conf_thres, device=device)
    detectdemo.infer(weights, classify, conf_thres, device, runmodel)


def detectmut(channellist, source, modellist, contentlist, modeladir, runmodel={}, deviceid=''):
    detectdemo = Detect(channelsl=channellist, source=source)
    # while True:
    for modelname in modellist:
        if modelname in contentlist:
            print(f'weights ={modelname}.pt deviceid {deviceid}')
            process = threading.Thread(target=modelfun,
                                       args=(detectdemo, f'{modelname}.pt', modeladir[modelname]['cla'],
                                             modeladir[modelname]['conf'], deviceid, runmodel))
            # elif modelname in ['personcount', 'persontre']:
            #     process = Process(target=modelfun, args=(streamlist, videoqueue, f'{modelname}.pt', dbpath, False, 0.50, '', runmodel))
            # elif modelname in ['uniform']:
            #     process = Process(target=modelfun, args=(streamlist, videoqueue, f'{modelname}.pt', dbpath, True, 0.50, '', runmodel))
            time.sleep(3)
            process.start()


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt',
                        help='model path or triton URL')
    opt = parser.parse_args()
    return opt


def main(opt):
    # unused entry point kept from the original YOLOv5 template; `run` is not defined in this script
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == '__main__':
    torch.multiprocessing.set_start_method('spawn')
    torch.cuda.set_per_process_memory_fraction(0.6)
    opt = parse_opt()
    dbpath = 'project.db'
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    task(c, conn, urlt, urla)
    # refresh the preview (RTSP) address of every 'run' channel from the platform
    cursor = c.execute('select channel,algip from stream where modelname = "run"')
    result = cursor.fetchall()
    for channel, algip in result:
        data = {
            "channel": channel,
            "ip": algip
        }
        address = requests.post(url=urlrtsp, data=data).json()['msg']
        c.execute('UPDATE STREAM set address= (?) where channel =(?)', (address, channel))
    conn.commit()
    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'run'")
    content = cursor.fetchall()
    contentlist = []
    for con in content:
        contentlist.append(con[0])
    # cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager' or modelname = 'gloves' or modelname = 'other'")
    cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'run'")
    contenta = cursor.fetchall()
    source = []
    modellist = []
    addcha = []
    channellist = []
    for i in contenta:
        addcha.append((i[0], i[2]))
        modellist.append(i[1])
    addcha = set(addcha)
    addcha = sorted(addcha, key=lambda x: x[1])
    for a, cha in addcha:
        source.append(a)
        channellist.append(cha)
    print(addcha)
    source = list(source)
    modellist = set(modellist)
    n = len(content)
    print(f'modelname={n}')
    print(content)
    # main(opt)

    streamqueue = Queue(maxsize=4)
    numworkv = Value('i', 0)
    manager = Manager()
    lock = multiprocessing.Lock()
    streamlist = manager.list()
    numworks = 2
    # queuelist = getmutpro(channellist, source, streamlist, numworkv, lock, numworks)
    # videoqueue = Queue(maxsize=20)
    # thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
    # thread1.start()
    # pool = ThreadPoolExecutor(max_workers=n)
    # runmodel = manager.dict()
    modeladir = {}
    for modelname in modellist:
        if modelname in contentlist:
            print(f'weights ={modelname}.pt')
            c.execute('select conf,cla from changestream where modelname = (?)', (modelname,))
            rea = c.fetchall()
            modeladir.setdefault(modelname, {})
            modeladir[modelname]['conf'] = rea[0][0]
            modeladir[modelname]['cla'] = rea[0][1]
    runmodel = {}
    # shard the channels 13 per process; each process runs every requested model
    for deviceid, num in enumerate(range(0, len(channellist), 13)):
        process = Process(target=detectmut,
                          args=(channellist[num:num + 13], source[num:num + 13], modellist, contentlist,
                                modeladir, {}, 0))
        time.sleep(3)
        process.start()
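# Minimal usage sketch (illustrative, not executed): running a single model over
# the channels recorded in project.db would look roughly like
#     detectdemo = Detect(channelsl=channellist, source='project.db')
#     detectdemo.infer('run.pt', classify=False, conf_thres=0.80, device='', runmodel={'run': 1})
# assuming `run.pt` exists next to this script and the platform endpoints
# defined at the top are reachable.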