- # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
- """
- Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
- Usage - sources:
- $ python detect.py --weights yolov5s.pt --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- screen # screenshot
- path/ # directory
- list.txt # list of images
- list.streams # list of streams
- 'path/*.jpg' # glob
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
- Usage - formats:
- $ python detect.py --weights yolov5s.pt # PyTorch
- yolov5s.torchscript # TorchScript
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s_openvino_model # OpenVINO
- yolov5s.engine # TensorRT
- yolov5s.mlmodel # CoreML (macOS-only)
- yolov5s_saved_model # TensorFlow SavedModel
- yolov5s.pb # TensorFlow GraphDef
- yolov5s.tflite # TensorFlow Lite
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s_paddle_model # PaddlePaddle
- """
- import argparse
- import asyncio
- import multiprocessing
- import os
- import pathlib
- import platform
- import signal
- import sqlite3
- import sys
- import threading
- import time
- from collections import defaultdict, deque
- from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
- from multiprocessing import Manager, Process, Queue, Value, set_start_method
- from pathlib import Path
- import matplotlib.path as mat
- import numpy as np
- import requests
- import torch
- #import websockets
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0] # YOLOv5 root directory
- if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
- import math
- from models.common import DetectMultiBackend
- from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLTN
- from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
- increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,strtolstl,apply_classifier1,task)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import select_device, smart_inference_mode
- #from testpool import func1,TestA
- from ultralytics import YOLO
- from person_jump_check import personJump
- # def my_handler(signum, frame):
- # exit(0)
- #url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
- plt = platform.system()
- if plt != 'Windows':
- pathlib.WindowsPath = pathlib.PosixPath
- urlhead = "http://172.19.152.231"
- url = f"{urlhead}/open/api/operate/upload"
- urlele = f"{urlhead}/open/api/operate/fence"
- urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
- urlt = f"{urlhead}/open/api/operate/taskList"
- urla = f"{urlhead}/open/api/operate/algorithmList"
- weburl = f"ws://36.7.84.146:28801/websocket/device"
- personcountdir = {}
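- # algdir maps the platform's algorithm codes to model names, modellabeldir maps codes to the label
- # class each model reports, and modelalgdir maps names back to codes; algmodel (built below) is the
- # same name -> code mapping derived from algdir.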
- algdir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'duty','98':'face','51':'run','64':'jump'}
- modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person','64':'person'}
- modelalgdir = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51','jump':'64'}
- algmodel = {}
- for key,value in algdir.items():
- algmodel[value] = key
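- # Map a pixel position in a 1280x720 frame onto an ellipse inscribed in the frame (centre (640, 360),
- # semi-axes 640 and 360): the point's normalised distance from the frame centre scales the ellipse radii,
- # and its angle from the centre picks the direction.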
- def map_to_ellipse(position):
- x, y = position
- center_x = 640
- center_y = 360
- a = 640
- b = 360
- x_norm = x / 1280
- y_norm = y / 720
- d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
- theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
- f = d_norm
- a_new = a * f
- b_new = b * f
- bias_x = center_x + a_new * math.cos(theta_norm)
- bias_y = center_y + b_new * math.sin(theta_norm)
- return np.array([bias_x, bias_y])
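- # Worked example: map_to_ellipse((1280, 720)) gives d_norm = sqrt(0.5) and theta = pi/4, so the result is
- # (640 + 640*sqrt(0.5)*cos(pi/4), 360 + 360*sqrt(0.5)*sin(pi/4)) = (960.0, 540.0).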
- class YoloOpt:
- def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
- imgsz=(640,640),
- conf_thres=0.80,
- iou_thres=0.45,
- max_det=1000,
- device='',
- view_img=False,
- save_txt=False,
- save_conf=False,
- save_crop=False,
- nosave=True,
- classes=None,
- agnostic_nms=False,
- augment=False,
- visualize=False,
- update=False,
- project=ROOT / 'runs/detect',
- name='exp',
- exist_ok=False,
- line_thickness=1,
- hide_labels=False,
- hide_conf=False,
- half=False,
- dnn=False,
- vid_stride=10,
- classify=False):
- self.weights = weights  # path to the weights file
- self.source = source  # input source to run inference on
- self.data = data
- self.imgsz = (640, 640) if imgsz is None else imgsz  # inference image size, default (640, 640)
- self.conf_thres = conf_thres  # object confidence threshold used in NMS
- self.iou_thres = iou_thres  # IoU threshold used in NMS
- self.device = device  # device to run on ('' lets select_device choose)
- self.view_img = view_img  # whether to display predictions, default False
- self.classes = classes  # restrict detections to these class indices, default keep all
- self.agnostic_nms = agnostic_nms  # class-agnostic NMS, default False
- self.augment = augment  # augmented inference (test-time augmentation)
- self.update = update  # if True, run strip_optimizer on the weights to remove optimizer state
- self.exist_ok = exist_ok  # allow an existing project/name directory without incrementing
- self.project = project  # directory for run outputs (unused in this program)
- self.name = name  # run name (unused in this program)
- self.max_det = max_det
- self.save_txt = save_txt
- self.save_conf= save_conf
- self.save_crop= save_crop
- self.nosave = nosave
- self.visualize = visualize
- self.line_thickness = line_thickness
- self.hide_labels = hide_labels
- self.hide_conf = hide_conf
- self.half = half
- self.dnn = dnn
- self.vid_stride = vid_stride
- self.classify = classify
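- # Minimal usage sketch (hypothetical values): YoloOpt(weights=ROOT / 'jump.pt', source='changshusql1103.db',
- # conf_thres=0.4) mirrors how Detect constructs its options below; unspecified fields keep the defaults above.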
- class Detect:
- def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classify=False,conf_thres=0.80,device='',channelsl=''):
- print(f'detectweights = {weights}')
- self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classify=classify,conf_thres=conf_thres,device=device)
- self.source = str(self.opt.source)
- self.save_img = not self.opt.nosave and not source.endswith('.txt') # save inference images
- is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
- is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
- self.webcam = True
- screenshot = self.source.lower().startswith('screen')
- if is_url and is_file:
- self.source = check_file(self.source) # download
- self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok) # increment run
- #self.save_dir = self.save_dir / Path(self.opt.weights).stem
- #self.save_dir.mkdir(parents=True, exist_ok=True)
- (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) # make dir
- #self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
- # self.readchannel = self.readpoint()
- #print(self.imgsz)
- self.updatetime = time.time()
- #self.filetime = os.path.getmtime(self.opt.weights)
- bs = 1 # batch_size
- if self.webcam:
- #self.view_img = check_imshow(warn=True)
- self.view_img = False
- # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
- tt= 0
- numworks = 1
- self.dataset = LoadStreamsSQLTN(channelsl, source, img_size=640,
- auto=True, vid_stride=1, tt=tt, numworks=numworks)
- # bs = len(dataset)
- elif screenshot:
- # Unreachable: self.webcam is hard-coded to True above, and LoadScreenshots is not imported in this file.
- dataset = LoadScreenshots(self.source, img_size=self.opt.imgsz, stride=self.stride, auto=self.pt)
- else:
- # Also unreachable; self.stride and self.pt would only exist after a model is loaded.
- self.dataset = LoadImages(self.source, img_size=self.opt.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
- self.speed_threshold = 40
- self.high_velocity_count_threshold = 6
- # t1 = threading.Thread(target=self.load,daemon=True)
- # t1.start()
- @smart_inference_mode()
- def infer(self,weights,classify,conf_thres,device,runmodel):
- device = select_device(device)
- print("loadmodel device{device}")
- #region_points = [(820, 200), (1600, 200), (1600, 530), (820, 530)]
- #region_points = [[259, 200], [2258, 200], [2258, 943], [259, 943]]
- #region_points = [[374,1224],[380,1237],[426,1237],[435,1237],[479,1230],[527,1201],[552,1187],[575,1166],[583,1155],[585,1136],[583,1126],[558,1124],[485,1178],[374,1210]]
- region_points = [(214, 519),(235, 512),(251, 503),(269, 496),(283, 486),(296, 482),(302, 493),(296, 507),(283, 517),(275, 526),(247, 533),(227, 538),(209, 538),(207, 529),(203, 521)]
- counter = personJump(
- # show_in = False,
- # show_out = False,
- show=False,
- region=region_points,
- model=weights,
- classes=[0],
- conf=0.4,
- )
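- # personJump (from person_jump_check) appears to act as an Ultralytics-style region counter:
- # counter.count(frame) returns an annotated frame and bumps counter.in_count when a tracked
- # person (class 0) triggers inside region_points; in_count is reset after each frame below.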
- # stride, names, pt = model.stride, model.names, model.pt
- if classify:
- classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
- classifier_model = classifier_model.to(device)
- classifier_model.eval()
- print('classify--------------------------------------------------------------------')
- #imgsz = check_img_size(self.opt.imgsz, s=stride)
- #model.warmup(imgsz=(1, 3, *imgsz))
- readchannel,dirmodel = self.readpoint(weights)
- #print(imgsz)
- updatetime = time.time()
- filetime = os.path.getmtime(weights)
- #selfreadpoint();
- pretime = time.time()
- seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
- #
- # print ("数据库打开成功")
- #async with websockets.connect(uri) as websocket:
- # for key in dirmodel.keys():
- # dirmodel[key]['websoc'] = await websockets.connect(dirmodel[key]['web'])
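- # Main inference loop: LoadStreamsSQLTN yields one batch (path, im, im0s, vid_cap, s, videotime, channels)
- # per iteration; frames outside 07:00-17:59 are skipped, each kept frame is run through the jump counter,
- # and a sliding detframe window per channel debounces detections before an alarm is posted.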
- for path, im, im0s, vid_cap, s, videotime,channels in self.dataset:
- hour = time.localtime(time.time()).tm_hour
- if hour not in range(7,18):
- time.sleep(30)
- continue
- i = 0
- ima = im0s[i].copy()
- imc = ima.copy()
- flag = False
- #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
- #if len(queue) == 0:
- # print('queuezero')
- # time.sleep(0.01)
- #print(123)
- im0 = counter.count(ima)
- #print(456)
- if counter.in_count > 0:
- flag = True
- counter.in_count = 0
- #print(f'timee = {time.time()-timee}')
- #flag = True
- if flag:
- #if self.dirmodel[path[i]]['imgtime'] != videotime[i]:
- dirmodel[channels[i]]['detframe'].pop(0)
- dirmodel[channels[i]]['detframe'].append(1)
- dirmodel[channels[i]]['preim'] = im0
- dirmodel[channels[i]]['oripreim'] = imc
- dirmodel[channels[i]]['posttime'] = videotime[i]
- print(dirmodel[channels[i]]['detframe'])
- #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
- else:
- #print(f'deti= {i}')
- #print(detframe[i])
- #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
- dirmodel[channels[i]]['detframe'].pop(0)
- dirmodel[channels[i]]['detframe'].append(0)
- print(dirmodel[channels[i]]['detframe'])
- #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
- #print(detframe[i])
- # Stream results
- #im0 = annotator.result()
- #print(f'i = {i}')
- #print(channels[i])
- #print(postpretime[i])
- #print(detframe[i])
- if not dirmodel[channels[i]]['detflag'] and dirmodel[channels[i]]['detframe'].count(1)>=1:
- dirmodel[channels[i]]['detflag'] = True
- dirmodel[channels[i]]['detpretime'] = time.time()
- elif dirmodel[channels[i]]['detframe'].count(1)==0 :
- dirmodel[channels[i]]['detflag'] = False
- dirmodel[channels[i]]['detpretime'] = float('inf')
- # Stream results
- #im0 = annotator.result()
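- # Post an alarm only if: the last alert for this channel was more than 30 s ago, the detection has
- # persisted longer than the channel's configured 'durtime', and the detection flag is currently set.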
- if time.time() - dirmodel[channels[i]]['postpretime'] >30 and time.time() - dirmodel[channels[i]]['detpretime'] > dirmodel[channels[i]]['durtime'] and dirmodel[channels[i]]['detflag']:
- #print()
- #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
- #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
- #print(self.dirmodel[channels[i]]['detflag'])
- #print(f'{Path(self.opt.weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.time()}')
- #time.sleep(30)
- #print(time.time() - postpretime[i])
- #print('111111111111111111111111111111111111111111111111')
- #print(dirmodel[channels[i]]['preim'].shape)
- success, encoded_image = cv2.imencode('.jpg', dirmodel[channels[i]]['preim'])
- content = encoded_image.tobytes()
- successori, encoded_imageori = cv2.imencode('.jpg', dirmodel[channels[i]]['oripreim'])
- contentori = encoded_imageori.tobytes()
- filename = f'0_{int(time.time())}.jpg'
- filenameori = f'0_{int(time.time())}.jpg'
- #print(f'str(p) {p.name}')
- print(channels[i])
- payload = {'channel': dirmodel[channels[i]]['channel'],
- 'classIndex': dirmodel[channels[i]]['classindex'],
- 'ip': dirmodel[channels[i]]['algip'],
- 'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', dirmodel[channels[i]]['posttime']),
- 'videoUrl': channels[i]}
- fourcc = cv2.VideoWriter_fourcc(*'MP4V')
- fps = 6
- height,width,_ = dirmodel[channels[i]]['preim'].shape
- year=time.strftime('%Y',time.localtime(time.time()))
- month=time.strftime('%m',time.localtime(time.time()))
- day=time.strftime('%d',time.localtime(time.time()))
- vidsavefold = f'/mnt/yolo/videos/{Path(weights).stem}/{year}/{month}/{day}'
- vidsaveflod = Path(vidsavefold)
- vidsaveflod.mkdir(parents=True, exist_ok=True)
- timesave = time.strftime('%Y-%m-%d-%H:%M:%S', dirmodel[channels[i]]['posttime'])
- #out = cv2.VideoWriter(f'{vidsaveflod}/{timesave}.mp4', fourcc, fps, (width, height))
- #for imgframe in self.dirmodel[channels[i]]['framelist']:
- #success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
- #content = encoded_image.tobytes()
- # out.write(imgframe)
- #out.release()
- files = [
- ('file', (filename, content, 'image/jpeg')),
- ('oldFile',(filenameori, contentori, 'image/jpeg')),
- #('videoFile',open(f'{vidsaveflod}/{timesave}.mp4','rb'))
- ]
- print(f'{Path(weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))}')
-
- try:
- # if self.dirmodel[channels[i]]['postvidpretime'] != self.dirmodel[channels[i]]['posttime'] and not self.dirmodel[channels[i]]['postvideo']:
- resulttext = requests.post(url, data=payload, files=files)
- # self.dirmodel[channels[i]]['postvidpretime'] = self.dirmodel[channels[i]]['posttime']
- # self.dirmodel[channels[i]]['postid'] = resulttext.json()['data']
- # self.dirmodel[channels[i]]['postframe'] = 0
- # self.dirmodel[channels[i]]['postvideo'] = True
- # self.dirmodel[channels[i]]['videoname'] = f'{vidsaveflod}/{timesave}.mp4'
- #videoqueuea.append(resulttext,f'{vidsaveflod}/{timesave}.mp4',)
- print(f'resulttext = {resulttext.json()["data"]}')
-
- except Exception as e:
- print(f"posterror: {e}")
- #if self.dirmodel[channels[i]]['postvideo']:
- #print(f'resulttext = {resulttext}')
- #time.sleep(3000)
- dirmodel[channels[i]]['postpretime'] = time.time()
- dirmodel[channels[i]]['detflag'] = False
- #timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
- #year=time.strftime('%Y',time.localtime(time.time()))
- #month=time.strftime('%m',time.localtime(time.time()))
- #day=time.strftime('%d',time.localtime(time.time()))
- savefold = f'/mnt/yolo/images/{Path(weights).stem}/{year}/{month}/{day}'
- saveflod = Path(savefold)
- detsavefold = f'/mnt/yolo/detimages/{Path(weights).stem}/{year}/{month}/{day}'
- detsavefold = Path(detsavefold)
- saveflod.mkdir(parents=True, exist_ok=True)
- detsavefold.mkdir(parents=True, exist_ok=True)
- cv2.imwrite(f'{savefold}/{timesave}.jpg',dirmodel[channels[i]]['oripreim'])
- cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',dirmodel[channels[i]]['preim'])
-
- self.view_img = False  # preview disabled; the block below never runs (and 'p' is not defined in this loop)
- if self.view_img:
- if platform.system() == 'Linux' and p not in windows:
- windows.append(p)
- cv2.namedWindow(f'{str(p)}-{Path(weights).stem}',
- cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
- cv2.resizeWindow(f'{str(p)}-{Path(weights).stem}', im0.shape[1], im0.shape[0])
- im1 = cv2.resize(im0, (1280, 720))
- cv2.imshow(f'{str(p)}-{Path(weights).stem}', im1)
- cv2.waitKey(1) # 1 millisecond
- # Save results (image with detections)
- # Print time (inference-only)
- print(f'channels[i]={channels[i]}')
- #LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms {time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time()))} {Path(weights).stem}")
- # def load(self):
- # conn = sqlite3.connect(self.source)
- # c = conn.cursor()
- # while True:
- # #
- # # print("database opened successfully")
- #
- # cursor = c.execute(
- # "SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
- # # content = cursor.fetchall()
- # # if content[0][1] ==1 or content[0][2] ==1:
- # # c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
- # # print("updata changestream")
- # # conn.commit()
- # # cursor = c.execute(
- # # "SELECT modelname, addstream,delstream,streamimg from CHANGESTREAM WHERE modelname='yolov5s'")
- # self.contentid = cursor.fetchall()
- # #global tag
- # #tag = Value('i', self.contentid[0][3])
- # #print(tag.value==1)
- # print(f'loadcontent={self.contentid[0][3]}')
- # time.sleep(3)
- # c.close()
- # conn.close()
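- # readpoint(): query the task-list API (urlt) for the channels bound to this model, fetch each channel's
- # electric-fence polygon (urlele) and, for 'personcount', its person limit (urlperson), then build the
- # per-channel state dict (detframe, detflag, timers, track history) used by infer().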
- def readpoint(self,weights):
- #conn = sqlite3.connect(self.source)
- #c = conn.cursor()
- #cursor = c.execute(
- # "SELECT address,fence,point,channel,classindex,ip ,algip,label, durtime from STREAM WHERE modelname= (?)",
- # (Path(self.opt.weights).stem,))
- data = {
- "algorithmCode": algmodel[Path(weights).stem],
- "deviceIp":None
- }
- dirmodel = {}
- result = requests.post(url=urlt,data=data).json()['data']
- channell=[]
- for info in result:
- #content = cursor.fetchall()
- #self.dirmodel = {}
- #for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
- #address = f'{address[:-1]}0'
- channel = info["deviceChannel"]
- channell.append(channel)
- dirmodel[channel] = {}
- dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
- #self.dirmodel[channel]['point'] = point
- dirmodel[channel]['channel'] = info['deviceChannel']
- dirmodel[channel]['classindex'] = info['algorithmCode']
- dirmodel[channel]['ip'] = info['deviceIp']
- dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
- dataele = {
- "algorithmCode": dirmodel[channel]['classindex'],
- "algorithmIp":dirmodel[channel]['algip'],
- "channel":dirmodel[channel]['channel']
- }
- resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
- resultele = resultele.split(',||')
- resultele = tuple(resultele)
- point = '%s:'*len(resultele) %resultele
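- # 'pointCollections' is joined back into a ':'-separated string here; the '[:-2]' below trims what is
- # presumably a trailing separator (the exact wire format is not documented in this file).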
- if Path(weights).stem == 'personcount':
- resultper = requests.post(url=urlperson,data=dataele).json()['data']
- personcountdir[channel] = resultper
- dirmodel[channel]['point'] = point[:-2]
- dirmodel[channel]['preim'] = None
- dirmodel[channel]['oripreim'] = None
- dirmodel[channel]['detframe'] = [0 for _ in range(2)]
- dirmodel[channel]['postpretime'] = 0
- dirmodel[channel]['detflag'] = False
- dirmodel[channel]['detpretime'] = float('inf')
- dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
- dirmodel[channel]['durtime'] = 0
- dirmodel[channel]['posttime'] = 0
- dirmodel[channel]['track_history'] = defaultdict(lambda: [])
- dirmodel[channel]['time_stamps'] = defaultdict(lambda: deque(maxlen=50))
- dirmodel[channel]['instantaneous_velocities'] = defaultdict(lambda: deque(maxlen=10))
- #tmp = f'{weburl}/{channel}/{info["deviceAlgorithmIp"]}'
- #dirmodel[channel]['web'] = f'{weburl}/{info["deviceId"]}/{info["algorithmCode"]}'
- print(dirmodel)
- return sorted(channell),dirmodel
- #def strtolst(self,str):
- # str = str.split(":")
- # lista = []
- # for liststr in str:
- # if len(liststr) > 0:
- # li = liststr.split(',')
- # listpoint = []
- # for i, j in zip(li[::2], li[1::2]):
- # listpoint.append((i, j))
- # lista.append(listpoint)
- # return listpoint
- #def preprocess():
- # print('preprocess-----------------------------------------------')
- # def getframe(queue,channelsl,source,tt,numworks,lock,numworkv):
- # while True:
- # print("dataloader")
- # imgsz = [640, 640]
- # print(f'source = {source}')
- # print(f'inchannel ={channelsl}')
- #
- # bs = len(dataset)
- # vid_path, vid_writer = [None] * bs, [None] * bs
- # # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
- # # self.postpretime = [0]*bs
- # # Run inference
- #
- # #imgsz = (1 , 3, *self.imgsz)
- # print(imgsz)
- # #self.model.warmup(imgsz=(1 , 3, *imgsz)) # warmup
- # seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
- # sourcebase = 'project.db'
- # #
- # # print("database opened successfully")
- # pretime = time.time()
- # tag = 0
- # for path, im, im0s, vid_cap, s, videotime,channels in dataset:
- # # print('*'*21)
- # # global tag
- # if time.time()-pretime > 30:
- # channellist = []
- # pretime = time.time()
- # data = {
- # "algorithmCode": None,
- # "deviceIp":None
- # }
- # result = requests.post(url=urlt,data=data).json()['data']
- # for info in result:
- # channellist.append((info['deviceChannel'],info['playbackAddress']))
- # channelsa = []
- # sourcea = []
- # channellist = set(channellist)
- # channellist = sorted(channellist,key=lambda x:x[0])
- # #channellist = set(channellist)
- # for cha,add in channellist:
- # channelsa.append(cha)
- # sourcea.append(add)
- # channelsl = sorted(channelsl)
- # #channelsa = sorted(channelsa)
- # if channelsa!=channelsl:
- # print(f'channelsa = {channelsa}')
- # print(f'channelsl = {channelsl}')
- # dataset.close()
- # channelsl = channelsa
- # source = sourcea
- # break;
- #
- # #conn = sqlite3.connect(sourcebase)
- # #c = conn.cursor()
- # #cursor = c.execute("SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname = 'stream'")
- # #contentid = cursor.fetchall()
- # #tag = contentid[0][3]
- # #if tag == 1:
- # # lock.acquire()
- # # numworkv.value += 1
- # # dataset.close()
- # # if numworkv.value==3:
- # # print('newstreaming=', tag)
- # # conn = sqlite3.connect(source)
- # # c = conn.cursor()
- # # c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname= 'helmet'")
- # # print(123)
- # # conn.commit()
- # # c.close()
- # # conn.close()
- # # lock.release()
- # # print('opencv1')
- # # cv2.destroyAllWindows()
- # # print('opencv')
- # # break
- # #else:
- # # print('nonewstreaming=', tag)
- # queue.put((path, im, im0s, vid_cap, s, videotime,channels))
- # queue.get() if queue.qsize() > 3 else time.sleep(0.001)
- #
- # def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1):
- # processlist = []
- # queuelist = []
- # for i in range(numworks):
- # queue = Queue(maxsize=4)
- # process = Process(target=getframe,
- # args=(queue, channels,source, i,numworks,lock,numworkv))
- # processlist.append(process)
- # process.start()
- # queuelist.append(queue)
- # return queuelist
- # #path = []
- # #im0s = []
- # #vid_cap = None
- # #s = ''
- # #videotime = []
- # #while True:
- # # imlist = []
- # # pathlist = []
- # # im0slist = []
- # # channelslist = []
- # # vid_cap = None
- # # s = ''
- # # videotimelist = []
- # # for q in queuelist:
- # # if q.qsize()>0:
- # # setframe = q.get()
- # # path, im, im0s, vid_cap, s, videotime,channels = setframe
- # # channelslist += channels
- # # pathlist += path
- # # im0slist += im0s
- # # videotimelist += videotime
- # # imlist.append(im)
- # # if len(imlist)>0:
- # # im = np.concatenate(imlist)
- # # if len(pathlist)>0:
- # # print(len(path),im.shape,len(im0s))
- # # streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
- # #print(f'streamlist = {len(streamlist)}')
- # # streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
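- # modelfun: run one model's inference loop on an already-constructed Detect instance
- # (invoked in its own thread by detectmut below).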
- def modelfun(detectdemo,weights,classify=False,conf_thres=0.80,device='',runmodel=None):
- print(weights)
- #detectdemo=Detect(weights=weights,source=sourcedb,classify=classify,conf_thres=conf_thres,device=device)
- detectdemo.infer(weights,classify,conf_thres,device,runmodel)
- #detectdemo.infer(weights, classify, conf_thres, device, runmodel)
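- # detectmut: build a single Detect (one shared stream loader) for a slice of channels, then start
- # one thread per enabled model against it via modelfun.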
- def detectmut(channellist,source,modellist,contentlist,modeladir,runmodel={},deviceid=''):
- detectdemo = Detect(channelsl=channellist,source=source)
- # while True:
- for modelname in modellist:
- if modelname in contentlist:
- #if modelname not in runmodel:
- #print(i)
- #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
- #if modelname in ['fall','helmet','bag','arm']:
- print(f'weights ={modelname}.pt deviceid {deviceid}')
- #c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
- #rea = c.fetchall()
- process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',modeladir[modelname]['cla'],modeladir[modelname]['conf'],deviceid,runmodel))
- #elif modelname in ['personcount','persontre']:
- # process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,False,0.50,'',runmodel))
- #elif modelname in ['uniform']:
- # process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,True,0.50,'',runmodel))
- #else:
- # process = Process(target=modelfun,args=(streamlist,f'{modelname}.pt',dbpath))
- #runmodel[modelname] = 1
- #processes.append(process)
- #process.start()
- #detectobj = Process(target=detectdemo.infer,args=(queue,))
- # Detect(weights=f'{i[0]}.pt')
- time.sleep(3)
- process.start()
- def parse_opt():
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
- opt = parser.parse_args()
- return opt
- def main(opt):
- # Vestigial entry point from upstream detect.py: run() is not defined in this script and main() is never called.
- check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
- run(**vars(opt))
- if __name__ == '__main__':
- torch.multiprocessing.set_start_method('spawn')
- #set_start_method('spawn')
- #multiprocessing.set_start_method('spawn')
- torch.cuda.set_per_process_memory_fraction(0.6)
- opt = parse_opt()
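- # Startup: sync the task list into the local SQLite DB, collect the channels configured for the 'jump'
- # model, then spawn one detectmut process per group of 13 channels, alternating the CUDA device id
- # (deviceid % 2) across processes.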
- dbpath = 'projectnew.db'
- conn = sqlite3.connect(dbpath)
- #
- # print ("数据库打开成功")
- c = conn.cursor()
- task(c,conn,urlt,urla)
- cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'jump'")
- #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
- content = cursor.fetchall()
- contentlist = []
- for con in content:
- contentlist.append(con[0])
- #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
- cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'jump'")
- contenta = cursor.fetchall()
- source = []
- modellist = []
- addcha = []
- channellist = []
- for i in contenta:
- addcha.append((i[0],i[2]))
- modellist.append(i[1])
- addcha = set(addcha)
- addcha = sorted(addcha,key=lambda x:x[1])
- for a,cha in addcha:
- source.append(a)
- channellist.append(cha)
- print(addcha)
- #source = set(source)
- source = list(source)
- #source.sort()
- modellist = set(modellist)
- n = len(content)
- print(f'modelname={n}')
- print(content)
- #content.reverse()
- print(content)
- # main(opt)
- #processes = []
- streamqueue = Queue(maxsize=4)
- numworkv = Value('i', 0)
- manager = Manager()
- lock = multiprocessing.Lock()
- streamlist = manager.list()
- numworks = 1
- #queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks)
- #thread.start()
- #videoqueue = Queue(maxsize=20)
- #thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
- #thread1.start()
- #pool = ThreadPoolExecutor(max_workers=n)
- #runmodel = manager.dict()
- modeladir = {}
- for modelname in modellist:
- if modelname in contentlist:
- #if modelname not in runmodel:
- #print(i)
- #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
- #if modelname in ['fall','helmet','bag','arm']:
- print(f'weights ={modelname}.pt')
- c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
- rea = c.fetchall()
- #print(f'rea')
- modeladir.setdefault(modelname,{})
- modeladir[modelname]['conf'] = rea[0][0]
- modeladir[modelname]['cla'] = rea[0][1]
- runmodel = {}
- for deviceid,num in enumerate(range(0,len(channellist),13)):
- process = Process(target=detectmut,args=(channellist[num:num+13],source[num:num+13],modellist,contentlist,modeladir,{},deviceid%2))
- time.sleep(3)
- process.start()
- #deviceid = deviceid+1
- # detectdemo = Detect(channelsl=channellist[0:num],source=source[0:num])
- # # while True:
- # for modelname in modellist:
- # if modelname in contentlist:
- # if modelname not in runmodel:
- # #print(i)
- # #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
- # #if modelname in ['fall','helmet','bag','arm']:
- # print(f'weights ={modelname}.pt')
- # c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
- # rea = c.fetchall()
- # process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',rea[0][1],rea[0][0],'',runmodel))
- # #elif modelname in ['personcount','persontre']:
- # # process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,False,0.50,'',runmodel))
- # #elif modelname in ['uniform']:
- # # process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,True,0.50,'',runmodel))
- # #else:
- # # process = Process(target=modelfun,args=(streamlist,f'{modelname}.pt',dbpath))
- # runmodel[modelname] = 1
- # #processes.append(process)
- # #process.start()
- # #detectobj = Process(target=detectdemo.infer,args=(queue,))
- # # Detect(weights=f'{i[0]}.pt')
- # time.sleep(3)
- # process.start()
- # time.sleep(900)
- # task(c,conn,urlt,urla)
- # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
- # #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
- # content = cursor.fetchall()
- # contentlist = []
- # for con in content:
- # contentlist.append(con[0])
- # #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
- # cursor = c.execute("SELECT address,modelname,channel from STREAM ")
- # contenta = cursor.fetchall()
- # source = []
- # modellist = []
- # addcha = []
- # channellist = []
- # for i in contenta:
- # addcha.append((i[0],i[2]))
- # modellist.append(i[1])
- # addcha = set(addcha)
- # addcha = sorted(addcha)
- # for a,cha in addcha:
- # source.append(a)
- # channellist.append(cha)
- # print(addcha)
- # #source = set(source)
- # source = list(source)
- # #source.sort()
- # modellist = set(modellist)
- # n = len(content)
- # print(f'modelname={n}')
- # print(content)
- # #content.reverse()
- # print(content)
- #pool.submit(detectobj.infer)
- #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
- #content = cursor.fetchall()
- #n = len(content)
- #print(f'modelname={n}')
- #print(content)
- #content.reverse()
- #print(content)
- # main(opt)
- #processes = []
- #pool = ProcessPoolExecutor(max_workers=n)
- #for i in content:
- #print(i)
- #detectdemo=Detect(weights=f'{i[0]}.pt')
- #process = Process(target=detectdemo.infer)
- #processes.append(process)
- #process.start()
- #detectobj = Detect(weights=f'{i[0]}.pt')
- # time.sleep(3)
- #pool.submit(detectobj.infer)
- # print('111111111111111111111111111111111111111111111111111111111')
- #pool.submit(TestA().func1, '张三', i)
- #print('----------------------------------------------------------------')
- #time.sleep(3000)
- # wait for all processes to finish
- #for process in processes:
- # process.join()
- #pool.submit(Detect(weights=f'{i[0]}.pt').infer)
- # if isinstance(opt.weights,list):
- # opt.weights = opt.weights[0]
- #signal.signal(signal.SIGINT, my_handler)
- #detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
- #detectdemo1.infer()
- #a = Test
- #with ProcessPoolExecutor(3) as ppool:
- #for i in range(3):
- # print('hello')
- #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
- #ppool.submit(TestA().func1, '张三', i)
- #ta = TestA()
- #with ProcessPoolExecutor(5) as ppool:  # create a pool of 5 worker processes
- # for i in range(1, 4):
- # ppool.submit(func1, '张三', i)
- #f1= pool.submit(detectdemo1.infer)
- # print("线程1-----------------------------------------------------------------------------------")
- #detectdemo2 = Detect(weights=r"helmet.pt")
- #f2=pool.submit(detectdemo2.infer)
- # print("线程2-------------------------------------------------------------------------------------")
- #detectdemo3 = threading.Thread(target=detectdemo3.infer)
- #detectdemo3 = Detect(weights=r"fall.pt")
- #f3=pool.submit(detectdemo3.infer)
|