- """
- Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
- Usage - sources:
- $ python detect.py --weights yolov5s.pt --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- screen # screenshot
- path/ # directory
- list.txt # list of images
- list.streams # list of streams
- 'path/*.jpg' # glob
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
- Usage - formats:
- $ python detect.py --weights yolov5s.pt # PyTorch
- yolov5s.torchscript # TorchScript
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s_openvino_model # OpenVINO
- yolov5s.engine # TensorRT
- yolov5s.mlmodel # CoreML (macOS-only)
- yolov5s_saved_model # TensorFlow SavedModel
- yolov5s.pb # TensorFlow GraphDef
- yolov5s.tflite # TensorFlow Lite
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s_paddle_model # PaddlePaddle
- """
- import matplotlib.path as mat
- import requests
- import argparse
- import os
- import platform
- import sqlite3
- import sys
- import threading
- import time
- from pathlib import Path
- import signal
- import torch
- from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
- from multiprocessing import Process, Manager, Value, Queue, set_start_method
- import multiprocessing
- import multiprocessing as mp
- import numpy as np
- import pathlib
- from collections import defaultdict, deque
- import asyncio
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0]
- if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT))
- ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
- import math
- from models.common import DetectMultiBackend
- from utils.dataloaders import (IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams, LoadStreamsSQLNEWN,
- LoadStreamsSQL, LoadStreamsSQLNRERT, LoadStreamsVEight, LoadStreamsSQLTN)
- from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
- increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh, strtolst, strtolstl, apply_classifier1, task)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import select_device, smart_inference_mode
- from ultralytics import YOLO
- from person_jump_check import personJump
- plt = platform.system()
- if plt != 'Windows':
- pathlib.WindowsPath = pathlib.PosixPath
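- # Backend endpoints: `url` uploads alarm images, `urlele` fetches electric-fence points, `urlperson` fetches
- # person-count limits, `urlt` returns the task list, `urla` returns the algorithm list; `weburl` is the device websocket.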
- urlhead = "http://172.19.152.231"
- url = f"{urlhead}/open/api/operate/upload"
- urlele = f"{urlhead}/open/api/operate/fence"
- urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
- urlt = f"{urlhead}/open/api/operate/taskList"
- urla = f"{urlhead}/open/api/operate/algorithmList"
- weburl = f"ws://36.7.84.146:28801/websocket/device"
- personcountdir = {}
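- # Lookup tables: algorithm code -> algorithm name (algdir), algorithm code -> model label (modellabeldir),
- # algorithm name -> code (modelalgdir); algmodel below is built as the name -> code reverse of algdir.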
- algdir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'duty','98':'face','51':'run','64':'jump'}
- modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person','64':'person'}
- modelalgdir = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51','jump':'64'}
- algmodel = {}
- for key,value in algdir.items():
- algmodel[value] = key
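- # Map a point in a 1280x720 frame onto an ellipse centred at (640, 360): keep the angle from the frame centre
- # and scale the ellipse axes by the normalised distance from the centre.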
- def map_to_ellipse(position):
- x, y = position
- center_x = 640
- center_y = 360
- a = 640
- b = 360
- x_norm = x / 1280
- y_norm = y / 720
- d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
- theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
- f = d_norm
- a_new = a * f
- b_new = b * f
- bias_x = center_x + a_new * math.cos(theta_norm)
- bias_y = center_y + b_new * math.sin(theta_norm)
- return np.array([bias_x, bias_y])
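- # Plain options container mirroring the YOLOv5 detect.py command-line arguments.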
- class YoloOpt:
- def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
- imgsz=(640,640),
- conf_thres=0.80,
- iou_thres=0.45,
- max_det=1000,
- device='',
- view_img=False,
- save_txt=False,
- save_conf=False,
- save_crop=False,
- nosave=True,
- classes=None,
- agnostic_nms=False,
- augment=False,
- visualize=False,
- update=False,
- project=ROOT / 'runs/detect',
- name='exp',
- exist_ok=False,
- line_thickness=1,
- hide_labels=False,
- hide_conf=False,
- half=False,
- dnn=False,
- vid_stride=10,
- classify=False):
- self.weights = weights
- self.source = source
- self.data = data
- if imgsz is None:
- self.imgsz = (640, 640)
- else:
- self.imgsz = imgsz
- self.conf_thres = conf_thres
- self.iou_thres = iou_thres
- self.device = device
- self.view_img = view_img
- self.classes = classes
- self.agnostic_nms = agnostic_nms
- self.augment = augment
- self.update = update
- self.exist_ok = exist_ok
- self.project = project
- self.name = name
- self.max_det = max_det
- self.save_txt = save_txt
- self.save_conf= save_conf
- self.save_crop= save_crop
- self.nosave = nosave
- self.visualize = visualize
- self.line_thickness = line_thickness
- self.hide_labels = hide_labels
- self.hide_conf = hide_conf
- self.half = half
- self.dnn = dnn
- self.vid_stride = vid_stride
- self.classify = classify
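- # Holds the dataloader and per-run options for a group of stream channels; the model work itself happens in infer().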
- class Detect:
- def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classify=False,conf_thres=0.80,device='',channelsl=''):
- print(f'detectweights = {weights}')
- self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classify=classify,conf_thres=conf_thres,device=device)
- self.source = str(self.opt.source)
- self.save_img = not self.opt.nosave and not source.endswith('.txt')
- is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
- is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
- self.webcam = True
- screenshot = self.source.lower().startswith('screen')
- if is_url and is_file:
- self.source = check_file(self.source)
- self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)
-
-
- (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
-
-
-
- self.updatetime = time.time()
-
- bs = 1
- if self.webcam:
-
- self.view_img = False
-
- tt= 0
- numworks = 1
- self.dataset = LoadStreamsSQLTN(channelsl, source, img_size=640,
- auto=True, vid_stride=1, tt=tt, numworks=numworks)
-
- elif screenshot:
- dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
- else:
- self.dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
- self.speed_threshold = 40
- self.high_velocity_count_threshold = 6
-
-
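- # Inference loop: runs the personJump region counter on each frame, keeps a short per-channel detection history
- # for debouncing, and posts alarm images to the backend when a detection persists long enough.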
- @smart_inference_mode()
- def infer(self,weights,classify,conf_thres,device,runmodel):
- device = select_device(device)
- print(f"loadmodel device {device}")
-
-
-
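- # Fixed counting polygon (pixel coordinates) and the personJump counter that flags people entering it.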
- region_points = [(214, 519),(235, 512),(251, 503),(269, 496),(283, 486),(296, 482),(302, 493),(296, 507),(283, 517),(275, 526),(247, 533),(227, 538),(209, 538),(207, 529),(203, 521)]
- counter = personJump(
-
-
- show=False,
- region=region_points,
- model=weights,
- classes=[0],
- conf=0.4,
- )
-
- if classify:
- classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
- classifier_model = classifier_model.to(device)
- classifier_model.eval()
- print('classify--------------------------------------------------------------------')
-
-
- readchannel,dirmodel = self.readpoint(weights)
-
- updatetime = time.time()
- filetime = os.path.getmtime(weights)
-
- pretime = time.time()
- seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
-
-
-
-
-
- for path, im, im0s, vid_cap, s, videotime,channels in self.dataset:
- hour = time.localtime(time.time()).tm_hour
- if hour not in range(7,18):
- time.sleep(30)
- continue
- i = 0
- ima = im0s[i].copy()
- imc = ima.copy()
- flag = False
-
-
-
-
-
- im0 = counter.count(ima)
-
- if counter.in_count > 0:
- flag = True
- counter.in_count = 0
-
-
- if flag:
-
- dirmodel[channels[i]]['detframe'].pop(0)
- dirmodel[channels[i]]['detframe'].append(1)
- dirmodel[channels[i]]['preim'] = im0
- dirmodel[channels[i]]['oripreim'] = imc
- dirmodel[channels[i]]['posttime'] = videotime[i]
- print(dirmodel[channels[i]]['detframe'])
-
- else:
-
-
-
- dirmodel[channels[i]]['detframe'].pop(0)
- dirmodel[channels[i]]['detframe'].append(0)
- print(dirmodel[channels[i]]['detframe'])
-
-
-
-
-
-
-
-
- if not dirmodel[channels[i]]['detflag'] and dirmodel[channels[i]]['detframe'].count(1)>=1:
- dirmodel[channels[i]]['detflag'] = True
- dirmodel[channels[i]]['detpretime'] = time.time()
- elif dirmodel[channels[i]]['detframe'].count(1)==0 :
- dirmodel[channels[i]]['detflag'] = False
- dirmodel[channels[i]]['detpretime'] = float('inf')
-
-
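- # Post at most once every 30 s per channel, and only after the detection has lasted longer than the channel's configured duration.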
- if time.time() - dirmodel[channels[i]]['postpretime'] >30 and time.time() - dirmodel[channels[i]]['detpretime'] > dirmodel[channels[i]]['durtime'] and dirmodel[channels[i]]['detflag']:
-
-
-
-
-
-
-
-
-
- success, encoded_image = cv2.imencode('.jpg', dirmodel[channels[i]]['preim'])
- content = encoded_image.tobytes()
- successori, encoded_imageori = cv2.imencode('.jpg', dirmodel[channels[i]]['oripreim'])
- contentori = encoded_imageori.tobytes()
- filename = f'0_{int(time.time())}.jpg'
- filenameori = f'0_{int(time.time())}.jpg'
-
- print(channels[i])
- payload = {'channel': dirmodel[channels[i]]['channel'],
- 'classIndex': dirmodel[channels[i]]['classindex'],
- 'ip': dirmodel[channels[i]]['algip'],
- 'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', dirmodel[channels[i]]['posttime']),
- 'videoUrl': channels[i]}
- fourcc = cv2.VideoWriter_fourcc(*'MP4V')
- fps = 6
- height,width,_ = dirmodel[channels[i]]['preim'].shape
- year=time.strftime('%Y',time.localtime(time.time()))
- month=time.strftime('%m',time.localtime(time.time()))
- day=time.strftime('%d',time.localtime(time.time()))
- vidsavefold = f'/mnt/yolo/videos/{Path(weights).stem}/{year}/{month}/{day}'
- vidsaveflod = Path(vidsavefold)
- vidsaveflod.mkdir(parents=True, exist_ok=True)
- timesave = time.strftime('%Y-%m-%d-%H:%M:%S', dirmodel[channels[i]]['posttime'])
-
-
-
-
-
-
- files = [
- ('file', (filename, content, 'image/jpeg')),
- ('oldFile',(filenameori, contentori, 'image/jpeg')),
-
- ]
- print(f'{Path(weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))}')
-
- try:
-
- resulttext = requests.post(url, data=payload, files=files)
-
-
-
-
-
-
- print(f'resulttext = {resulttext.json()["data"]}')
-
- except Exception:
- print("posterror")
-
-
-
- dirmodel[channels[i]]['postpretime'] = time.time()
- dirmodel[channels[i]]['detflag'] = False
-
-
-
-
- savefold = f'/mnt/yolo/images/{Path(weights).stem}/{year}/{month}/{day}'
- saveflod = Path(savefold)
- detsavefold = f'/mnt/yolo/detimages/{Path(weights).stem}/{year}/{month}/{day}'
- detsavefold = Path(detsavefold)
- saveflod.mkdir(parents=True, exist_ok=True)
- detsavefold.mkdir(parents=True, exist_ok=True)
- cv2.imwrite(f'{savefold}/{timesave}.jpg',dirmodel[channels[i]]['oripreim'])
- cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',dirmodel[channels[i]]['preim'])
-
- self.view_img = False
- if self.view_img:
- if platform.system() == 'Linux' and p not in windows:
- windows.append(p)
- cv2.namedWindow(f'{str(p)}-{Path(weights).stem}',
- cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
- cv2.resizeWindow(f'{str(p)}-{Path(weights).stem}', im0.shape[1], im0.shape[0])
- im1 = cv2.resize(im0, (1280, 720))
- cv2.imshow(f'{str(p)}-{Path(weights).stem}', im1)
- cv2.waitKey(1)
-
-
- print(f'channels[i]={channels[i]}')
-
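- # Fetch the task list for this model from the backend and build the per-channel state: fence points,
- # debounce buffer, post/detect timers and tracking history.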
- def readpoint(self,weights):
-
-
-
-
-
- data = {
- "algorithmCode": algmodel[Path(weights).stem],
- "deviceIp":None
- }
- dirmodel = {}
- result = requests.post(url=urlt,data=data).json()['data']
- channell=[]
- for info in result:
-
-
-
-
- channel = info["deviceChannel"]
- channell.append(channel)
- dirmodel[channel] = {}
- dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
-
- dirmodel[channel]['channel'] = info['deviceChannel']
- dirmodel[channel]['classindex'] = info['algorithmCode']
- dirmodel[channel]['ip'] = info['deviceIp']
- dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
- dataele = {
- "algorithmCode": dirmodel[channel]['classindex'],
- "algorithmIp":dirmodel[channel]['algip'],
- "channel":dirmodel[channel]['channel']
- }
- resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
- resultele = resultele.split(',||')
- resultele = tuple(resultele)
- point = '%s:'*len(resultele) %resultele
- if Path(weights).stem == 'personcount':
- resultper = requests.post(url=urlperson,data=dataele).json()['data']
- personcountdir[channel] = resultper
- dirmodel[channel]['point'] = point[:-2]
- dirmodel[channel]['preim'] = None
- dirmodel[channel]['oripreim'] = None
- dirmodel[channel]['detframe'] = [0 for _ in range(2)]
- dirmodel[channel]['postpretime'] = 0
- dirmodel[channel]['detflag'] = False
- dirmodel[channel]['detpretime'] = float('inf')
- dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
- dirmodel[channel]['durtime'] = 0
- dirmodel[channel]['posttime'] = 0
- dirmodel[channel]['track_history'] = defaultdict(lambda: [])
- dirmodel[channel]['time_stamps'] = defaultdict(lambda: deque(maxlen=50))
- dirmodel[channel]['instantaneous_velocities'] = defaultdict(lambda: deque(maxlen=10))
-
-
- print(dirmodel)
- return sorted(channell),dirmodel
-
-
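- # Thread entry point: run one model's inference loop on a shared Detect instance.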
- def modelfun(detectdemo,weights,classify=False,conf_thres=0.80,device='',runmodel=None):
- print(weights)
-
- detectdemo.infer(weights,classify,conf_thres,device,runmodel)
-
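- # Create a Detect instance for a channel group and start one inference thread per model configured for it.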
- def detectmut(channellist,source,modellist,contentlist,modeladir,runmodel={},deviceid=''):
- detectdemo = Detect(channelsl=channellist,source=source)
-
- for modelname in modellist:
- if modelname in contentlist:
-
-
-
-
- print(f'weights ={modelname}.pt deviceid {deviceid}')
-
-
- process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',modeladir[modelname]['cla'],modeladir[modelname]['conf'],deviceid,runmodel))
-
- time.sleep(3)
- process.start()
- def parse_opt():
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
- opt = parser.parse_args()
- return opt
- def main(opt):
- check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
- run(**vars(opt))  # note: no run() is defined in this script; main() is leftover from the stock detect.py and is never called below
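- # Entry point: read the 'jump' channel/model assignments from the local SQLite config, then spawn one worker
- # process per group of up to 13 channels, alternating the device id between 0 and 1.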
- if __name__ == '__main__':
- torch.multiprocessing.set_start_method('spawn')
-
-
- torch.cuda.set_per_process_memory_fraction(0.6)
- opt = parse_opt()
- dbpath = 'projectnew.db'
- conn = sqlite3.connect(dbpath)
- c = conn.cursor()
- task(c,conn,urlt,urla)
- cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'jump'")
-
- content = cursor.fetchall()
- contentlist = []
- for con in content:
- contentlist.append(con[0])
-
- cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'jump'")
- contenta = cursor.fetchall()
- source = []
- modellist = []
- addcha = []
- channellist = []
- for i in contenta:
- addcha.append((i[0],i[2]))
- modellist.append(i[1])
- addcha = set(addcha)
- addcha = sorted(addcha,key=lambda x:x[1])
- for a,cha in addcha:
- source.append(a)
- channellist.append(cha)
- print(addcha)
-
- source = list(source)
-
- modellist = set(modellist)
- n = len(content)
- print(f'number of matching models = {n}')
- print(content)
-
-
- streamqueue = Queue(maxsize=4)
- numworkv = Value('i', 0)
- manager = Manager()
- lock = multiprocessing.Lock()
- streamlist = manager.list()
- numworks = 1
-
-
-
-
-
-
-
- modeladir = {}
- for modelname in modellist:
- if modelname in contentlist:
-
-
-
-
- print(f'weights ={modelname}.pt')
- c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
- rea = c.fetchall()
-
- modeladir.setdefault(modelname,{})
- modeladir[modelname]['conf'] = rea[0][0]
- modeladir[modelname]['cla'] = rea[0][1]
- runmodel = {}
- for deviceid,num in enumerate(range(0,len(channellist),13)):
- process = Process(target=detectmut,args=(channellist[num:num+13],source[num:num+13],modellist,contentlist,modeladir,{},deviceid%2))
- time.sleep(3)
- process.start()
-