- """
- Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
- Usage - sources:
- $ python detect.py --weights yolov5s.pt --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- screen # screenshot
- path/ # directory
- list.txt # list of images
- list.streams # list of streams
- 'path/*.jpg' # glob
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
- Usage - formats:
- $ python detect.py --weights yolov5s.pt # PyTorch
- yolov5s.torchscript # TorchScript
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s_openvino_model # OpenVINO
- yolov5s.engine # TensorRT
- yolov5s.mlmodel # CoreML (macOS-only)
- yolov5s_saved_model # TensorFlow SavedModel
- yolov5s.pb # TensorFlow GraphDef
- yolov5s.tflite # TensorFlow Lite
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s_paddle_model # PaddlePaddle
- """
- import argparse
- import os
- import platform
- import sqlite3
- import sys
- import threading
- import time
- import multiprocessing
- from multiprocessing import Process, Manager, Value, Queue
- from pathlib import Path
- import numpy as np
- import requests
- import torch
- from torchvision import transforms
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0]
- if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT))
- ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
- from models.common import DetectMultiBackend
- from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreamsSQLTN  # LoadScreenshots as in upstream YOLOv5
- from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
- increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh, strtolst, apply_classifier1, compute_IOU, task)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import select_device, smart_inference_mode
- from utils.renwu import newHelmet, newUniform, Fall, Personcount, Arm, Bag, Cross, Extinguisher, Persontre, Danager
- import torch.nn as nn
- import torch.nn.functional as F
- from ultralytics import YOLO
- from pydantic import BaseModel
- url = "http://172.19.152.231/open/api/operate/upload"
- urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
- urlt = "http://172.19.152.231/open/api/operate/taskList"
- urla = "http://172.19.152.231/open/api/operate/algorithmList"
- urlele = "http://172.19.152.231/open/api/operate/fence"
- urltime = "http://172.19.152.231/open/api/operate/getTime"
- urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
- modelnamedir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'duty','98':'face','55':'oil','52':'jingdian','53':'rope','54':'personcar','39':'inspection','11':'reflective','12':'phone','66':'extinguisher','67':'tizi','68':'menjin','35':'arm','36':'persontre','33':'bag'}
- modellabeldir = {'0':'head,person','8':'person','10':'black_work_clothes,blue_work_clothes,person','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'person','98':'face','55':'oil','52':'person,hand,ball','53':'rope','54':'person','39':'person','11':'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other','12':'phone','66':'extinguisher','67':'person,tizi','68':'person','35':'barearm','36':'person,foot,cart,bag,box','33':'handbox,handbag'}
- modelalgdir = {}
- personcountdir = {}
- for key,value in modelnamedir.items():
- modelalgdir[value] = key
- taskmap = {'helmet':newHelmet,'uniform':newUniform,'fall':Fall,'personcount':Personcount,'arm':Arm,'bag':Bag,'cross':Cross,'extinguisher':Extinguisher,'persontre':Persontre,'danager':Danager}
- mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
- test = transforms.Compose([transforms.Resize((224, 224)),
- transforms.ToTensor(),
- transforms.Normalize(mean=mean, std=std)])
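- # clapre: runs a secondary classifier over a batch of cropped detections and
- # returns the crop coordinates whose predicted class index is < 5 (treated
- # here as a positive hit). Note the hard-coded .to(0) assumes CUDA device 0.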
- def clapre(modelcla, claimg, clapoint):
- imgten = torch.stack(claimg, dim=0)
- clapoint = torch.stack(clapoint, dim=0)
- imgten = imgten.to(0)
- result = modelcla(imgten)
- result = F.softmax(result, dim=1)
- print(result)
- index = result.argmax(1)
- index = index.cpu().numpy()
- index = np.argwhere(index < 5)
- index = index.reshape(-1)
- print(index)
- if len(index) > 0:
- print(clapoint[index])
- return clapoint[index]
- else:
- return None
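- # Model: a small two-layer MLP over flattened pose keypoints
- # (nnode x nfeature -> 512 -> nclass). The `A` argument is unused but kept
- # because callers pass a placeholder (see Detect.__init__).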
- class Model(nn.Module):
- def __init__(self, A, nnode, nfeature, nclass):
- super().__init__()
- self.fc1 = nn.Linear(nnode * nfeature, 512)
- self.fc2 = nn.Linear(512, nclass)
- def forward(self, x):
- x = x.view(-1, int(x.size(1) * x.size(2)))
- x = F.relu(self.fc1(x))
- x = F.dropout(x, 0.7, training=self.training)
- return self.fc2(x)
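- # extract_keypoint: flattens the 13 keypoints the fall classifier consumes
- # (nose, shoulders, elbows, wrists, hips, knees, ankles) into an
- # [x0, y0, x1, y1, ...] list; eyes and ears are intentionally skipped.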
- def extract_keypoint(get_keypoint, keypoint):
- nose_x, nose_y = keypoint[get_keypoint.NOSE]
- left_shoulder_x, left_shoulder_y = keypoint[get_keypoint.LEFT_SHOULDER]
- right_shoulder_x, right_shoulder_y = keypoint[get_keypoint.RIGHT_SHOULDER]
-
- left_elbow_x, left_elbow_y = keypoint[get_keypoint.LEFT_ELBOW]
- right_elbow_x, right_elbow_y = keypoint[get_keypoint.RIGHT_ELBOW]
-
- left_wrist_x, left_wrist_y = keypoint[get_keypoint.LEFT_WRIST]
- right_wrist_x, right_wrist_y = keypoint[get_keypoint.RIGHT_WRIST]
-
- left_hip_x, left_hip_y = keypoint[get_keypoint.LEFT_HIP]
- right_hip_x, right_hip_y = keypoint[get_keypoint.RIGHT_HIP]
-
- left_knee_x, left_knee_y = keypoint[get_keypoint.LEFT_KNEE]
- right_knee_x, right_knee_y = keypoint[get_keypoint.RIGHT_KNEE]
-
- left_ankle_x, left_ankle_y = keypoint[get_keypoint.LEFT_ANKLE]
- right_ankle_x, right_ankle_y = keypoint[get_keypoint.RIGHT_ANKLE]
-
- return [
- nose_x, nose_y ,
- left_shoulder_x, left_shoulder_y ,
- right_shoulder_x, right_shoulder_y,
- left_elbow_x, left_elbow_y,
- right_elbow_x, right_elbow_y,
- left_wrist_x, left_wrist_y,
- right_wrist_x, right_wrist_y,
- left_hip_x, left_hip_y,
- right_hip_x, right_hip_y,
- left_knee_x, left_knee_y,
- right_knee_x, right_knee_y,
- left_ankle_x, left_ankle_y,
- right_ankle_x, right_ankle_y
- ]
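- # GetKeypoint: index map for the 17 COCO keypoints emitted by YOLO pose models.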
- class GetKeypoint(BaseModel):
- NOSE: int = 0
- LEFT_EYE: int = 1
- RIGHT_EYE: int = 2
- LEFT_EAR: int = 3
- RIGHT_EAR: int = 4
- LEFT_SHOULDER: int = 5
- RIGHT_SHOULDER: int = 6
- LEFT_ELBOW: int = 7
- RIGHT_ELBOW: int = 8
- LEFT_WRIST: int = 9
- RIGHT_WRIST: int = 10
- LEFT_HIP: int = 11
- RIGHT_HIP: int = 12
- LEFT_KNEE: int = 13
- RIGHT_KNEE: int = 14
- LEFT_ANKLE: int = 15
- RIGHT_ANKLE: int = 16
- class Box(BaseModel):
- left: int
- top: int
- right: int
- bottom: int
- box_conf: float
- pose_classifer_conf: float
- label: str
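- # YoloOpt: a plain options container mirroring detect.py's CLI flags so that
- # Detect can be constructed programmatically, e.g. (illustrative values only):
- #   opt = YoloOpt(weights='fall.pt', source='changshusql1103.db', device='0')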
- class YoloOpt:
- def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
- imgsz=(640,640),
- conf_thres=0.25,
- iou_thres=0.45,
- max_det=1000,
- device='',
- view_img=False,
- save_txt=False,
- save_conf=False,
- save_crop=False,
- nosave=True,
- classes=None,
- agnostic_nms=False,
- augment=False,
- visualize=False,
- update=False,
- project=ROOT / 'runs/detect',
- name='exp',
- exist_ok=False,
- line_thickness=1,
- hide_labels=False,
- hide_conf=False,
- half=False,
- dnn=False,
- vid_stride=10,
- classify=False,
- v8=False):
- self.weights = weights
- self.source = source
- self.data = data
- if imgsz is None:
- imgsz = (640, 640)
- self.imgsz = imgsz
- self.conf_thres = conf_thres
- self.iou_thres = iou_thres
- self.device = device
- self.view_img = view_img
- self.classes = classes
- self.agnostic_nms = agnostic_nms
- self.augment = augment
- self.update = update
- self.exist_ok = exist_ok
- self.project = project
- self.name = name
- self.max_det = max_det
- self.save_txt = save_txt
- self.save_conf= save_conf
- self.save_crop= save_crop
- self.nosave = nosave
- self.visualize = visualize
- self.line_thickness = line_thickness
- self.hide_labels = hide_labels
- self.hide_conf = hide_conf
- self.half = half
- self.dnn = dnn
- self.vid_stride = vid_stride
- self.classify = classify
- self.v8 = v8
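- # Detect: one per task model. Loads the YOLO pose model, an auxiliary person
- # detector and the MLP fall classifier, pulls per-channel configuration from
- # the platform API (readpoint), and starts a watcher thread (load) on the
- # stream-change table.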
- class Detect:
- def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classes=None,device=None,classify=False,conf_thres=0.25,v8=False):
- print(f'detectweights = {weights}')
- if v8:
- from ultralytics.nn.autobackend import AutoBackend
- from ultralytics.utils.ops import non_max_suppression
- else:
- from utils.general import non_max_suppression
- self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
- self.source = str(self.opt.source)
- self.save_img = not self.opt.nosave and not source.endswith('.txt')
- is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
- is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
- self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
- screenshot = self.source.lower().startswith('screen')
- if is_url and is_file:
- self.source = check_file(self.source)
- self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)
- (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
- print(f'device = {self.opt.device}')
- device = select_device(self.opt.device)
- self.device = device
- self.get_keypoint = GetKeypoint()
- self.pose_classfier_model = Model(None, 13, 2, 2)
- self.pose_classfier_model.load_state_dict(torch.load("posefallcls.pt"))
- self.pose_classfier_model.eval()
- self.pose_classfier_model.to(device)
- self.model = YOLO("posefall.pt")
- self.model = self.model.to(device)
- self.personmodel = YOLO('yolo11m.pt')
- self.personmodel = self.personmodel.to(device)
- self.readpoint()
-
- self.updatetime = time.time()
- self.updatemtime = time.time()
- self.filetime = os.path.getmtime(self.opt.weights)
- self.taskname = taskmap[Path(self.opt.weights).stem]()
- bs = 1
- if self.webcam:
- self.view_img = False
- elif screenshot:
- # Non-webcam sources are unused in this pipeline; stride/auto fall back to
- # the YOLOv5 defaults since no DetectMultiBackend model is loaded here.
- dataset = LoadScreenshots(self.source, img_size=self.opt.imgsz, stride=32, auto=True)
- else:
- dataset = LoadImages(self.source, img_size=self.opt.imgsz, stride=32, auto=True, vid_stride=self.opt.vid_stride)
- t1 = threading.Thread(target=self.load,daemon=True)
- t1.start()
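- # infer: consumer loop. Re-reads the channel list every 300 s; if this model
- # no longer has channels it removes itself from runmodel and exits, otherwise
- # it drains each frame queue and forwards batches to postprocess().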
- @smart_inference_mode()
- def infer(self,queue,runmodel):
- pretime = time.time()
- seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
- while True:
- if time.time() - pretime > 300:
- ret = self.readpoint()
- pretime = time.time()
- if not ret:
- print(f'{Path(self.opt.weights).stem} {runmodel}')
- runmodel.pop(Path(self.opt.weights).stem)
- print(f'{Path(self.opt.weights).stem} {runmodel}')
- break
- print(f'queuelen = {len(queue)}')
- for que in queue:
- if que.qsize() == 0:
- print('queuezero')
- time.sleep(0.01)
- if que.qsize() > 0:
- setframe = que.get()
- path, im, im0s, vid_cap, s, videotime, channels = setframe
- self.postprocess(path, im0s, im, s, videotime,channels)
-
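- # postprocess: for each frame, keeps pose boxes that overlap a person
- # detection (IOU > 0.5), normalises the 13 keypoints to the box, classifies
- # falling vs. not, then debounces per channel and posts alarm images (with
- # and without drawn boxes) to the platform and to /mnt/project.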
- def postprocess(self, path, im0s, im, s,videotime,channels):
- yolo_pose_results = self.model.predict(im0s, conf=0.8)
- yolo_person_results = self.personmodel.predict(im0s, conf=0.8, classes=0)
- for i, (result, personresult) in enumerate(zip(yolo_pose_results, yolo_person_results)):
- flag = False
- boxes = result.boxes.xyxy.cpu().numpy().tolist()
- personboxes = personresult.boxes.xyxy.cpu().numpy().tolist()
- confs = result.boxes.conf.cpu().numpy().tolist()
- all_keypoints = result.keypoints.data.cpu().numpy().tolist()
- pose_classfier_results = []
- for box, conf, keypoints in zip(boxes, confs, all_keypoints):
- iouflag = False
- for personbox in personboxes:
- iou, _ = compute_IOU(box, personbox)
- print(f'judgeiou = {iou}')
- if iou > 0.5:
- iouflag = True
- break
- if not iouflag:
- continue  # skip pose boxes not backed by a person detection
- x1, y1, x2, y2 = box
- x, y, w, h = x1, y1, x2 - x1, y2 - y1
- n_keypoints = [[(kp[0] - x) / w - 0.5, (kp[1] - y) / h - 0.5] if kp[0] > 0 and kp[1] > 0 else kp[:2] for kp in keypoints]
- n_keypoints = extract_keypoint(self.get_keypoint, n_keypoints)
- if n_keypoints[-12:].count(0) >= 2 * 2:
- continue
- if n_keypoints.count(0) >= 4 * 2:
- continue
- if w < h:
- continue
- pose_data = torch.Tensor([n_keypoints]).to(self.device)
- pose_data = pose_data.reshape(1, 13, 2)
- with torch.no_grad():
- p = self.pose_classfier_model(pose_data)
- prob = F.softmax(p, dim=1)
- index = prob.argmax()
- if index == 0:
- score = float(prob[0][index].cpu().numpy())
- pose_classfier_results.append(
- Box(left=x1, top=y1, right=x2, bottom=y2, box_conf=conf, pose_classifer_conf=score, label="falling"))
- image = im0s[i]
- imc = im0s[i].copy()
- for res in pose_classfier_results:
- flag = True
- cv2.rectangle(image, (int(res.left), int(res.top)), (int(res.right), int(res.bottom)), (0, 255, 0), 2)
- if flag:
- self.dirmodel[channels[i]]['detframe'].pop(0)
- self.dirmodel[channels[i]]['detframe'].append(1)
- self.dirmodel[channels[i]]['preim'] = image
- self.dirmodel[channels[i]]['oripreim'] = imc
- self.dirmodel[channels[i]]['posttime'] = videotime[i]
- print(self.dirmodel[channels[i]]['detframe'])
- else:
- self.dirmodel[channels[i]]['detframe'].pop(0)
- self.dirmodel[channels[i]]['detframe'].append(0)
- print(self.dirmodel[channels[i]]['detframe'])
- if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1) >= 1:
- self.dirmodel[channels[i]]['detflag'] = True
- self.dirmodel[channels[i]]['detpretime'] = time.time()
- elif self.dirmodel[channels[i]]['detframe'].count(1) == 0:
- self.dirmodel[channels[i]]['detflag'] = False
- self.dirmodel[channels[i]]['detpretime'] = float('inf')
- if time.time() - self.dirmodel[channels[i]]['postpretime'] > 30 and time.time() - self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime'] and self.dirmodel[channels[i]]['detflag']:
- print('post-------------------------------------------------------------------------')
- success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
- content = encoded_image.tobytes()
- successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
- contentori = encoded_imageori.tobytes()
- filename = f'{int(time.time())}.jpg'
- filenameori = f'ori_{int(time.time())}.jpg'
- print(channels[i])
- payload = {'channel': self.dirmodel[channels[i]]['channel'],
- 'classIndex': self.dirmodel[channels[i]]['classindex'],
- 'ip': self.dirmodel[channels[i]]['algip'],
- 'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
- 'videoUrl': channels[i]}
- files = [
- ('file', (filename, content, 'image/jpeg')),
- ('oldFile', (filenameori, contentori, 'image/jpeg')),
- ]
- try:
- result = requests.post(url, data=payload, files=files)
- print(result)
- except Exception:
- print('posterror')
-
- self.dirmodel[channels[i]]['postpretime'] = time.time()
- self.dirmodel[channels[i]]['detflag'] = False
- timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
- year = time.strftime('%Y',time.localtime(time.time()))
- month = time.strftime('%m',time.localtime(time.time()))
- day = time.strftime('%d',time.localtime(time.time()))
- savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
- savefold = Path(savefold)
- savefold.mkdir(parents=True,exist_ok=True)
- detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
- detsavefold = Path(detsavefold)
- detsavefold.mkdir(parents=True,exist_ok=True)
- cv2.imwrite(f'{savefold}/{timesave}.png',self.dirmodel[channels[i]]['oripreim'])
- cv2.imwrite(f'{detsavefold}/{timesave}det.png',self.dirmodel[channels[i]]['preim'])
-
- self.view_img = False  # live preview disabled in headless deployment
- if self.view_img:
- if platform.system() == 'Linux' and p not in windows:
- windows.append(p)
- cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
- cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
- cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
- im1 = cv2.resize(im0, (1280, 720))
- cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
- cv2.waitKey(1)
-
-
- print(f'channels[i]={channels[i]}')
-
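- # load: daemon thread that polls the CHANGESTREAM table every 3 s for
- # add/delete/streaming flags belonging to this model's weights.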
- def load(self):
- conn = sqlite3.connect(self.source)
- c = conn.cursor()
- while True:
- cursor = c.execute(
- "SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= (?)", (Path(self.opt.weights).stem,))
- self.contentid = cursor.fetchall()
- print(f'loadcontent={self.contentid[0][3]}')
- time.sleep(3)
- # unreachable while the loop above runs forever
- c.close()
- conn.close()
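- # readpoint: fetches this algorithm's channel list (algorithmCode '25' =
- # fall) from the task API and rebuilds self.dirmodel: fence polygon, alarm
- # duration, post/debounce state per channel. Returns the sorted channel
- # list; an empty list signals the model should shut down.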
- def readpoint(self):
- data = {
- "algorithmCode": '25',
- "deviceIp":None,
- 'fwqCode':None
- }
- self.dirmodel = {}
- result = requests.post(url=urlt,data=data).json()['data']
- channell=[]
- for info in result:
- channel = info["deviceChannel"]
- channell.append(channel)
- self.dirmodel[channel] = {}
- self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
- if Path(self.opt.weights).stem == "uniform":
- self.dirmodel[channel]['fence'] = 1
-
- self.dirmodel[channel]['channel'] = info['deviceChannel']
- self.dirmodel[channel]['classindex'] = info['algorithmCode']
- self.dirmodel[channel]['ip'] = info['deviceIp']
- self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
- dataele = {
- "algorithmCode": self.dirmodel[channel]['classindex'],
- "algorithmIp":self.dirmodel[channel]['algip'],
- "channel":self.dirmodel[channel]['channel']
- }
- resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
- resultele = resultele.split(',||')
- resultele = tuple(resultele)
- point = '%s:'*len(resultele) %resultele
- if Path(self.opt.weights).stem == 'personcount':
- resultper = requests.post(url=urlperson,data=dataele).json()['data']
- personcountdir[channel] = int(resultper)
- if len(point[:-2])<=1 and Path(self.opt.weights).stem == "uniform":
- self.dirmodel[channel]['point'] = "256#144,1024#144,1024#576,256#576"
- else:
- self.dirmodel[channel]['point'] = point[:-2]
- self.dirmodel[channel]['preim'] = None
- self.dirmodel[channel]['oripreim'] = None
- self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
- self.dirmodel[channel]['postpretime'] = 0
- self.dirmodel[channel]['detflag'] = False
- self.dirmodel[channel]['detpretime'] = float('inf')
- self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
- if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty':
- datatime = {
- "algorithmCode": self.dirmodel[channel]['classindex'],
- "algorithmIp": self.dirmodel[channel]['algip'],
- "channel": self.dirmodel[channel]['channel']
- }
- resulttime = requests.post(url=urltime, data=datatime).json()['data']
- self.dirmodel[channel]['durtime'] = int(resulttime)
- else:
- self.dirmodel[channel]['durtime'] = 0
- self.dirmodel[channel]['posttime'] = 0
- print(self.dirmodel)
- return sorted(channell)
-
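- # getframe: frame-producer process. Builds the multi-stream loader over all
- # channels and fans each batch out to every model's queue; every 300 s it
- # re-queries the platform and, if the channel set changed, closes the loader
- # and restarts it with the new RTSP sources.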
- def getframe(queuelist, channelsl, source, tt, numworks, lock, numworkv):
- while True:
- print("dataloader")
- imgsz = [640, 640]
- print(f'source = {source}')
- dataset = LoadStreamsSQLTN(channelsl, source, img_size=640,
- auto=True, vid_stride=20, tt=tt, numworks=numworks)
- bs = len(dataset)
- vid_path, vid_writer = [None] * bs, [None] * bs
- print(imgsz)
- seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
- pretime = time.time()
- tag = 0
- sourcebase = 'project0117.db'
- for path, im, im0s, vid_cap, s, videotime, channels in dataset:
- if time.time()-pretime > 300:
- channellist = []
- pretime = time.time()
- data = {
- "algorithmCode": '25',
- "deviceIp":None,
- "fwqCode":None
- }
- try:
- result = requests.post(url=urlt,data=data).json()['data']
- except Exception:
- result = []
- for info in result:
- data = {
- "channel": info["deviceChannel"],
- "ip": info["deviceAlgorithmIp"]
- }
- chaflag = any(info["deviceChannel"] in t for t in channellist)
-
- if not chaflag:
- address = requests.post(url=urlrtsp,data=data).json()['msg']
- channellist.append((info['deviceChannel'],address))
- channelsa = []
- sourcea = []
- channellist = set(channellist)
- channellist = sorted(channellist,key=lambda x:x[0])
-
- for cha,add in channellist:
- channelsa.append(cha)
- sourcea.append(add)
- channelsl = sorted(channelsl)
-
- if channelsa!=channelsl and len(channelsa)>0:
- print(f'channelsa = {channelsa}')
- print(f'channelsl = {channelsl}')
- dataset.close()
- channelsl = channelsa
- source = sourcea
- break
- for key, value in queuelist.items():
- hour = time.localtime(time.time()).tm_hour
- if hour in range(7, 18):
- value[-1].put((path, im, im0s, vid_cap, s, videotime, channels))
- if value[-1].qsize() == 10:
- value[-1].get()  # queue full: drop a frame so the consumer sees fresh data
- else:
- time.sleep(0.001)
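- # getmutpro: spawns `numworks` getframe producers and one bounded queue per
- # (model, worker) pair; returns {modelname: [queues]}. streamlist, numworkv
- # and lock are accepted for signature compatibility but unused here.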
- def getmutpro(channels, source, streamlist, numworkv, lock, numworks=1, modellen=None):
- processlist = []
- queuelist = {}
- for i in range(numworks):
- for model in modellen:
- queue = Queue(maxsize=10)
- queuelist.setdefault(model, [])
- queuelist[model].append(queue)
- process = Process(target=getframe,
- args=(queuelist, channels, source, i, numworks, lock, numworkv))
- processlist.append(process)
- process.start()
- return queuelist
-
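- # modelfun: entry point of each per-model inference process; builds a Detect
- # and runs its infer loop over the model's frame queues.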
- def modelfun(queue,weights,sourcedb,classes,device,classify,conf_thres,runmodel,v8=False):
- print(weights)
- detectdemo=Detect(weights=weights,source=sourcedb,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
- detectdemo.infer(queue,runmodel)
- def parse_opt():
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
- opt = parser.parse_args()
- return opt
- def main(opt):
- # Kept from upstream detect.py for reference; this script never calls main(),
- # and `run` is not defined here, so invoking it would raise NameError.
- check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
- run(**vars(opt))
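- # Main orchestration: sync task/stream tables into the local SQLite database,
- # resolve RTSP addresses, start the frame producers, then spawn one inference
- # process per configured model and refresh the configuration every 600 s.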
- if __name__ == '__main__':
- opt = parse_opt()
- dbpath = 'projectnew.db'
- conn = sqlite3.connect(dbpath)
- c = conn.cursor()
- task(c, conn, urlt, urla)
- cursor = c.execute('select channel,algip from stream where modelname = "fall"')
- result = cursor.fetchall()
- for channel, algip in result:
- data = {
- "channel": channel,
- "ip":algip
- }
-
- address = requests.post(url=urlrtsp,data=data).json()['msg']
- c.execute('UPDATE STREAM set address= (?) where channel =(?)',(address,channel))
- conn.commit()
- cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
-
- content = cursor.fetchall()
- cursor = c.execute("SELECT address,channel from STREAM where modelname = 'fall'")
-
- contenta = cursor.fetchall()
- source = []
- modellist = []
- addcha = []
- channellist = []
- for i in contenta:
- addcha.append((i[0],i[1]))
-
- addcha = set(addcha)
- addcha = sorted(addcha,key=lambda x:x[1])
- for add,cha in addcha:
- source.append(add)
- channellist.append(cha)
-
- print(addcha)
- source = list(source)
- cursor = c.execute("SELECT modelname from STREAM where (modelname ='fall')")
- contentm = cursor.fetchall()
- for m in contentm:
- modellist.append(m[0])
- modellist = set(modellist)
- modellist = list(modellist)
- contentlist = []
- for i in content:
- contentlist.append(i[0])
- n = len(content)
- print(f'modelname={n}')
- print(content)
- print(source)
- streamqueue = Queue(maxsize=4)
- numworkv = Value('i', 0)
- manager = Manager()
- lock = multiprocessing.Lock()
- streamlist = manager.list()
- numworks = 3
- modellen = []
- for i in modellist:
- if i in contentlist:
- modellen.append(i)
- queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks,modellen)
- deid = 0
-
- runmodel = manager.dict()
- while True:
- for i in modellist:
- if i in contentlist:
- if i not in runmodel:
- c.execute('select conf,cla from changestream where modelname = (?)', (i,))
- rea = c.fetchall()
- print(f'weights = {i}.pt')
- if i in ['duty','danager','inspection','cross','personcount']:
- process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,[0],0,rea[0][1],rea[0][0],runmodel))
- else:
- if i in ['uniform','arm','helmet']:
- process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel,True))
- else:
- process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel))
- time.sleep(3)
- process.start()
- deid = deid + 1
- runmodel[i] = 1
- time.sleep(600)
- task(c, conn, urlt, urla)
- cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
- content = cursor.fetchall()
- contentlist = []
- for con in content:
- contentlist.append(con[0])
- cursor = c.execute("SELECT address,channel from STREAM where modelname = 'fall'")
- contenta = cursor.fetchall()
- source = []
- modellist = []
- addcha = []
- channellist = []
- for i in contenta:
- addcha.append((i[0],i[1]))
-
- addcha = set(addcha)
- addcha = sorted(addcha, key=lambda x: x[1])
- for a, cha in addcha:
- source.append(a)
- channellist.append(cha)
- print(addcha)
- source = list(source)
- cursor = c.execute("SELECT modelname from STREAM where (modelname = 'fall')")
- contentm = cursor.fetchall()
- for m in contentm:
- modellist.append(m[0])
- modellist = set(modellist)
- n = len(content)
- print(f'modelname={n}')
- print(content)
-
- print(content)