# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
                                                     img.jpg                         # image
                                                     vid.mp4                         # video
                                                     screen                          # screenshot
                                                     path/                           # directory
                                                     list.txt                        # list of images
                                                     list.streams                    # list of streams
                                                     'path/*.jpg'                    # glob
                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle
"""
import matplotlib.path as mat
import requests
import argparse
import os
import platform
import sqlite3
import sys
import threading
import time
from pathlib import Path
import signal
import torch
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process, Manager, Value
from multiprocessing import Queue
from multiprocessing import set_start_method
import multiprocessing
import multiprocessing as mp
import numpy as np
from torchvision import transforms
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams, LoadStreamsSQLNEWN, LoadStreamsSQL, LoadStreamsSQLNRERT, LoadStreamsVEight, LoadStreamsSQLT, LoadStreamsSQLTN
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh, strtolst, apply_classifier1, compute_IOU, task)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
from utils.renwu import newHelmet, newUniform, Fall, Personcount, Arm, Bag, Cross, Extinguisher, Persontre, Danager
import torch.nn as nn
import torch.nn.functional as F
from ultralytics import YOLO
from pydantic import BaseModel
# from testpool import func1,TestA

# def my_handler(signum, frame):
#     exit(0)

# url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
url = "http://172.19.152.231/open/api/operate/upload"
urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
urlt = "http://172.19.152.231/open/api/operate/taskList"
urla = "http://172.19.152.231/open/api/operate/algorithmList"
urlele = "http://172.19.152.231/open/api/operate/fence"
urltime = "http://172.19.152.231/open/api/operate/getTime"
urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
# modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
# algmodel = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
modelnamedir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'duty','98':'face','55':'oil','52':'jingdian','53':'rope','54':'personcar','39':'inspection','11':'reflective','12':'phone','66':'extinguisher','67':'tizi','68':'menjin','35':'arm','36':'persontre','33':'bag'}
modellabeldir = {'0':'head,person','8':'person','10':'black_work_clothes,blue_work_clothes,person','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'person','98':'face','55':'oil','52':'person,hand,ball','53':'rope','54':'person','39':'person','11':'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other','12':'phone','66':'extinguisher','67':'person,tizi','68':'person','35':'barearm','36':'person,foot,cart,bag,box','33':'handbox,handbag'}
modelalgdir = {}
personcountdir = {}
for key, value in modelnamedir.items():
    modelalgdir[value] = key
taskmap = {'helmet': newHelmet, 'uniform': newUniform, 'fall': Fall, 'personcount': Personcount, 'arm': Arm, 'bag': Bag, 'cross': Cross, 'extinguisher': Extinguisher, 'persontre': Persontre, 'danager': Danager}
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
test = transforms.Compose([transforms.Resize((224, 224)),
                           # transforms.CenterCrop(224),
                           transforms.ToTensor(),
                           transforms.Normalize(mean=mean, std=std)
                           ])
def clapre(modelcla, claimg, clapoint):
    imgten = torch.stack(claimg, dim=0)
    clapoint = torch.stack(clapoint, dim=0)
    imgten = imgten.to(0)
    result = modelcla(imgten)
    result = F.softmax(result, dim=1)
    print(result)
    index = result.argmax(1)
    index = index.cpu().numpy()
    index = np.argwhere(index < 5)
    index = index.reshape(-1)
    print(index)
    if len(index) > 0:
        print(clapoint[index])
        return clapoint[index]
    else:
        return None
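
# Usage sketch for the second-stage classifier path above: `test` prepares
# normalized 224x224 crops and `clapre` keeps the boxes whose predicted class
# index is < 5. `modelcla`, `bgr_crops` and `boxes` are hypothetical names,
# not part of this pipeline; this helper is illustrative only and never called.
def _clapre_usage_sketch(modelcla, bgr_crops, boxes):
    from PIL import Image
    claimg = [test(Image.fromarray(cv2.cvtColor(c, cv2.COLOR_BGR2RGB))) for c in bgr_crops]
    clapoint = [torch.tensor(b, dtype=torch.float32) for b in boxes]
    return clapre(modelcla, claimg, clapoint)  # kept boxes, or None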
class Model(nn.Module):
    def __init__(self, A, nnode, nfeature, nclass):
        super().__init__()
        self.fc1 = nn.Linear(nnode * nfeature, 512)
        self.fc2 = nn.Linear(512, nclass)

    def forward(self, x):
        x = x.view(-1, int(x.size(1) * x.size(2)))
        x = F.relu(self.fc1(x))
        x = F.dropout(x, 0.7, training=self.training)
        return self.fc2(x)
def extract_keypoint(get_keypoint, keypoint):
    # nose
    nose_x, nose_y = keypoint[get_keypoint.NOSE]
    # eye
    # left_eye_x, left_eye_y = keypoint[get_keypoint.LEFT_EYE]
    # right_eye_x, right_eye_y = keypoint[get_keypoint.RIGHT_EYE]
    # ear
    # left_ear_x, left_ear_y = keypoint[get_keypoint.LEFT_EAR]
    # right_ear_x, right_ear_y = keypoint[get_keypoint.RIGHT_EAR]
    # shoulder
    left_shoulder_x, left_shoulder_y = keypoint[get_keypoint.LEFT_SHOULDER]
    right_shoulder_x, right_shoulder_y = keypoint[get_keypoint.RIGHT_SHOULDER]
    # elbow
    left_elbow_x, left_elbow_y = keypoint[get_keypoint.LEFT_ELBOW]
    right_elbow_x, right_elbow_y = keypoint[get_keypoint.RIGHT_ELBOW]
    # wrist
    left_wrist_x, left_wrist_y = keypoint[get_keypoint.LEFT_WRIST]
    right_wrist_x, right_wrist_y = keypoint[get_keypoint.RIGHT_WRIST]
    # hip
    left_hip_x, left_hip_y = keypoint[get_keypoint.LEFT_HIP]
    right_hip_x, right_hip_y = keypoint[get_keypoint.RIGHT_HIP]
    # knee
    left_knee_x, left_knee_y = keypoint[get_keypoint.LEFT_KNEE]
    right_knee_x, right_knee_y = keypoint[get_keypoint.RIGHT_KNEE]
    # ankle
    left_ankle_x, left_ankle_y = keypoint[get_keypoint.LEFT_ANKLE]
    right_ankle_x, right_ankle_y = keypoint[get_keypoint.RIGHT_ANKLE]
    return [
        nose_x, nose_y,
        left_shoulder_x, left_shoulder_y,
        right_shoulder_x, right_shoulder_y,
        left_elbow_x, left_elbow_y,
        right_elbow_x, right_elbow_y,
        left_wrist_x, left_wrist_y,
        right_wrist_x, right_wrist_y,
        left_hip_x, left_hip_y,
        right_hip_x, right_hip_y,
        left_knee_x, left_knee_y,
        right_knee_x, right_knee_y,
        left_ankle_x, left_ankle_y,
        right_ankle_x, right_ankle_y
    ]
class GetKeypoint(BaseModel):
    NOSE: int = 0
    LEFT_EYE: int = 1
    RIGHT_EYE: int = 2
    LEFT_EAR: int = 3
    RIGHT_EAR: int = 4
    LEFT_SHOULDER: int = 5
    RIGHT_SHOULDER: int = 6
    LEFT_ELBOW: int = 7
    RIGHT_ELBOW: int = 8
    LEFT_WRIST: int = 9
    RIGHT_WRIST: int = 10
    LEFT_HIP: int = 11
    RIGHT_HIP: int = 12
    LEFT_KNEE: int = 13
    RIGHT_KNEE: int = 14
    LEFT_ANKLE: int = 15
    RIGHT_ANKLE: int = 16
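
# Shape sanity-check sketch (illustrative, never called): extract_keypoint
# drops the eye/ear points from the 17 COCO keypoints, leaving 13 (x, y)
# pairs flattened to 26 floats — exactly what the pose classifier reshapes
# to (1, 13, 2) in Detect.postprocess below. The dummy values are assumptions.
def _keypoint_to_classifier_demo():
    kp17 = [[float(i), float(i) + 0.5] for i in range(17)]  # dummy (x, y) per COCO index
    flat = extract_keypoint(GetKeypoint(), kp17)
    assert len(flat) == 26  # 13 keypoints * (x, y)
    pose_data = torch.Tensor([flat]).reshape(1, 13, 2)
    return Model(None, 13, 2, 2)(pose_data).shape  # -> torch.Size([1, 2])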
class Box(BaseModel):
    left: int
    top: int
    right: int
    bottom: int
    box_conf: float
    pose_classifer_conf: float
    label: str
class YoloOpt:
    def __init__(self, weights=ROOT / 'yolov5s.pt', source=ROOT / 'data/images', data=ROOT / 'data/coco128.yaml',
                 imgsz=(640, 640),
                 conf_thres=0.25,
                 iou_thres=0.45,
                 max_det=1000,
                 device='',
                 view_img=False,
                 save_txt=False,
                 save_conf=False,
                 save_crop=False,
                 nosave=True,
                 classes=None,
                 agnostic_nms=False,
                 augment=False,
                 visualize=False,
                 update=False,
                 project=ROOT / 'runs/detect',
                 name='exp',
                 exist_ok=False,
                 line_thickness=1,
                 hide_labels=False,
                 hide_conf=False,
                 half=False,
                 dnn=False,
                 vid_stride=10,
                 classify=False,
                 v8=False):
        self.weights = weights  # path to the weights file
        self.source = source  # input to run inference on
        self.data = data
        if imgsz is None:
            imgsz = (640, 640)
        self.imgsz = imgsz  # input image size, default (640, 640)
        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
        self.device = device  # device to run on; this project can only use the CPU, so only the CPU path is wrapped here
        self.view_img = view_img  # whether to display images/videos after prediction, default False
        self.classes = classes  # keep only a subset of classes; default keeps all
        self.agnostic_nms = agnostic_nms  # class-agnostic NMS (suppress boxes across classes), default False
        self.augment = augment  # augmented inference: TTA / multi-scale prediction, can improve accuracy
        self.update = update  # if True, run strip_optimizer on all models to drop optimizer state from the .pt file, default False
        self.exist_ok = exist_ok  # if True, reuse an existing project/name directory instead of incrementing, default False
        self.project = project  # where run logs are saved; unused in this program
        self.name = name  # name of each run; also unused here
        self.max_det = max_det
        self.save_txt = save_txt
        self.save_conf = save_conf
        self.save_crop = save_crop
        self.nosave = nosave
        self.visualize = visualize
        self.line_thickness = line_thickness
        self.hide_labels = hide_labels
        self.hide_conf = hide_conf
        self.half = half
        self.dnn = dnn
        self.vid_stride = vid_stride
        self.classify = classify
        self.v8 = v8
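
# Construction sketch: Detect (below) builds a YoloOpt internally, but the
# options holder can also be instantiated directly when testing the plumbing.
# The values here are arbitrary examples, not defaults used anywhere in this file.
# opt = YoloOpt(weights='fall.pt', source='changshusql1103.db', conf_thres=0.4, v8=False)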
class Detect:
    def __init__(self, weights=ROOT / 'yolov5s.pt', imgsz=(640, 640), source="changshusql1103.db", classes=None, device=None, classify=False, conf_thres=0.25, v8=False):
        print(f'detectweights = {weights}')
        if v8:
            from ultralytics.nn.autobackend import AutoBackend
            from ultralytics.utils.ops import non_max_suppression
        else:
            from utils.general import non_max_suppression
        self.opt = YoloOpt(weights=weights, imgsz=imgsz, source=source, classes=classes, device=device, classify=classify, conf_thres=conf_thres, v8=v8)
        self.source = str(self.opt.source)
        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
        screenshot = self.source.lower().startswith('screen')
        if is_url and is_file:
            self.source = check_file(self.source)  # download
        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
        # self.save_dir = self.save_dir / Path(self.opt.weights).stem
        # self.save_dir.mkdir(parents=True, exist_ok=True)
        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
        print(f'device = {self.opt.device}')
        device = select_device(self.opt.device)
        self.device = device
        # if v8:
        #     self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        #     if Path(weights).stem in ['arm']:
        #         self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        # else:
        #     self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        #     if Path(weights).stem in ['helmet','arm','uniform']:
        #         self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        # self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        # self.classify = classify
        # if self.classify:
        #     classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
        #     self.classifier_model = classifier_model.to(device)
        #     self.classifier_model.eval()
        # self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
        # self.model.warmup(imgsz=(1 , 3, *self.imgsz))
        self.get_keypoint = GetKeypoint()
        # load model
        self.pose_classfier_model = Model(None, 13, 2, 2)
        self.pose_classfier_model.load_state_dict(torch.load("posefallcls.pt"))
        self.pose_classfier_model.eval()
        self.pose_classfier_model.to(device)
        self.model = YOLO("posefall.pt")
        self.model = self.model.to(device)
        self.personmodel = YOLO('yolo11m.pt')
        self.personmodel = self.personmodel.to(device)
        self.readpoint()
        # print(self.imgsz)
        self.updatetime = time.time()
        self.updatemtime = time.time()
        self.filetime = os.path.getmtime(self.opt.weights)
        self.taskname = taskmap[Path(self.opt.weights).stem]()
        bs = 1  # batch_size
        if self.webcam:
            # self.view_img = check_imshow(warn=True)
            self.view_img = False
            # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
            # bs = len(dataset)
        elif screenshot:
            # NOTE: these two branches still rely on self.stride / self.pt from the
            # commented-out backend loading above; they are only reachable for non-.db sources.
            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
        else:
            dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
        t1 = threading.Thread(target=self.load, daemon=True)
        t1.start()
    @smart_inference_mode()
    def infer(self, queue, runmodel):
        pretime = time.time()
        seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
        # print("database opened successfully")
        while True:
            # print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
            if time.time() - pretime > 300:
                ret = self.readpoint()
                pretime = time.time()
                if not ret:
                    print(f'{Path(self.opt.weights).stem} {runmodel}')
                    runmodel.pop(Path(self.opt.weights).stem)
                    print(f'{Path(self.opt.weights).stem} {runmodel}')
                    break
            print(f'queuelen = {len(queue)}')
            for que in queue:
                if que.qsize() == 0:
                    print('queuezero')
                    time.sleep(0.01)
                if que.qsize() > 0:
                    # if time.time()-pretime>300:
                    #     ret = self.readpoint()
                    #     pretime = time.time()
                    #     if not ret:
                    #         print(f'{Path(self.opt.weights).stem} {runmodel}')
                    #         runmodel.pop(Path(self.opt.weights).stem)
                    #         print(f'{Path(self.opt.weights).stem} {runmodel}')
                    #         break
                    setframe = que.get()
                    # if setframe is not None
                    path, im, im0s, vid_cap, s, videotime, channels = setframe
                    # algchannel = list(self.dirmodel.keys())
                    # print(algchannel)
                    # print(path)
                    # algchannel = np.array(algchannel)
                    # channelsnp = np.array(channels)
                    # algindex = np.where(np.in1d(channelsnp, algchannel))[0]
                    # algindex = list(algindex)
                    # path = np.array(path)
                    # path = path[algindex]
                    # path = path.tolist()
                    # channels = np.array(channels)
                    # channels = channels[algindex]
                    # channels = channels.tolist()
                    # print(algindex)
                    # if len(algindex)==0:
                    #     continue
                    # for ia in algindex:
                    #     print(type(im0s[ia]))
                    #     print(im0s[ia].shape)
                    # im = im[algindex]
                    # for ia in algindex:
                    #     print(type(ia))
                    # try:
                    #     im0s = np.asarray(im0s)
                    # except Exception:
                    #     im0s = np.asarray(im0s,dtype=object)
                    # print(im0s.shape)
                    # im0s = im0s[algindex]
                    # # im0s = im0s.tolist()
                    # print(f'algindex = {algindex}')
                    # print(f'im0s ={im0s[0].shape}')
                    # videotime = np.array(videotime)
                    # videotime = videotime[algindex]
                    # videotime = tuple(map(tuple, videotime))
                    # global tag
                    # if self.contentid[0][3] == 1:
                    #     dataset.close()
                    #     print('newstreaming=', self.contentid[0][3])
                    #     conn = sqlite3.connect(self.source)
                    #     c = conn.cursor()
                    #     c.execute("UPDATE CHANGESTREAM set streamimg = 0 , addstream=0,delstream=0 where modelname= (?)",(Path(self.opt.weights).stem,))
                    #     print(123)
                    #     conn.commit()
                    #     c.close()
                    #     conn.close()
                    #     print('opencv1')
                    #     cv2.destroyAllWindows()
                    #     print('opencv')
                    #     break
                    # else:
                    #     print('nonewstreaming=', self.contentid[0][3])
                    # with self.dt[0]:
                    #     im = torch.from_numpy(im).to(self.model.device)
                    #     im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
                    #     im /= 255  # 0 - 255 to 0.0 - 1.0
                    #     if len(im.shape) == 3:
                    #         im = im[None]  # expand for batch dim
                    # # Inference
                    # with self.dt[1]:
                    #     visualize = increment_path(self.save_dir / Path(path).stem,
                    #                                mkdir=True) if self.opt.visualize else False
                    #     # print('error')
                    #     # print(self.model)
                    #     pred = self.model(im, augment=self.opt.augment, visualize=visualize)
                    self.postprocess(path, im0s, im, s, videotime, channels)
                    # print(f'predshape= {')
                    # NMS
                    # processlist = []
                    # for i in range(3):
                    #     process = Process(target=self.postprocess,args=(pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s))
                    #     process = Process(target=self.preprocess)
                    #     process.start()
                    #     processlist.append(process)
                    # for j in processlist:
                    #     j.join()
                    # with ProcessPoolExecutor(3) as ppool:
                    #     for i in range(3):
                    #         print('hello')
                    #         ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
                    #         ppool.submit(func1, '张三', i)
                    #         ppool.submit(self.preprocess)
                    # self.postprocess(pred, path, im0s, dataset, im, s)
                    # process = Process(target=self.postprocess, args=(pred, path, im0s, dataset, im, s))
                    # self.postprocess(pred, path, im0s, im, s, videotime)
                    # process.start()

    # def preprocess(self):
    #     print('preprocess-----------------------------------------------')
    def postprocess(self, path, im0s, im, s, videotime, channels):
        # if time.time()-self.updatemtime>300:
        #     if self.filetime != os.path.getmtime(self.opt.weights):
        #         device = select_device(self.opt.device)
        #         print("load new load")
        #         self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        #         self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        #     # try:
        #     # if modelalgdir[Path(self.opt.weights).stem]!='0':
        #     print(modelalgdir[Path(self.opt.weights).stem])
        #     try:
        #         rea = requests.post(url=urla,data={'algorithmCode':modelalgdir[Path(self.opt.weights).stem]}).json()['data']
        #         con = rea[0]['confidence']
        #         self.opt.conf_thres = con
        #     except Exception:
        #         print('error')
        #     # else:
        #     #     self.opt.conf_thres = 0.25
        #     # except Exception:
        #     #     print('posturlaerror')
        #     self.updatemtime = time.time()
        # seen = 0
        # # dt = (Profile(), Profile(), Profile())
        # print(f'senn = {seen}')
        # windows = []
        # if Path(self.opt.weights).stem:
        #     labelnamelist = []
        # with self.dt[2]:
        #     # print(f'cropshape={pred.shape}')
        #     if self.opt.v8:
        #         from ultralytics.utils.ops import non_max_suppression
        #     else:
        #         from utils.general import non_max_suppression
        #     pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
        #                                self.opt.agnostic_nms, max_det=self.opt.max_det)
        # # Second-stage classifier (optional)
        # # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
        # if self.classify and Path(self.opt.weights).stem != 'persontre':
        #     pred = apply_classifier1(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
        # # Process predictions
        # # print(f'predshape={pred.shape}')
        # for i, det in enumerate(pred):  # per image
        #     if time.time()-self.updatetime>300:
        #         dataele = {
        #             "algorithmCode": self.dirmodel[channels[i]]['classindex'],
        #             "algorithmIp": self.dirmodel[channels[i]]['algip'],
        #             "channel": self.dirmodel[channels[i]]['channel']
        #         }
        #         try:
        #             resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
        #             resultele = resultele.split(',||')
        #             resultele = tuple(resultele)
        #             point = '%s:'*len(resultele) % resultele
        #             self.dirmodel[channels[i]]['point'] = point[:-2]
        #         except Exception:
        #             print('post error')
        #         if Path(self.opt.weights).stem == 'personcount':
        #             try:
        #                 resultper = requests.post(url=urlperson,data=dataele).json()['data']
        #                 personcountdir[channels[i]] = int(resultper)
        #             except Exception:
        #                 print('urlpersonerror')
        #         if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty':
        #             datatime = {
        #                 "algorithmCode": self.dirmodel[channels[i]]['classindex'],
        #                 "algorithmIp": self.dirmodel[channels[i]]['algip'],
        #                 "channel": self.dirmodel[channels[i]]['channel']
        #             }
        #             try:
        #                 resulttime = requests.post(url=urltime,data=dataele).json()['data']
        #                 self.dirmodel[channel]['durtime'] = int(resulttime)
        #             except Exception:
        #                 print('posttime error')
        #         self.updatetime = time.time()
        #     seen += 1
        #     if self.webcam:  # batch_size >= 1
        #         p, im0 = path[i], im0s[i].copy()
        #         s += f'{i}: '
        #     else:
        #         p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
        #     p = Path(p)  # to Path
        #     save_path = str(self.save_dir / p.name)  # im.jpg
        #     # txt_path = str(self.save_dir / 'labels' / p.stem) + (
        #     #     ''  # if dataset.mode == 'image' else f'_{frame}')  # im.txt
        #     s += '%gx%g ' % im.shape[2:]  # print string
        #     gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
        #     imc = im0.copy()  # for save_crop
        #     annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
        #     flag = False
        #     if len(det) and Path(self.opt.weights).stem != 'duty':
        #         # flag = True
        #         # Rescale boxes from img_size to im0 size
        #         det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
        #         # Print results
        #         for c in det[:, 5].unique():
        #             n = (det[:, 5] == c).sum()  # detections per class
        #             s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
        #         # Write results
        #         if Path(self.opt.weights).stem in ['arm']:
        #             personpred = self.personmodel(im[i][None], None, None)
        #             personpred = non_max_suppression(personpred, self.opt.conf_thres, self.opt.iou_thres, 0,
        #                                              self.opt.agnostic_nms, max_det=self.opt.max_det)
        #             if len(personpred[0])==0:
        #                 flag = False
        #             else:
        #                 persondet = []
        #                 personpred = personpred[0]
        #                 personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
        #                 for *perxyxy, conf, cls in reversed(personpred):
        #                     print(perxyxy)
        #                     x1, y1, x3, y3 = perxyxy
        #                     x1, y1, x3, y3 = int(x1), int(y1), int(x3), int(y3)
        #                     x2, y2 = x3, y1
        #                     x4, y4 = x1, y3
        #                     persondet.append([[x1,y1],[x2,y2],[x3,y3],[x4,y4]])
        #                 flag = self.taskname.getflag(det,persondet,annotator,self.dirmodel[channels[i]]['fence'],self.dirmodel[channels[i]]['point'],self.names,self.dirmodel[channels[i]]['label'])
        #         else:
        #             if Path(self.opt.weights).stem in ['personcount']:
        #                 flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
        #                                              self.dirmodel[channels[i]]['point'], self.names,
        #                                              self.dirmodel[channels[i]]['label'], personcountdir[channels[i]])
        #             elif Path(self.opt.weights).stem in ['persontre']:
        #                 flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
        #                                              self.dirmodel[channels[i]]['point'], self.names,
        #                                              self.dirmodel[channels[i]]['label'], 1, imc)
        #             else:
        #                 flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
        #                                              self.dirmodel[channels[i]]['point'], self.names,
        #                                              self.dirmodel[channels[i]]['label'])
        yolo_pose_results = self.model.predict(im0s, conf=0.8)
        yolo_person_results = self.personmodel.predict(im0s, conf=0.8, classes=0)
        for i, (result, personresult) in enumerate(zip(yolo_pose_results, yolo_person_results)):
            flag = False
            boxes = result.boxes.xyxy.cpu().numpy().tolist()
            personboxes = personresult.boxes.xyxy.cpu().numpy().tolist()
            confs = result.boxes.conf.cpu().numpy().tolist()
            all_keypoints = result.keypoints.data.cpu().numpy().tolist()
            pose_classfier_results = []
            for box, conf, keypoints in zip(boxes, confs, all_keypoints):
                iouflag = False
                for personbox in personboxes:
                    iou, _ = compute_IOU(box, personbox)
                    print(f'judgeiou = {iou}')
                    if iou > 0.5:
                        iouflag = True
                        break
                if not iouflag:
                    break  # NOTE: bails out of the whole box loop; a per-box skip would use `continue`
                x1, y1, x2, y2 = box
                x, y, w, h = x1, y1, x2 - x1, y2 - y1
                # normalize keypoints into the box frame, centered on the box
                n_keypoints = [[(kp[0] - x) / w - 0.5, (kp[1] - y) / h - 0.5] if kp[0] > 0 and kp[1] > 0 else kp[:2] for kp in keypoints]
                n_keypoints = extract_keypoint(self.get_keypoint, n_keypoints)
                if n_keypoints[-12:].count(0) >= 2 * 2:
                    continue
                if n_keypoints.count(0) >= 4 * 2:
                    continue
                if w < h:
                    continue
                pose_data = torch.Tensor([n_keypoints]).to(self.device)
                pose_data = pose_data.reshape(1, 13, 2)
                with torch.no_grad():
                    p = self.pose_classfier_model(pose_data)
                    prob = F.softmax(p, dim=1)
                    index = prob.argmax()
                    if index == 0:
                        score = float(prob[0][index].cpu().numpy())
                        pose_classfier_results.append(
                            Box(left=x1, top=y1, right=x2, bottom=y2, box_conf=conf, pose_classifer_conf=score, label="falling"))
            image = im0s[i]
            imc = im0s[i].copy()
            for res in pose_classfier_results:
                flag = True
                cv2.rectangle(image, (int(res.left), int(res.top)), (int(res.right), int(res.bottom)), (0, 255, 0), 2)
            if flag:
                # if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
                self.dirmodel[channels[i]]['detframe'].pop(0)
                self.dirmodel[channels[i]]['detframe'].append(1)
                self.dirmodel[channels[i]]['preim'] = image
                self.dirmodel[channels[i]]['oripreim'] = imc
                self.dirmodel[channels[i]]['posttime'] = videotime[i]
                print(self.dirmodel[channels[i]]['detframe'])
                # self.dirmodel[channels[i]]['imgtime'] = videotime[i]
            else:
                # print(f'deti= {i}')
                # print(detframe[i])
                # if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
                self.dirmodel[channels[i]]['detframe'].pop(0)
                self.dirmodel[channels[i]]['detframe'].append(0)
                print(self.dirmodel[channels[i]]['detframe'])
                # self.dirmodel[channels[i]]['imgtime'] = videotime[i]
                # print(detframe[i])
            # Stream results
            # im0 = annotator.result()
            # print(f'i = {i}')
            # print(channels[i])
            # print(postpretime[i])
            # print(detframe[i])
            if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1) >= 1:
                self.dirmodel[channels[i]]['detflag'] = True
                self.dirmodel[channels[i]]['detpretime'] = time.time()
            elif self.dirmodel[channels[i]]['detframe'].count(1) == 0:
                self.dirmodel[channels[i]]['detflag'] = False
                self.dirmodel[channels[i]]['detpretime'] = float('inf')
            # Stream results
            # im0 = annotator.result()
            if time.time() - self.dirmodel[channels[i]]['postpretime'] > 30 and time.time() - self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime'] and self.dirmodel[channels[i]]['detflag']:
                # if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
                # print(time.time() - self.dirmodel[channels[i]]['detpretime'])
                # print(self.dirmodel[channels[i]]['detflag'])
                print('post-------------------------------------------------------------------------')
                # time.sleep(30)
                # print(time.time() - postpretime[i])
                # print(dirmodel[channels[i]]['preim'].shape)
                success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
                content = encoded_image.tobytes()
                successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
                contentori = encoded_imageori.tobytes()
                filename = f'{int(time.time())}.jpg'
                filenameori = f'ori_{int(time.time())}.jpg'
                # print(f'str(p) {p.name}')
                print(channels[i])
                payload = {'channel': self.dirmodel[channels[i]]['channel'],
                           'classIndex': self.dirmodel[channels[i]]['classindex'],
                           'ip': self.dirmodel[channels[i]]['algip'],
                           'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
                           'videoUrl': channels[i]}
                files = [
                    ('file', (filename, content, 'image/jpeg')),
                    ('oldFile', (filenameori, contentori, 'image/jpeg')),
                ]
                try:
                    result = requests.post(url, data=payload, files=files)
                    print(result)
                except Exception:
                    print('posterror')
                # time.sleep(3000)
                self.dirmodel[channels[i]]['postpretime'] = time.time()
                self.dirmodel[channels[i]]['detflag'] = False
                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
                year = time.strftime('%Y', time.localtime(time.time()))
                month = time.strftime('%m', time.localtime(time.time()))
                day = time.strftime('%d', time.localtime(time.time()))
                savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
                savefold = Path(savefold)
                savefold.mkdir(parents=True, exist_ok=True)
                detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
                detsavefold = Path(detsavefold)
                detsavefold.mkdir(parents=True, exist_ok=True)
                cv2.imwrite(f'{savefold}/{timesave}.png', self.dirmodel[channels[i]]['oripreim'])
                cv2.imwrite(f'{detsavefold}/{timesave}det.png', self.dirmodel[channels[i]]['preim'])
            # if self.dirmodel[channels[i]]['detframe'].count(1)==0:
            #     self.dirmodel[channels[i]]['detflag'] = False
            # time.sleep(1)
            self.view_img = False
            if self.view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
                im1 = cv2.resize(im0, (1280, 720))
                cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
                cv2.waitKey(1)  # 1 millisecond
            # Save results (image with detections)
            # Print time (inference-only)
            print(f'channels[i]={channels[i]}')
            # LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
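
    # Condensed sketch of the upload gate above (illustrative, never called):
    # a post fires only after a 30 s cooldown since the last upload and once the
    # per-channel durtime has elapsed since the detection flag was armed.
    @staticmethod
    def _should_post_sketch(chan, now):
        return (now - chan['postpretime'] > 30
                and now - chan['detpretime'] > chan['durtime']
                and chan['detflag'])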
    def load(self):
        conn = sqlite3.connect(self.source)
        c = conn.cursor()
        while True:
            # print("database opened successfully")
            cursor = c.execute(
                "SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= (?)", (Path(self.opt.weights).stem,))
            # content = cursor.fetchall()
            # if content[0][1] ==1 or content[0][2] ==1:
            #     c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
            #     print("update changestream")
            #     conn.commit()
            #     cursor = c.execute(
            #         "SELECT modelname, addstream,delstream,streamimg from CHANGESTREAM WHERE modelname='yolov5s'")
            self.contentid = cursor.fetchall()
            # global tag
            # tag = Value('i', self.contentid[0][3])
            # print(tag.value==1)
            print(f'loadcontent={self.contentid[0][3]}')
            time.sleep(3)
        c.close()
        conn.close()
    def readpoint(self):
        data = {
            "algorithmCode": '25',
            "deviceIp": None,
            'fwqCode': None
        }
        self.dirmodel = {}
        result = requests.post(url=urlt, data=data).json()['data']
        channell = []
        for info in result:
            # content = cursor.fetchall()
            # self.dirmodel = {}
            # for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
            #     address = f'{address[:-1]}0'
            channel = info["deviceChannel"]
            channell.append(channel)
            self.dirmodel[channel] = {}
            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"]) > 0 else 0
            if Path(self.opt.weights).stem == "uniform":
                self.dirmodel[channel]['fence'] = 1
            # self.dirmodel[channel]['point'] = point
            self.dirmodel[channel]['channel'] = info['deviceChannel']
            self.dirmodel[channel]['classindex'] = info['algorithmCode']
            self.dirmodel[channel]['ip'] = info['deviceIp']
            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
            dataele = {
                "algorithmCode": self.dirmodel[channel]['classindex'],
                "algorithmIp": self.dirmodel[channel]['algip'],
                "channel": self.dirmodel[channel]['channel']
            }
            resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
            resultele = resultele.split(',||')
            resultele = tuple(resultele)
            point = '%s:' * len(resultele) % resultele
            if Path(self.opt.weights).stem == 'personcount':
                resultper = requests.post(url=urlperson, data=dataele).json()['data']
                personcountdir[channel] = int(resultper)
            if len(point[:-2]) <= 1 and Path(self.opt.weights).stem == "uniform":
                self.dirmodel[channel]['point'] = "256#144,1024#144,1024#576,256#576"
            else:
                self.dirmodel[channel]['point'] = point[:-2]
            self.dirmodel[channel]['preim'] = None
            self.dirmodel[channel]['oripreim'] = None
            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
            self.dirmodel[channel]['postpretime'] = 0
            self.dirmodel[channel]['detflag'] = False
            self.dirmodel[channel]['detpretime'] = float('inf')
            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty':
                datatime = {
                    "algorithmCode": self.dirmodel[channel]['classindex'],
                    "algorithmIp": self.dirmodel[channel]['algip'],
                    "channel": self.dirmodel[channel]['channel']
                }
                resulttime = requests.post(url=urltime, data=dataele).json()['data']
                self.dirmodel[channel]['durtime'] = int(resulttime)
            else:
                self.dirmodel[channel]['durtime'] = 0
            self.dirmodel[channel]['posttime'] = 0
        print(self.dirmodel)
        return sorted(channell)
    # str = str.split(":")
    # lista = []
    # for liststr in str:
    #     if len(liststr) > 0:
    #         li = liststr.split(',')
    #         listpoint = []
    #         for i, j in zip(li[::2], li[1::2]):
    #             listpoint.append((i, j))
    #         lista.append(listpoint)
    # return listpoint
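
# Runnable version of the parsing sketch above: split a fence string of
# ':'-separated polygons with ','-separated coordinates into lists of (x, y)
# tuples. The format is inferred from readpoint(); the function name and
# return shape are illustrative assumptions, and nothing below calls it.
def _parse_fence_points(point_str):
    lista = []
    for liststr in point_str.split(':'):
        if len(liststr) > 0:
            li = liststr.split(',')
            lista.append([(i, j) for i, j in zip(li[::2], li[1::2])])
    return lista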
# def preprocess():
#     print('preprocess-----------------------------------------------')
def getframe(queuelist, channelsl, source, tt, numworks, lock, numworkv):
    while True:
        print("dataloader")
        imgsz = [640, 640]
        print(f'source = {source}')
        dataset = LoadStreamsSQLTN(channelsl, source, img_size=640,
                                   auto=True, vid_stride=20, tt=tt, numworks=numworks)
        bs = len(dataset)
        vid_path, vid_writer = [None] * bs, [None] * bs
        # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
        # self.postpretime = [0]*bs
        # Run inference
        # imgsz = (1 , 3, *self.imgsz)
        print(imgsz)
        # self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
        # print("database opened successfully")
        pretime = time.time()
        tag = 0
        sourcebase = 'project0117.db'
        for path, im, im0s, vid_cap, s, videotime, channels in dataset:
            # print('*'*21)
            # global tag
            # print('datasetvideo')
            # if time.time()-pretime > 300:
            #     pretime = time.time()
            #     conn = sqlite3.connect(sourcebase)
            #     c = conn.cursor()
            #     cursor = c.execute("SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= 'helmet'")
            #     contentid = cursor.fetchall()
            #     tag = contentid[0][3]
            # if tag == 1:
            #     lock.acquire()
            #     numworkv.value += 1
            #     dataset.close()
            #     if numworkv.value==3:
            #         print('newstreaming=', tag)
            #         conn = sqlite3.connect(source)
            #         c = conn.cursor()
            #         c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname='helmet'")
            #         print(123)
            #         conn.commit()
            #         c.close()
            #         conn.close()
            #     lock.release()
            #     print('opencv1')
            #     # cv2.destroyAllWindows()
            #     print('opencv')
            #     break
            # else:
            #     print('nonewstreaming=', tag)
            if time.time() - pretime > 300:
                channellist = []
                pretime = time.time()
                data = {
                    "algorithmCode": '25',
                    "deviceIp": None,
                    "fwqCode": None
                }
                try:
                    result = requests.post(url=urlt, data=data).json()['data']
                except Exception:
                    result = []
                for info in result:
                    data = {
                        "channel": info["deviceChannel"],
                        "ip": info["deviceAlgorithmIp"]
                    }
                    chaflag = any(info["deviceChannel"] in t for t in channellist)
                    # personcountdir[channel] = num
                    if not chaflag:
                        address = requests.post(url=urlrtsp, data=data).json()['msg']
                        channellist.append((info['deviceChannel'], address))
                channelsa = []
                sourcea = []
                channellist = set(channellist)
                channellist = sorted(channellist, key=lambda x: x[0])
                # channellist = set(channellist)
                for cha, add in channellist:
                    channelsa.append(cha)
                    sourcea.append(add)
                channelsl = sorted(channelsl)
                # channelsa = sorted(channelsa)
                if channelsa != channelsl and len(channelsa) > 0:
                    print(f'channelsa = {channelsa}')
                    print(f'channelsl = {channelsl}')
                    dataset.close()
                    channelsl = channelsa
                    source = sourcea
                    break
            for key, value in queuelist.items():
                hour = time.localtime(time.time()).tm_hour
                if hour in range(7, 18):
                    value[-1].put((path, im, im0s, vid_cap, s, videotime, channels))
                    value[-1].get() if value[-1].qsize() == 10 else time.sleep(0.001)
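
# Freshness-gate sketch (illustrative, never called): the put/get pattern at
# the end of getframe() keeps each bounded queue from saturating with stale
# frames — whenever a put fills the queue, the oldest item is popped at once.
# Note that Queue.qsize() can be unreliable on some platforms (e.g. macOS).
def _freshness_gate_demo():
    q = Queue(maxsize=10)
    for frame in range(25):
        q.put(frame)
        q.get() if q.qsize() == 10 else time.sleep(0.001)
    return q.qsize()  # stays below the bound; the oldest frames were dropped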
def getmutpro(channels, source, streamlist, numworkv, lock, numworks=1, modellen=None):
    processlist = []
    queuelist = {}
    for i in range(numworks):
        for model in modellen:
            queue = Queue(maxsize=10)
            queuelist.setdefault(model, [])
            queuelist[model].append(queue)
        process = Process(target=getframe,
                          args=(queuelist, channels, source, i, numworks, lock, numworkv))
        processlist.append(process)
        process.start()
        # queuelist.append(queue)
    return queuelist
# path = []
# im0s = []
# vid_cap = None
# s = ''
# videotime = []
# while True:
#     imlist = []
#     pathlist = []
#     im0slist = []
#     channelslist = []
#     vid_cap = None
#     s = ''
#     videotimelist = []
#     for q in queuelist:
#         if q.qsize()>0:
#             setframe = q.get()
#             path, im, im0s, vid_cap, s, videotime ,channels = setframe
#             pathlist += path
#             channelslist += channels
#             im0slist += im0s
#             videotimelist += videotime
#             imlist.append(im)
#     if len(imlist)>0:
#         im = np.concatenate(imlist)
#     if len(pathlist)>0:
#         print(len(path),im.shape,len(im0s))
#         streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
#         # print(f'streamlist = {len(streamlist)}')
#     streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
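
# Wiring sketch (mirrors __main__ below): getmutpro starts one getframe
# process per worker and returns, per model name, the list of bounded queues
# those workers feed; Detect.infer then consumes queuelist[model]. The values
# here are illustrative, not defaults.
# queuelist = getmutpro(channellist, source, manager.list(), Value('i', 0),
#                       multiprocessing.Lock(), numworks=3, modellen=['fall'])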
def modelfun(queue, weights, sourcedb, classes, device, classify, conf_thres, runmodel, v8=False):
    print(weights)
    detectdemo = Detect(weights=weights, source=sourcedb, classes=classes, device=device, classify=classify, conf_thres=conf_thres, v8=v8)
    detectdemo.infer(queue, runmodel)
def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
    opt = parser.parse_args()
    return opt
def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))  # NOTE: run() is not defined in this file; main() is never called below
if __name__ == '__main__':
    # torch.multiprocessing.set_start_method('spawn')
    # set_start_method('spawn')
    opt = parse_opt()
    dbpath = 'projectnew.db'
    conn = sqlite3.connect(dbpath)
    # print("database opened successfully")
    c = conn.cursor()
    task(c, conn, urlt, urla)
    cursor = c.execute('select channel,algip from stream where modelname = "fall"')
    result = cursor.fetchall()
    for channel, algip in result:
        data = {
            "channel": channel,
            "ip": algip
        }
        # personcountdir[channel] = num
        address = requests.post(url=urlrtsp, data=data).json()['msg']
        c.execute('UPDATE STREAM set address= (?) where channel =(?)', (address, channel))
        conn.commit()
    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
    # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
    content = cursor.fetchall()
    cursor = c.execute("SELECT address,channel from STREAM where modelname = 'fall'")
    # cursor = c.execute("SELECT address from STREAM where modelname = 'helmet'")
    contenta = cursor.fetchall()
    source = []
    modellist = []
    addcha = []
    channellist = []
    for i in contenta:
        addcha.append((i[0], i[1]))
        # modellist.append(i[1])
    addcha = set(addcha)
    addcha = sorted(addcha, key=lambda x: x[1])
    for add, cha in addcha:
        source.append(add)
        channellist.append(cha)
    # source = set(source)
    print(addcha)
    source = list(source)
    cursor = c.execute("SELECT modelname from STREAM where (modelname ='fall')")
    contentm = cursor.fetchall()
    for m in contentm:
        modellist.append(m[0])
    modellist = set(modellist)
    modellist = list(modellist)
    contentlist = []
    for i in content:
        contentlist.append(i[0])
    # source.sort()
    n = len(content)
    print(f'modelname={n}')
    print(content)
    # content.reverse()
    print(content)
    print(source)
    # main(opt)
    # processes = []
    streamqueue = Queue(maxsize=4)
    numworkv = Value('i', 0)
    manager = Manager()
    lock = multiprocessing.Lock()
    streamlist = manager.list()
    numworks = 3
    modellen = []
    for i in modellist:
        if i in contentlist:
            modellen.append(i)
    queuelist = getmutpro(channellist, source, streamlist, numworkv, lock, numworks, modellen)
    deid = 0
    # pool = ThreadPoolExecutor(max_workers=n)
    runmodel = manager.dict()
    while True:
        for i in modellist:
            if i in contentlist:
                if i not in runmodel:
                    # print(i)
                    # detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
                    c.execute('select conf,cla from changestream where modelname = (?)', (i,))
                    rea = c.fetchall()
                    print(f'weights = {i}.pt')
                    if i in ['duty', 'danager', 'inspection', 'cross', 'personcount']:
                        process = Process(target=modelfun, args=(queuelist[i], f'{i}.pt', dbpath, [0], 0, rea[0][1], rea[0][0], runmodel))
                    else:
                        if i in ['uniform', 'arm', 'helmet']:
                            process = Process(target=modelfun, args=(queuelist[i], f'{i}.pt', dbpath, None, 0, rea[0][1], rea[0][0], runmodel, True))
                        else:
                            process = Process(target=modelfun, args=(queuelist[i], f'{i}.pt', dbpath, None, 0, rea[0][1], rea[0][0], runmodel))
                    # elif i in ['helmet','fire','smoke','fall']:
                    #     process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,True))
                    # else:
                    #     process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,False))
                    # processes.append(process)
                    # process.start()
                    # detectobj = Process(target=detectdemo.infer,args=(queue,))
                    # Detect(weights=f'{i[0]}.pt')
                    time.sleep(3)
                    process.start()
                    deid = deid + 1
                    runmodel[i] = 1
        time.sleep(600)
        task(c, conn, urlt, urla)
        # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
        # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
        cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
        content = cursor.fetchall()
        contentlist = []
        for con in content:
            contentlist.append(con[0])
        # cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
        cursor = c.execute("SELECT address,channel from STREAM where modelname = 'fall'")
        contenta = cursor.fetchall()
        source = []
        modellist = []
        addcha = []
        channellist = []
        for i in contenta:
            addcha.append((i[0], i[1]))
            # modellist.append(i[1])
        addcha = set(addcha)
        addcha = sorted(addcha)
        for a, cha in addcha:
            source.append(a)
            channellist.append(cha)
        print(addcha)
        # source = set(source)
        source = list(source)
        # source.sort()
        cursor = c.execute("SELECT modelname from STREAM where (modelname = 'fall')")
        contentm = cursor.fetchall()
        for m in contentm:
            modellist.append(m[0])
        modellist = set(modellist)
        n = len(content)
        print(f'modelname={n}')
        print(content)
        # content.reverse()
        print(content)
        # pool.submit(detectobj.infer)
        # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
        # content = cursor.fetchall()
        # n = len(content)
        # print(f'modelname={n}')
        # print(content)
        # content.reverse()
        # print(content)
        # main(opt)
        # processes = []
        # pool = ProcessPoolExecutor(max_workers=n)
        # for i in content:
        #     print(i)
        #     detectdemo=Detect(weights=f'{i[0]}.pt')
        #     process = Process(target=detectdemo.infer)
        #     processes.append(process)
        #     process.start()
        #     detectobj = Detect(weights=f'{i[0]}.pt')
        #     time.sleep(3)
        #     pool.submit(detectobj.infer)
        #     print('111111111111111111111111111111111111111111111111111111111')
        #     pool.submit(TestA().func1, '张三', i)
        # print('----------------------------------------------------------------')
        # time.sleep(3000)
        # wait for all processes to finish
        # for process in processes:
        #     process.join()
        # pool.submit(Detect(weights=f'{i[0]}.pt').infer)
        # if isinstance(opt.weights, list):
        #     opt.weights = opt.weights[0]
        # signal.signal(signal.SIGINT, my_handler)
        # detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
        # detectdemo1.infer()
        # a = Test
        # with ProcessPoolExecutor(3) as ppool:
        #     for i in range(3):
        #         print('hello')
        #         ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
        #         ppool.submit(TestA().func1, '张三', i)
        # ta = TestA()
        # with ProcessPoolExecutor(5) as ppool:  # create a pool of 5 processes
        #     for i in range(1, 4):
        #         ppool.submit(func1, '张三', i)
        # f1 = pool.submit(detectdemo1.infer)
        # print("thread 1 -----------------------------------------------------------------------------------")
        # detectdemo2 = Detect(weights=r"helmet.pt")
        # f2 = pool.submit(detectdemo2.infer)
        # print("thread 2 -------------------------------------------------------------------------------------")
        # detectdemo3 = threading.Thread(target=detectdemo3.infer)
        # detectdemo3 = Detect(weights=r"fall.pt")
        # f3 = pool.submit(detectdemo3.infer)