detectopencvmutbig.py 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
  2. """
  3. Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
  4. Usage - sources:
  5. $ python detect.py --weights yolov5s.pt --source 0 # webcam
  6. img.jpg # image
  7. vid.mp4 # video
  8. screen # screenshot
  9. path/ # directory
  10. list.txt # list of images
  11. list.streams # list of streams
  12. 'path/*.jpg' # glob
  13. 'https://youtu.be/Zgi9g1ksQHc' # YouTube
  14. 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
  15. Usage - formats:
  16. $ python detect.py --weights yolov5s.pt # PyTorch
  17. yolov5s.torchscript # TorchScript
  18. yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
  19. yolov5s_openvino_model # OpenVINO
  20. yolov5s.engine # TensorRT
  21. yolov5s.mlmodel # CoreML (macOS-only)
  22. yolov5s_saved_model # TensorFlow SavedModel
  23. yolov5s.pb # TensorFlow GraphDef
  24. yolov5s.tflite # TensorFlow Lite
  25. yolov5s_edgetpu.tflite # TensorFlow Edge TPU
  26. yolov5s_paddle_model # PaddlePaddle
  27. """
  28. import matplotlib.path as mat
  29. import requests
  30. import argparse
  31. import os
  32. import platform
  33. import sqlite3
  34. import sys
  35. import threading
  36. import time
  37. from pathlib import Path
  38. import signal
  39. import torch
  40. from concurrent.futures import ThreadPoolExecutor
  41. from concurrent.futures import ProcessPoolExecutor
  42. from multiprocessing import Process,Manager,Value
  43. from multiprocessing import Queue
  44. from multiprocessing import set_start_method
  45. import multiprocessing
  46. import multiprocessing as mp
  47. import numpy as np
  48. from torchvision import transforms
  49. FILE = Path(__file__).resolve()
  50. ROOT = FILE.parents[0] # YOLOv5 root directory
  51. if str(ROOT) not in sys.path:
  52. sys.path.append(str(ROOT)) # add ROOT to PATH
  53. ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
  54. from models.common import DetectMultiBackend
  55. from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLT,LoadStreamsSQLTN
  56. from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
  57. increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,apply_classifier1,apply_classifieruniform,compute_IOU,task,apply_classifierarm)
  58. from utils.plots import Annotator, colors, save_one_box
  59. from utils.torch_utils import select_device, smart_inference_mode
  60. from utils.renwu import newHelmet,newUniform,Fall,Personcount,Arm,Bag,Cross,Extinguisher,Persontre,Bag,Danager
  61. from transformers import AutoProcessor, AutoModelForVision2Seq
  62. #from testpool import func1,TestA
  63. # def my_handler(signum, frame):
  64. # exit(0)
  65. #url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
  66. url = "http://172.19.152.231/open/api/operate/upload"
  67. urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
  68. urlt = "http://172.19.152.231/open/api/operate/taskList"
  69. urla = "http://172.19.152.231/open/api/operate/algorithmList"
  70. urlele = "http://172.19.152.231/open/api/operate/fence"
  71. urltime = "http://172.19.152.231/open/api/operate/getTime"
  72. urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
  73. #modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
  74. #algmodel = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
  75. modelnamedir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'duty','98':'face','55':'oil','52':'jingdian','53':'rope','54':'personcar','39':'inspection','11':'reflective','12':'phone','66':'extinguisher','67':'tizi','68':'menjin','35':'arm','36':'persontre','33':'bag'}
  76. modellabeldir = {'0':'head,person','8':'person','10':'black_work_clothes,blue_work_clothes,person','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'person','98':'face','55':'oil','52':'person,hand,ball','53':'rope','54':'person','39':'person','11':'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other','12':'phone','66':'extinguisher','67':'person,tizi','68':'person','35':'barearm,arm','36':'person,foot,cart,bag,box','33':'handbox,handbag'}
  77. modelalgdir = {}
  78. personcountdir = {}
  79. for key,value in modelnamedir.items():
  80. modelalgdir[value] = key
  81. taskmap = {'helmet':newHelmet,'uniform':newUniform,'fall':Fall,'personcount':Personcount,'arm':Arm,'bag':Bag,'cross':Cross,'extinguisher':Extinguisher,'persontre':Persontre,'bag':Bag,'danager':Danager}
  82. mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
  83. test = transforms.Compose([transforms.Resize((224,224)),
  84. #transforms.CenterCrop(224),
  85. transforms.ToTensor(),
  86. transforms.Normalize(mean=mean, std=std)
  87. ])
  88. def clapre(modelcla,claimg,clapoint):
  89. imgten = torch.stack(claimg,dim=0)
  90. clapoint = torch.stack(clapoint,dim=0)
  91. imgten = imgten.to(0)
  92. result = modelcla(imgten)
  93. result = F.softmax(result)
  94. print(result)
  95. index = result.argmax(1)
  96. index = index.cpu().numpy()
  97. index = np.argwhere(index<5)
  98. index = index.reshape(-1)
  99. print(index)
  100. if len(index)>0:
  101. print(clapoint[index])
  102. return clapoint[index]
  103. else:
  104. return None
  105. class YoloOpt:
  106. def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
  107. imgsz=(640,640),
  108. conf_thres=0.25,
  109. iou_thres=0.45,
  110. max_det=1000,
  111. device='',
  112. view_img=False,
  113. save_txt=False,
  114. save_conf=False,
  115. save_crop=False,
  116. nosave=True,
  117. classes=None,
  118. agnostic_nms=False,
  119. augment=False,
  120. visualize=False,
  121. update=False,
  122. project=ROOT / 'runs/detect',
  123. name='exp',
  124. exist_ok=False,
  125. line_thickness=1,
  126. hide_labels=False,
  127. hide_conf=False,
  128. half=False,
  129. dnn=False,
  130. vid_stride=10,
  131. classify=False,
  132. v8=False):
  133. self.weights = weights # 权重文件地址
  134. self.source = source # 待识别的图像
  135. self.data = data
  136. if imgsz is None:
  137. self.imgsz = (640, 640)
  138. self.imgsz = imgsz # 输入图片的大小,默认 (640,640)
  139. self.conf_thres = conf_thres # object置信度阈值 默认0.25 用在nms中
  140. self.iou_thres = iou_thres # 做nms的iou阈值 默认0.45 用在nms中
  141. self.device = device # 执行代码的设备,由于项目只能用 CPU,这里只封装了 CPU 的方法
  142. self.view_img = view_img # 是否展示预测之后的图片或视频 默认False
  143. self.classes = classes # 只保留一部分的类别,默认是全部保留
  144. self.agnostic_nms = agnostic_nms # 进行NMS去除不同类别之间的框, 默认False
  145. self.augment = augment # augmented inference TTA测试时增强/多尺度预测,可以提分
  146. self.update = update # 如果为True,则对所有模型进行strip_optimizer操作,去除pt文件中的优化器等信息,默认为False
  147. self.exist_ok = exist_ok # 如果为True,则对所有模型进行strip_optimizer操作,去除pt文件中的优化器等信息,默认为False
  148. self.project = project # 保存测试日志的参数,本程序没有用到
  149. self.name = name # 每次实验的名称,本程序也没有用到
  150. self.max_det = max_det
  151. self.save_txt = save_txt
  152. self.save_conf= save_conf
  153. self.save_crop= save_crop
  154. self.nosave = nosave
  155. self.visualize = visualize
  156. self.line_thickness = line_thickness
  157. self.hide_labels = hide_labels
  158. self.hide_conf = hide_conf
  159. self.half = half
  160. self.dnn = dnn
  161. self.vid_stride = vid_stride
  162. self.classify = classify
  163. self.v8 = v8
class Detect:
    """One detection worker.

    Owns a YOLO model (v5 via DetectMultiBackend or v8/v11 via AutoBackend),
    optional auxiliary person-detector and classifier models, and a daemon
    loader thread (self.load) that feeds frames consumed by infer().
    """

    def __init__(self, weights=ROOT / 'yolov5s.pt', imgsz=(640, 640), source="changshusql1103.db", classes=None, device=None, classify=False, conf_thres=0.25, v8=False):
        print(f'detectweights = {weights}')
        # v8 models use ultralytics' AutoBackend/NMS; v5 uses this repo's utils.
        if v8:
            from ultralytics.nn.autobackend import AutoBackend
            from ultralytics.utils.ops import non_max_suppression
        else:
            from utils.general import non_max_suppression
        self.opt = YoloOpt(weights=weights, imgsz=imgsz, source=source, classes=classes, device=device, classify=classify, conf_thres=conf_thres, v8=v8)
        self.source = str(self.opt.source)
        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
        # A '.db' source means "stream list stored in SQLite" and is treated like a webcam feed.
        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
        screenshot = self.source.lower().startswith('screen')
        if is_url and is_file:
            self.source = check_file(self.source)  # download
        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
        print(f'device = {self.opt.device}')
        device = select_device(self.opt.device)
        if v8:
            self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
            # 'arm' and 'uniform' tasks additionally load a dedicated person detector.
            if Path(weights).stem in ['arm', 'uniform']:
                if Path(weights).stem == 'arm':
                    self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
                elif Path(weights).stem == 'uniform':
                    self.personmodel = AutoBackend('yolo11m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        else:
            self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
            # v5 'helmet'/'arm' tasks use the person-count model as the person detector.
            if Path(weights).stem in ['helmet', 'arm']:
                self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        self.classify = classify
        if self.classify:
            # Every task except 'arm' loads a plain torch checkpoint "<task>cls.pt";
            # 'arm' goes through AutoBackend instead.
            if Path(weights).stem != "arm":
                classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
                self.classifier_model = classifier_model.to(device)
                self.classifier_model.eval()
            else:
                self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt", device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
        self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
        self.model.warmup(imgsz=(1, 3, *self.imgsz))  # one warmup pass to build kernels
        self.readpoint()  # load per-channel config (fences, points, labels)
        print(self.imgsz)
        self.updatetime = time.time()  # last threshold/fence refresh
        self.updatemtime = time.time()  # last weights-mtime check
        self.filetime = os.path.getmtime(self.opt.weights)  # for hot-reload detection
        self.taskname = taskmap[Path(self.opt.weights).stem]()  # instantiate the task handler
        bs = 1  # batch_size
        if self.webcam:
            self.view_img = False
        elif screenshot:
            # NOTE(review): LoadScreenshots is not among this file's visible imports — confirm.
            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
        else:
            dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
        # Background loader thread feeds the frame queues; daemon so it dies with the process.
        t1 = threading.Thread(target=self.load, daemon=True)
        t1.start()
    @smart_inference_mode()
    def infer(self, queue, runmodel):
        """Main inference loop for this worker.

        Pulls batched frames from the shared queues, keeps only the channels
        this model is configured for (keys of ``self.dirmodel``), runs the
        forward pass and hands results to ``postprocess()``.

        Args:
            queue: list of multiprocessing queues holding
                (path, im, im0s, vid_cap, s, videotime, channels) tuples.
            runmodel: shared dict of running model names; this worker removes
                itself and exits when readpoint() reports it is descheduled.
        """
        pretime = time.time()
        seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
        while True:
            # Only work between 07:00 and 19:59; otherwise idle.
            if time.localtime().tm_hour not in range(7, 20):
                time.sleep(30)
                continue
            # Refresh channel configuration every 5 minutes; stop when descheduled.
            if time.time() - pretime > 300:
                ret = self.readpoint()
                pretime = time.time()
                if not ret:
                    print(f'{Path(self.opt.weights).stem} {runmodel}')
                    runmodel.pop(Path(self.opt.weights).stem)
                    print(f'{Path(self.opt.weights).stem} {runmodel}')
                    break
            print(f'queuelen = {len(queue)}')
            for que in queue:
                if que.qsize() == 0:
                    print('queuezero')
                    time.sleep(0.01)
                if que.qsize() > 0:
                    setframe = que.get()
                    path, im, im0s, vid_cap, s, videotime, channels = setframe
                    # Keep only the channels assigned to this model.
                    algchannel = list(self.dirmodel.keys())
                    print(algchannel)
                    print(path)
                    algchannel = np.array(algchannel)
                    channelsnp = np.array(channels)
                    algindex = np.where(np.in1d(channelsnp, algchannel))[0]
                    algindex = list(algindex)
                    path = np.array(path)
                    path = path[algindex]
                    path = path.tolist()
                    channels = np.array(channels)
                    channels = channels[algindex]
                    channels = channels.tolist()
                    if len(algindex) == 0:
                        continue  # nothing in this batch belongs to us
                    im = im[algindex]
                    try:
                        im0s = np.asarray(im0s)
                    except Exception:
                        # Frames of mixed resolution cannot form a regular array.
                        im0s = np.asarray(im0s, dtype=object)
                    print(im0s.shape)
                    im0s = im0s[algindex]
                    print(f'algindex = {algindex}')
                    print(f'im0s ={im0s[0].shape}')
                    videotime = np.array(videotime)
                    videotime = videotime[algindex]
                    videotime = tuple(map(tuple, videotime))
                    # Preprocess: numpy -> torch, normalise to [0, 1].
                    with self.dt[0]:
                        im = torch.from_numpy(im).to(self.model.device)
                        im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
                        im /= 255  # 0 - 255 to 0.0 - 1.0
                        if len(im.shape) == 3:
                            im = im[None]  # expand for batch dim
                    # Inference
                    with self.dt[1]:
                        visualize = increment_path(self.save_dir / Path(path).stem,
                                                   mkdir=True) if self.opt.visualize else False
                        pred = self.model(im, augment=self.opt.augment, visualize=visualize)
                        # NMS and alarm logic live in postprocess (timed under dt[2] there).
                        self.postprocess(pred, path, im0s, im, s, videotime, channels)
  354. def postprocess(self, pred, path, im0s, im, s,videotime,channels):
  355. if time.time()-self.updatemtime>300:
  356. if self.filetime !=os.path.getmtime(self.opt.weights):
  357. device = select_device(self.opt.device)
  358. print("load new load")
  359. self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
  360. self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
  361. self.filetime = os.path.getmtime(self.opt.weights)
  362. #try:
  363. #if modelalgdir[Path(self.opt.weights).stem]!='0':
  364. print(modelalgdir[Path(self.opt.weights).stem])
  365. try:
  366. rea = requests.post(url=urla,data={'algorithmCode':modelalgdir[Path(self.opt.weights).stem]}).json()['data']
  367. con = rea[0]['confidence']
  368. self.opt.conf_thres = con
  369. except Exception:
  370. print('error')
  371. #else:
  372. # self.opt.conf_thres = 0.25
  373. #except Exception:
  374. #print('posturlaerror')
  375. self.updatemtime = time.time()
  376. seen = 0
  377. # dt = (Profile(), Profile(), Profile())
  378. print(f'senn = {seen}')
  379. windows = []
  380. if Path(self.opt.weights).stem:
  381. labelnamelist = []
  382. with self.dt[2]:
  383. #print(f'cropshape={pred.shape}')
  384. if self.opt.v8:
  385. from ultralytics.utils.ops import non_max_suppression
  386. else:
  387. from utils.general import non_max_suppression
  388. pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
  389. self.opt.agnostic_nms, max_det=self.opt.max_det)
  390. # Second-stage classifier (optional)
  391. # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
  392. if self.classify and Path(self.opt.weights).stem!='persontre':
  393. if Path(self.opt.weights).stem == 'arm':
  394. pred = apply_classifierarm(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
  395. else:
  396. pred = apply_classifier1(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
  397. # Process predictions
  398. #print(f'predshape={pred.shape}')
  399. for i, det in enumerate(pred): # per image
  400. if time.time()-self.updatetime>300:
  401. dataele = {
  402. "algorithmCode": self.dirmodel[channels[i]]['classindex'],
  403. "algorithmIp":self.dirmodel[channels[i]]['algip'],
  404. "channel":self.dirmodel[channels[i]]['channel']
  405. }
  406. try:
  407. resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
  408. resultele = resultele.split(',||')
  409. resultele = tuple(resultele)
  410. point = '%s:'*len(resultele) %resultele
  411. if len(point[:-2])>1:
  412. self.dirmodel[channels[i]]['point'] = point[:-2]
  413. except Exception:
  414. print('post error')
  415. if Path(self.opt.weights).stem == 'personcount':
  416. try:
  417. resultper = requests.post(url=urlperson,data=dataele).json()['data']
  418. personcountdir[channels[i]] = int(resultper)
  419. except Exception:
  420. print('urlpersonerror')
  421. if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
  422. datatime= {
  423. "algorithmCode": self.dirmodel[channels[i]]['classindex'],
  424. "algorithmIp":self.dirmodel[channels[i]]['algip'],
  425. "channel":self.dirmodel[channels[i]]['channel']
  426. }
  427. try:
  428. resulttime = requests.post(url=urltime,data=dataele).json()['data']
  429. self.dirmodel[channel]['durtime'] = int(resulttime)
  430. except Exception:
  431. print('posttime error')
  432. self.updatetime = time.time()
  433. seen += 1
  434. if self.webcam: # batch_size >= 1
  435. p, im0 = path[i], im0s[i].copy()
  436. s += f'{i}: '
  437. else:
  438. p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
  439. p = Path(p) # to Path
  440. save_path = str(self.save_dir / p.name) # im.jpg
  441. #txt_path = str(self.save_dir / 'labels' / p.stem) + (
  442. # '' #if dataset.mode == 'image' else f'_{frame}') # im.txt
  443. s += '%gx%g ' % im.shape[2:] # print string
  444. gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
  445. imc = im0.copy() # for save_crop
  446. annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
  447. flag = False
  448. if len(det) and Path(self.opt.weights).stem != 'duty':
  449. #flag = True
  450. # Rescale boxes from img_size to im0 size
  451. det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
  452. # Print results
  453. for c in det[:, 5].unique():
  454. n = (det[:, 5] == c).sum() # detections per class
  455. s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
  456. # Write results
  457. if Path(self.opt.weights).stem in ['arm', 'uniform']:
  458. personpred = self.personmodel(im[i][None], None, None)
  459. personpred = non_max_suppression(personpred, 0.7, self.opt.iou_thres, 0,
  460. self.opt.agnostic_nms, max_det=self.opt.max_det)
  461. if len(personpred[0])==0:
  462. flag = False
  463. elif Path(self.opt.weights).stem == 'other':
  464. persondet = []
  465. personpred = personpred[0]
  466. personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
  467. for *perxyxy,conf,cls in reversed(personpred):
  468. print(perxyxy)
  469. x1,y1,x3,y3 = perxyxy
  470. x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
  471. x2,y2 = x3,y1
  472. x4,y4 = x1,y3
  473. flag = self.taskname.getflag(det, persondet,annotator, self.dirmodel[channels[i]]['fence'],
  474. self.dirmodel[channels[i]]['point'], self.names,
  475. self.dirmodel[channels[i]]['label'])
  476. else:
  477. persondet = []
  478. personpred = personpred[0]
  479. personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
  480. for *perxyxy,conf,cls in reversed(personpred):
  481. print(perxyxy)
  482. if conf<0.8:
  483. continue
  484. x1,y1,x3,y3 = perxyxy
  485. x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
  486. x2,y2 = x3,y1
  487. x4,y4 = x1,y3
  488. persondet.append([x1,y1,x2,y2,x3,y3,x4,y4])
  489. flag = self.taskname.getflag(det,persondet,annotator,self.dirmodel[channels[i]]['fence'],self.dirmodel[channels[i]]['point'],self.names,self.dirmodel[channels[i]]['label'])
  490. else:
  491. if Path(self.opt.weights).stem in ['personcount']:
  492. flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
  493. self.dirmodel[channels[i]]['point'], self.names,
  494. self.dirmodel[channels[i]]['label'],personcountdir[channels[i]])
  495. elif Path(self.opt.weights).stem in ['persontre']:
  496. flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
  497. self.dirmodel[channels[i]]['point'], self.names,
  498. self.dirmodel[channels[i]]['label'],1,imc)
  499. else:
  500. flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
  501. self.dirmodel[channels[i]]['point'], self.names,
  502. self.dirmodel[channels[i]]['label'])
  503. if flag:
  504. #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
  505. self.dirmodel[channels[i]]['detframe'].pop(0)
  506. self.dirmodel[channels[i]]['detframe'].append(1)
  507. self.dirmodel[channels[i]]['preim'] = annotator.result()
  508. self.dirmodel[channels[i]]['oripreim'] = imc
  509. self.dirmodel[channels[i]]['posttime'] = videotime[i]
  510. print(self.dirmodel[channels[i]]['detframe'])
  511. #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
  512. else:
  513. #print(f'deti= {i}')
  514. #print(detframe[i])
  515. #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
  516. self.dirmodel[channels[i]]['detframe'].pop(0)
  517. self.dirmodel[channels[i]]['detframe'].append(0)
  518. print(self.dirmodel[channels[i]]['detframe'])
  519. #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
  520. #print(detframe[i])
  521. # Stream results
  522. #im0 = annotator.result()
  523. #print(f'i = {i}')
  524. #print(channels[i])
  525. #print(postpretime[i])
  526. #print(detframe[i])
  527. if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1)>=1:
  528. self.dirmodel[channels[i]]['detflag'] = True
  529. self.dirmodel[channels[i]]['detpretime'] = time.time()
  530. elif self.dirmodel[channels[i]]['detframe'].count(1)==0 :
  531. self.dirmodel[channels[i]]['detflag'] = False
  532. self.dirmodel[channels[i]]['detpretime'] = float('inf')
  533. # Stream results
  534. #im0 = annotator.result()
  535. if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and time.time() - self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime'] and self.dirmodel[channels[i]]['detflag']:
  536. #print()
  537. #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
  538. #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
  539. #print(self.dirmodel[channels[i]]['detflag'])
  540. print('post-------------------------------------------------------------------------')
  541. #time.sleep(30)
  542. #print(time.time() - postpretime[i])
  543. #print('111111111111111111111111111111111111111111111111')
  544. #print(dirmodel[channels[i]]['preim'].shape)
  545. success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
  546. content = encoded_image.tobytes()
  547. successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
  548. contentori = encoded_imageori.tobytes()
  549. filename = f'{p.stem}_{int(time.time())}.jpg'
  550. filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
  551. print(f'str(p) {p.name}')
  552. print(channels[i])
  553. payload = {'channel': self.dirmodel[channels[i]]['channel'],
  554. 'classIndex': self.dirmodel[channels[i]]['classindex'],
  555. 'ip': self.dirmodel[channels[i]]['algip'],
  556. 'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
  557. 'videoUrl': channels[i]}
  558. files = [
  559. ('file', (filename, content, 'image/jpeg')),
  560. ('oldFile', (filenameori, contentori, 'image/jpeg')),
  561. ]
  562. try:
  563. result = requests.post(url, data=payload, files=files)
  564. print(result)
  565. except Exception:
  566. print('posterror')
  567. #time.sleep(3000)
  568. self.dirmodel[channels[i]]['postpretime'] = time.time()
  569. self.dirmodel[channels[i]]['detflag'] = False
  570. timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
  571. year = time.strftime('%Y',time.localtime(time.time()))
  572. month = time.strftime('%m',time.localtime(time.time()))
  573. day = time.strftime('%d',time.localtime(time.time()))
  574. savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
  575. savefold = Path(savefold)
  576. savefold.mkdir(parents=True,exist_ok=True)
  577. detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
  578. detsavefold = Path(detsavefold)
  579. detsavefold.mkdir(parents=True,exist_ok=True)
  580. cv2.imwrite(f'{savefold}/{timesave}.jpg',self.dirmodel[channels[i]]['oripreim'])
  581. cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',self.dirmodel[channels[i]]['preim'])
  582. #if self.dirmodel[channels[i]]['detframe'].count(1)==0:
  583. # self.dirmodel[channels[i]]['detflag'] = False
  584. #time.sleep(1)
  585. self.view_img = False
  586. if self.view_img:
  587. if platform.system() == 'Linux' and p not in windows:
  588. windows.append(p)
  589. cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
  590. cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
  591. cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
  592. im1 = cv2.resize(im0, (1280, 720))
  593. cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
  594. cv2.waitKey(1) # 1 millisecond
  595. # Save results (image with detections)
  596. # Print time (inference-only)
  597. print(f'channels[i]={channels[i]}')
  598. LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
  599. def load(self):
  600. conn = sqlite3.connect(self.source)
  601. c = conn.cursor()
  602. while True:
  603. #
  604. # print ("数据库打开成功")
  605. cursor = c.execute(
  606. "SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
  607. # content = cursor.fetchall()
  608. # if content[0][1] ==1 or content[0][2] ==1:
  609. # c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
  610. # print("updata changestream")
  611. # conn.commit()
  612. # cursor = c.execute(
  613. # "SELECT modelname, addstream,delstream,streamimg from CHANGESTREAM WHERE modelname='yolov5s'")
  614. self.contentid = cursor.fetchall()
  615. #global tag
  616. #tag = Value('i', self.contentid[0][3])
  617. #print(tag.value==1)
  618. print(f'loadcontent={self.contentid[0][3]}')
  619. time.sleep(3)
  620. c.close()
  621. conn.close()
    def readpoint(self):
        """Fetch per-channel configuration for this model from the platform.

        Populates ``self.dirmodel[channel]`` with the fence flag, fence polygon,
        label, post/detect timers and duration threshold used by the
        postprocessing loop, then returns the sorted list of channel ids.
        """
        # Request payload: algorithmCode identifies the currently loaded weights.
        data = {
            "algorithmCode": modelalgdir[Path(self.opt.weights).stem],
            "deviceIp":None,
            'fwqCode':None
        }
        self.dirmodel = {}
        result = requests.post(url=urlt,data=data).json()['data']
        channell=[]
        for info in result:
            channel = info["deviceChannel"]
            # Channel '45' is explicitly excluded for the 'danager' model.
            if Path(self.opt.weights).stem == "danager" and channel =='45':
                continue
            channell.append(channel)
            self.dirmodel[channel] = {}
            # 'fence' flag: 1 when an electric-fence polygon is configured.
            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
            if Path(self.opt.weights).stem == "uniform":
                # The uniform model always runs with fencing enabled.
                self.dirmodel[channel]['fence'] = 1
            self.dirmodel[channel]['channel'] = info['deviceChannel']
            self.dirmodel[channel]['classindex'] = info['algorithmCode']
            self.dirmodel[channel]['ip'] = info['deviceIp']
            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
            dataele = {
                "algorithmCode": self.dirmodel[channel]['classindex'],
                "algorithmIp":self.dirmodel[channel]['algip'],
                "channel":self.dirmodel[channel]['channel']
            }
            # Fence polygons come back as one string with ',||' between entries.
            resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
            resultele = resultele.split(',||')
            resultele = tuple(resultele)
            # Join entries with ':' separators; the final 2 chars are stripped
            # below via point[:-2].
            # NOTE(review): the -2 slice assumes the service string ends with
            # ',||' (yielding a trailing empty split entry) — TODO confirm.
            point = '%s:'*len(resultele) %resultele
            if Path(self.opt.weights).stem == 'personcount':
                # personcount additionally needs the allowed head-count per channel.
                resultper = requests.post(url=urlperson,data=dataele).json()['data']
                personcountdir[channel] = int(resultper)
            # uniform/fall fall back to a hard-coded default region when the
            # platform returned no usable polygon.
            if (Path(self.opt.weights).stem == "uniform" or Path(self.opt.weights).stem == "fall") and len(point[:-2])<=1:
                self.dirmodel[channel]['point'] = "150#144,1100#144,1100#550,150#550"
            else:
                self.dirmodel[channel]['point'] = point[:-2]
            self.dirmodel[channel]['preim'] = None      # last annotated frame
            self.dirmodel[channel]['oripreim'] = None   # last raw frame
            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]  # sliding hit/miss window
            self.dirmodel[channel]['postpretime'] = 0
            self.dirmodel[channel]['detflag'] = False
            self.dirmodel[channel]['detpretime'] = float('inf')
            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
            # sleep/duty use a configurable duration threshold; others post immediately.
            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
                datatime= {
                    "algorithmCode": self.dirmodel[channel]['classindex'],
                    "algorithmIp":self.dirmodel[channel]['algip'],
                    "channel":self.dirmodel[channel]['channel']
                }
                # NOTE(review): 'datatime' is built but 'dataele' is what gets
                # posted; the two carry identical values today — verify intent.
                resulttime = requests.post(url=urltime,data=dataele).json()['data']
                self.dirmodel[channel]['durtime'] = int(resulttime)
            else:
                self.dirmodel[channel]['durtime'] = 0
            self.dirmodel[channel]['posttime'] = 0
        print(self.dirmodel)
        return sorted(channell)
  685. # str = str.split(":")
  686. # lista = []
  687. # for liststr in str:
  688. # if len(liststr) > 0:
  689. # li = liststr.split(',')
  690. # listpoint = []
  691. # for i, j in zip(li[::2], li[1::2]):
  692. # listpoint.append((i, j))
  693. # lista.append(listpoint)
  694. # return listpoint
  695. #def preprocess():
  696. # print('preprocess-----------------------------------------------')
  697. def getframe(queuelist,channelsl,source,tt,numworks,lock,numworkv):
  698. while True:
  699. print("dataloader")
  700. imgsz = [768, 768]
  701. print(f'source = {source}')
  702. dataset = LoadStreamsSQLTN(channelsl,source, img_size=832,
  703. auto=True, vid_stride=20, tt=tt,numworks = numworks)
  704. bs = len(dataset)
  705. vid_path, vid_writer = [None] * bs, [None] * bs
  706. # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
  707. # self.postpretime = [0]*bs
  708. # Run inference
  709. #imgsz = (1 , 3, *self.imgsz)
  710. print(imgsz)
  711. #self.model.warmup(imgsz=(1 , 3, *imgsz)) # warmup
  712. seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
  713. #
  714. # print ("数据库打开成功")
  715. pretime = time.time()
  716. tag = 0
  717. sourcebase = 'project0117.db'
  718. for path, im, im0s, vid_cap, s, videotime,channels in dataset:
  719. # print('*'*21)
  720. # global tag
  721. # print('datasetvideo')
  722. # if time.time()-pretime > 300:
  723. # pretime = time.time()
  724. # conn = sqlite3.connect(sourcebase)
  725. # c = conn.cursor()
  726. # cursor = c.execute("SELECT modelname, addstream,delstream,streaming from CHANGESTREAM WHERE modelname= 'helmet'")
  727. # contentid = cursor.fetchall()
  728. # tag = contentid[0][3]
  729. # if tag == 1:
  730. # lock.acquire()
  731. # numworkv.value += 1
  732. # dataset.close()
  733. # if numworkv.value==3:
  734. # print('newstreaming=', tag)
  735. # conn = sqlite3.connect(source)
  736. # c = conn.cursor()
  737. # c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname='helmet'")
  738. # print(123)
  739. # conn.commit()
  740. # c.close()
  741. # conn.close()
  742. # lock.release()
  743. # print('opencv1')
  744. # # cv2.destroyAllWindows()
  745. # print('opencv')
  746. # break
  747. # else:
  748. # print('nonewstreaming=', tag)
  749. if time.time()-pretime > 300:
  750. channellist = []
  751. pretime = time.time()
  752. data = {
  753. "algorithmCode": None,
  754. "deviceIp":None,
  755. "fwqCode":None
  756. }
  757. try:
  758. result = requests.post(url=urlt,data=data).json()['data']
  759. except Exception:
  760. result = []
  761. for info in result:
  762. data = {
  763. "channel": info["deviceChannel"],
  764. "ip": info["deviceAlgorithmIp"]
  765. }
  766. chaflag = any(info["deviceChannel"] in t for t in channellist)
  767. #personcountdir[channel] = num
  768. if not chaflag:
  769. address = requests.post(url=urlrtsp,data=data).json()['msg']
  770. channellist.append((info['deviceChannel'],address))
  771. channelsa = []
  772. sourcea = []
  773. channellist = set(channellist)
  774. channellist = sorted(channellist,key=lambda x:x[0])
  775. #channellist = set(channellist)
  776. for cha,add in channellist:
  777. channelsa.append(cha)
  778. sourcea.append(add)
  779. channelsl = sorted(channelsl)
  780. #channelsa = sorted(channelsa)
  781. if channelsa!=channelsl and len(channelsa)>0:
  782. print(f'channelsa = {channelsa}')
  783. print(f'channelsl = {channelsl}')
  784. dataset.close()
  785. channelsl = channelsa
  786. source = sourcea
  787. break;
  788. for key,value in queuelist.items():
  789. hour = time.localtime(time.time()).tm_hour
  790. if hour in range(7,18):
  791. value[-1].put((path, im, im0s, vid_cap, s, videotime,channels))
  792. value[-1].get() if value[-1].qsize() == 10 else time.sleep(0.001)
  793. def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1,modellen=None):
  794. processlist = []
  795. queuelist = {}
  796. for i in range(numworks):
  797. for model in modellen:
  798. queue = Queue(maxsize=10)
  799. queuelist.setdefault(model,[])
  800. queuelist[model].append(queue)
  801. process = Process(target=getframe,
  802. args=(queuelist, channels,source, i,numworks,lock,numworkv))
  803. processlist.append(process)
  804. process.start()
  805. #queuelist.append(queue)
  806. return queuelist
  807. # path = []
  808. # im0s = []
  809. # vid_cap = None
  810. # s = ''
  811. # videotime = []
  812. # while True:
  813. # imlist = []
  814. # pathlist = []
  815. # im0slist = []
  816. # channelslist = []
  817. # vid_cap = None
  818. # s = ''
  819. # videotimelist = []
  820. # for q in queuelist:
  821. # if q.qsize()>0:
  822. # setframe = q.get()
  823. # path, im, im0s, vid_cap, s, videotime ,channels = setframe
  824. # pathlist += path
  825. # channelslist +=channels
  826. # im0slist += im0s
  827. # videotimelist += videotime
  828. # imlist.append(im)
  829. # if len(imlist)>0:
  830. # im = np.concatenate(imlist)
  831. # if len(pathlist)>0:
  832. # print(len(path),im.shape,len(im0s))
  833. # streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
  834. #print(f'streamlist = {len(streamlist)}')
  835. # streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
  836. def modelfun(queue,weights,sourcedb,classes,device,classify,conf_thres,runmodel,v8=False):
  837. print(weights)
  838. detectdemo=Detect(weights=weights,source=sourcedb,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
  839. detectdemo.infer(queue,runmodel)
  840. def parse_opt():
  841. parser = argparse.ArgumentParser()
  842. parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
  843. opt = parser.parse_args()
  844. return opt
  845. def main(opt):
  846. check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
  847. run(**vars(opt))
if __name__ == '__main__':
    # Entry point: load the VLM once in the parent process, sync stream/config
    # tables from the platform into the local DB, spawn the frame-grabber
    # workers, then keep launching one detector process per enabled model.
    opt = parse_opt()
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    # SmolVLM processor/model (bf16), moved to GPU when available.
    processor = AutoProcessor.from_pretrained(r"/mnt/liq_weight/smolVLM")
    model = AutoModelForVision2Seq.from_pretrained(
        r"/mnt/liq_weight/smolVLM",
        torch_dtype=torch.bfloat16,
        # _attn_implementation="flash_attention_2" if DEVICE == "cuda" else "eager",
    ).to(DEVICE)
    dbpath = 'projectnew.db'
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    # Sync task/stream configuration from the platform into the local DB.
    task(c,conn,urlt,urla)
    # Refresh the RTSP address of every channel via the platform service.
    cursor = c.execute('select channel,algip from stream ')
    result = cursor.fetchall()
    for channel ,algip in result:
        data = {
            "channel": channel,
            "ip":algip
        }
        address = requests.post(url=urlrtsp,data=data).json()['msg']
        c.execute('UPDATE STREAM set address= (?) where channel =(?)',(address,channel))
        conn.commit()
    # Models currently enabled in CHANGESTREAM.
    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty' or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname= 'arm' or modelname = 'persontre' or modelname = 'bag'")
    content = cursor.fetchall()
    # All (address, channel) pairs currently streaming.
    cursor = c.execute("SELECT address,channel from STREAM ")
    contenta = cursor.fetchall()
    source = []
    modellist = []
    addcha = []
    channellist = []
    for i in contenta:
        addcha.append((i[0],i[1]))
    # De-duplicate and order by channel id.
    addcha = set(addcha)
    addcha = sorted(addcha,key=lambda x:x[1])
    for add,cha in addcha:
        source.append(add)
        channellist.append(cha)
    print(addcha)
    source = list(source)
    # Models referenced by at least one stream.
    cursor = c.execute("SELECT modelname from STREAM where (modelname ='helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty' or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag')")
    contentm = cursor.fetchall()
    for m in contentm:
        modellist.append(m[0])
    modellist = set(modellist)
    modellist = list(modellist)
    contentlist = []
    for i in content:
        contentlist.append(i[0])
    n = len(content)
    print(f'modelname={n}')
    print(content)
    print(content)
    print(source)
    streamqueue = Queue(maxsize=4)
    numworkv = Value('i', 0)
    manager = Manager()
    lock = multiprocessing.Lock()
    streamlist = manager.list()
    numworks = 7  # number of frame-grabber worker processes
    # Only create queues for models that are both streamed and enabled.
    modellen = []
    for i in modellist:
        if i in contentlist:
            modellen.append(i)
    queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks,modellen)
    deid = 0
    runmodel = manager.dict()  # shared registry of already-started models
    while True:
        for i in modellist:
            if i in contentlist:
                if i not in runmodel:
                    # Per-model confidence threshold / classify flag from the DB.
                    c.execute('select conf,cla from changestream where modelname = (?)',(i,))
                    rea = c.fetchall()
                    # NOTE(review): i[0] is the first *character* of the model
                    # name — the log line probably meant f'weights ={i}.pt'.
                    print(f'weights ={i[0]}.pt')
                    if i in ['duty','danager','inspection','cross','personcount']:
                        # Person-only models: classes=[0], v8 pipeline.
                        process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,[0],0,rea[0][1],rea[0][0],runmodel,True))
                    else:
                        if i in ['fall','extinguisher']:
                            # Legacy (non-v8) pipeline for these two models.
                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel))
                        else:
                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel,True))
                    time.sleep(3)  # stagger model start-up
                    process.start()
                    deid = deid+1
                    runmodel[i] = 1
        # Every 10 minutes: re-sync config and pick up newly enabled models.
        time.sleep(600)
        task(c,conn,urlt,urla)
        cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty' or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag'")
        content = cursor.fetchall()
        contentlist = []
        for con in content:
            contentlist.append(con[0])
        cursor = c.execute("SELECT address,channel from STREAM ")
        contenta = cursor.fetchall()
        source = []
        modellist = []
        addcha = []
        channellist = []
        for i in contenta:
            addcha.append((i[0],i[1]))
        addcha = set(addcha)
        addcha = sorted(addcha)
        for a,cha in addcha:
            source.append(a)
            channellist.append(cha)
        print(addcha)
        source = list(source)
        cursor = c.execute("SELECT modelname from STREAM where (modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty' or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag')")
        contentm = cursor.fetchall()
        for m in contentm:
            modellist.append(m[0])
        modellist = set(modellist)
        n = len(content)
        print(f'modelname={n}')
        print(content)
        print(content)