# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
                                                     img.jpg                         # image
                                                     vid.mp4                         # video
                                                     screen                          # screenshot
                                                     path/                           # directory
                                                     list.txt                        # list of images
                                                     list.streams                    # list of streams
                                                     'path/*.jpg'                    # glob
                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle
"""
import argparse
import multiprocessing
import multiprocessing as mp
import os
import platform
import random
import signal
import sqlite3
import sys
import threading
import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import Manager, Process, Queue, Value, set_start_method
from pathlib import Path

import matplotlib.path as mat
import numpy as np
import requests
import torch
import torch.nn.functional as F  # used by clapre() below
from torchvision import transforms
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
from models.common import DetectMultiBackend
from utils.dataloaders import (IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams, LoadStreamsSQLNEWN, LoadStreamsSQL,
                               LoadStreamsSQLNRERT, LoadStreamsVEight, LoadStreamsSQLT, LoadStreamsSQLTNP, preprocess)
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           cv2, increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer,
                           xyxy2xywh, strtolst, apply_classifier1, apply_classifieruniform, compute_IOU, task,
                           apply_classifierarm, numpy_image_to_base64)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
from utils.renwu import (newHelmet, newUniform, newFall, Personcount, Arm, Bag, Cross, Extinguisher, newPersontre,
                         Danager, CarHelmetBelt, Clear)
from loguru import logger

# from testpool import func1, TestA
# def my_handler(signum, frame):
#     exit(0)

# url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
url = "http://172.19.152.231/open/api/operate/upload"           # alarm image upload
urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"  # channel -> RTSP address
urlt = "http://172.19.152.231/open/api/operate/taskList"        # per-algorithm task/channel list
urla = "http://172.19.152.231/open/api/operate/algorithmList"   # algorithm config (confidence threshold)
urlele = "http://172.19.152.231/open/api/operate/fence"         # electric-fence point collections
urltime = "http://172.19.152.231/open/api/operate/getTime"      # duty/sleep duration threshold
urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"  # person-count limit
urlque = "http://172.19.152.231:8001/analyze/"                  # VLM double-check service
# Prompts sent to the VLM service; its answers are checked for "是"/"否" (yes/no) in postprocess().
questiondir = {
    "uniform": "图片中的所有人都穿工作服了吗,请回答是或否",  # "Is everyone in the image wearing work uniforms? Answer yes or no."
    "fall": "图片中的人摔倒了吗,请回答是或否",              # "Has the person in the image fallen down? Answer yes or no."
    "persontre": "图片中的人坐踩手推车了吗,请回答是或否",   # "Is the person in the image sitting or standing on the cart? Answer yes or no."
    "helmet": "图片中的人戴工作帽了吗,请回答是或否"         # "Is the person in the image wearing a work cap? Answer yes or no."
}
# algorithm code -> model/task name
modelnamedir = {'0': 'helmet', '8': 'danager', '10': 'uniform', '14': 'smoke', '16': 'fire', '21': 'cross',
                '25': 'fall', '29': 'occupancy', '30': 'liquid', '31': 'pressure', '32': 'sleep', '34': 'personcount',
                '37': 'other', '38': 'duty', '98': 'face', '55': 'oil', '52': 'jingdian', '53': 'rope',
                '54': 'personcar', '39': 'inspection', '11': 'reflective', '12': 'phone', '66': 'extinguisher',
                '67': 'belt', '68': 'menjin', '35': 'arm', '36': 'persontre', '33': 'bag', '62': 'clear'}
# algorithm code -> comma-separated class labels used by that model
modellabeldir = {'0': 'head,person', '8': 'person', '10': 'black_work_clothes,blue_work_clothes,person', '14': 'smoke',
                 '16': 'fire', '21': 'cross', '25': 'fall', '29': 'car', '30': 'liquid', '31': 'pressure',
                 '32': 'sleep', '34': 'personcount', '37': 'other', '38': 'person', '98': 'face', '55': 'oil',
                 '52': 'person,hand,ball', '53': 'rope', '54': 'person', '39': 'person',
                 '11': 'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other', '12': 'phone',
                 '66': 'extinguisher', '67': 'person,head,helmet,belt', '68': 'person', '35': 'barearm,arm',
                 '36': 'person,foot,cart,bag,box', '33': 'handbox,handbag', '62': 'hand'}
modelalgdir = {}  # model name -> algorithm code (inverse of modelnamedir)
personcountdir = {}  # channel -> person-count limit
for key, value in modelnamedir.items():
    modelalgdir[value] = key
# model name -> task handler class
taskmap = {'helmet': newHelmet, 'uniform': newUniform, 'fall': newFall, 'personcount': Personcount, 'arm': Arm,
           'bag': Bag, 'cross': Cross, 'extinguisher': Extinguisher, 'persontre': newPersontre, 'danager': Danager,
           'belt': CarHelmetBelt, 'clear': Clear}
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]  # ImageNet normalization
test = transforms.Compose([transforms.Resize((224, 224)),
                           # transforms.CenterCrop(224),
                           transforms.ToTensor(),
                           transforms.Normalize(mean=mean, std=std)
                           ])
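
# clapre: batch-classifies cropped regions with a secondary classifier and
# returns only the points whose predicted class index is below 5. Not called
# anywhere in this file; kept for reference.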
def clapre(modelcla, claimg, clapoint):
    imgten = torch.stack(claimg, dim=0)
    clapoint = torch.stack(clapoint, dim=0)
    imgten = imgten.to(0)
    result = modelcla(imgten)
    result = F.softmax(result, dim=1)
    print(result)
    index = result.argmax(1)
    index = index.cpu().numpy()
    index = np.argwhere(index < 5)
    index = index.reshape(-1)
    print(index)
    if len(index) > 0:
        print(clapoint[index])
        return clapoint[index]
    else:
        return None
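
# YoloOpt mirrors the stock detect.py argparse options as a plain object, so
# that Detect can be constructed programmatically instead of via the CLI.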
class YoloOpt:
    def __init__(self, weights=ROOT / 'yolov5s.pt', source=ROOT / 'data/images', data=ROOT / 'data/coco128.yaml',
                 imgsz=(640, 640),
                 conf_thres=0.25,
                 iou_thres=0.45,
                 max_det=1000,
                 device='',
                 view_img=False,
                 save_txt=False,
                 save_conf=False,
                 save_crop=False,
                 nosave=True,
                 classes=None,
                 agnostic_nms=False,
                 augment=False,
                 visualize=False,
                 update=False,
                 project=ROOT / 'runs/detect',
                 name='exp',
                 exist_ok=False,
                 line_thickness=1,
                 hide_labels=False,
                 hide_conf=False,
                 half=False,
                 dnn=False,
                 vid_stride=10,
                 classify=False,
                 v8=False):
        self.weights = weights  # path to the weights file
        self.source = source  # input images/streams to run on
        self.data = data
        if imgsz is None:
            imgsz = (640, 640)
        self.imgsz = imgsz  # input image size, default (640, 640)
        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
        self.device = device  # device to run on; this project only wraps the CPU path
        self.view_img = view_img  # whether to display predicted images/videos, default False
        self.classes = classes  # keep only a subset of classes; default keeps all
        self.agnostic_nms = agnostic_nms  # class-agnostic NMS across different classes, default False
        self.augment = augment  # augmented inference (TTA / multi-scale prediction), can improve accuracy
        self.update = update  # if True, run strip_optimizer on all models to remove optimizer state from the .pt file, default False
        self.exist_ok = exist_ok  # if True, reuse an existing project/name directory instead of incrementing, default False
        self.project = project  # where test logs are saved; unused in this program
        self.name = name  # name of each run; also unused in this program
        self.max_det = max_det
        self.save_txt = save_txt
        self.save_conf = save_conf
        self.save_crop = save_crop
        self.nosave = nosave
        self.visualize = visualize
        self.line_thickness = line_thickness
        self.hide_labels = hide_labels
        self.hide_conf = hide_conf
        self.half = half
        self.dnn = dnn
        self.vid_stride = vid_stride
        self.classify = classify
        self.v8 = v8
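
# Detect wraps a single task model (one <task>.pt per process): it loads the
# backbone (DetectMultiBackend, or ultralytics AutoBackend for v8), an optional
# second-stage person/classifier model, pulls per-channel config from the task
# server via readpoint(), and starts a daemon thread (load) that polls the
# CHANGESTREAM table.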
class Detect:
    def __init__(self, weights=ROOT / 'yolov5s.pt', imgsz=(640, 640), source="changshusql1103.db", classes=None,
                 device=None, classify=False, conf_thres=0.25, v8=False):
        print(f'detectweights = {weights}')
        if v8:
            from ultralytics.nn.autobackend import AutoBackend
            from ultralytics.utils.ops import non_max_suppression
        else:
            from utils.general import non_max_suppression
        self.opt = YoloOpt(weights=weights, imgsz=imgsz, source=source, classes=classes, device=device,
                           classify=classify, conf_thres=conf_thres, v8=v8)
        self.source = str(self.opt.source)
        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
        screenshot = self.source.lower().startswith('screen')
        if is_url and is_file:
            self.source = check_file(self.source)  # download
        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name,
                                       exist_ok=self.opt.exist_ok)  # increment run
        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True,
                                                                                 exist_ok=True)  # make dir
        print(f'device = {self.opt.device}')
        device = select_device(self.opt.device)
        if v8:
            self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
                                     fp16=self.opt.half)
            if Path(weights).stem in ['arm', 'uniform', 'fall', 'persontre', 'belt']:
                if Path(weights).stem == 'arm':
                    self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn,
                                                   data=self.opt.data, fp16=self.opt.half)
                elif Path(weights).stem in ['uniform', 'fall', 'persontre', 'belt']:
                    self.personmodel = AutoBackend('yolo11m.pt', device=device, dnn=self.opt.dnn,
                                                   data=self.opt.data, fp16=self.opt.half)
        else:
            self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
                                            fp16=self.opt.half)
            if Path(weights).stem in ['helmet', 'arm']:
                self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn,
                                                      data=self.opt.data, fp16=self.opt.half)
        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        self.classify = classify
        if self.classify:
            if Path(weights).stem != "arm":
                classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
                self.classifier_model = classifier_model.to(device)
                self.classifier_model.eval()
            else:
                self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt", device=device, dnn=self.opt.dnn,
                                                    data=self.opt.data, fp16=self.opt.half)
        # self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
        if Path(weights).stem in ['helmet', 'uniform']:
            self.imgsz = [960, 960]
        elif Path(weights).stem in ['persontre']:
            self.imgsz = [1280, 1280]
        else:
            self.imgsz = [640, 640]
        self.model.warmup(imgsz=(1, 3, *self.imgsz))  # warmup
        self.readpoint()
        savefold = Path(f'/home/h3c/yolo/log/{Path(weights).stem}')
        savefold.mkdir(parents=True, exist_ok=True)
        logger.remove()
        self.logger = logger.bind(process_log=f'/home/h3c/yolo/log/{Path(weights).stem}/file.log')
        self.logger.add(f'./log/{Path(weights).stem}/file.log', rotation="1 GB", retention="3 days")
        print(self.imgsz)
        self.updatetime = time.time()
        self.updatemtime = time.time()
        self.filetime = os.path.getmtime(self.opt.weights)
        self.taskname = taskmap[Path(self.opt.weights).stem]()
        t1 = threading.Thread(target=self.load, daemon=True)
        t1.start()
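
    # infer: the worker loop. It pulls batched frames from this model's queues,
    # keeps only the channels assigned to this model, builds the input tensor,
    # runs the backbone, and hands predictions to postprocess(). readpoint() is
    # re-polled every 300 s; an empty channel list unregisters the model from
    # runmodel and ends the loop.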
    @smart_inference_mode()
    def infer(self, queue, runmodel):
        pretime = time.time()
        seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
        while True:
            if time.localtime().tm_hour not in range(7, 20):
                time.sleep(30)
                continue
            if time.time() - pretime > 300:
                ret = self.readpoint()
                pretime = time.time()
                if not ret:
                    print(f'{Path(self.opt.weights).stem} {runmodel}')
                    runmodel.pop(Path(self.opt.weights).stem)
                    print(f'{Path(self.opt.weights).stem} {runmodel}')
                    break
            print(f'queuelen = {len(queue)}')
            for que in queue:
                if que.qsize() == 0:
                    print('queuezero')
                    time.sleep(0.01)
                if que.qsize() > 0:
                    setframe = que.get()
                    path, im0s, vid_cap, s, videotime, channels = setframe
                    # keep only the channels this model is assigned to
                    algchannel = list(self.dirmodel.keys())
                    print(algchannel)
                    print(path)
                    algchannel = np.array(algchannel)
                    channelsnp = np.array(channels)
                    algindex = np.where(np.in1d(channelsnp, algchannel))[0]
                    algindex = list(algindex)
                    path = np.array(path)
                    path = path[algindex]
                    path = path.tolist()
                    channels = np.array(channels)
                    channels = channels[algindex]
                    channels = channels.tolist()
                    if len(algindex) == 0:
                        continue
                    try:
                        im0s = np.asarray(im0s)
                    except Exception:
                        im0s = np.asarray(im0s, dtype=object)  # frames of mixed shapes
                    print(im0s.shape)
                    im0s = im0s[algindex]
                    if '61' in channels:
                        self.pt = False
                    im = preprocess(im0s, self.pt, self.imgsz, self.stride)
                    print(f'algindex = {algindex}')
                    print(f'im0s = {im0s[0].shape}')
                    videotime = np.array(videotime)
                    videotime = videotime[algindex]
                    videotime = tuple(map(tuple, videotime))
                    with self.dt[0]:
                        im = torch.from_numpy(im).to(self.model.device)
                        im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
                        im /= 255  # 0 - 255 to 0.0 - 1.0
                        if len(im.shape) == 3:
                            im = im[None]  # expand for batch dim
                    # Inference
                    with self.dt[1]:
                        visualize = increment_path(self.save_dir / Path(path).stem,
                                                   mkdir=True) if self.opt.visualize else False
                        pred = self.model(im, augment=self.opt.augment, visualize=visualize)
                    self.postprocess(pred, path, im0s, im, s, videotime, channels)
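
    # postprocess: NMS (plus the optional second-stage classifier), then per
    # channel: evaluate the task rule via taskname.getflag, debounce alarms
    # through detframe/detflag/durtime, optionally double-check the frame with
    # the VLM service at urlque, and finally upload annotated + original JPEGs
    # and archive them under /mnt/project.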
    def postprocess(self, pred, path, im0s, im, s, videotime, channels):
        # Every 300 s: hot-reload the weights if the file changed, and refresh
        # the confidence threshold from the algorithm server.
        if time.time() - self.updatemtime > 300:
            if self.filetime != os.path.getmtime(self.opt.weights):
                try:
                    device = select_device(self.opt.device)
                    print("load new model")
                    self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn,
                                                    data=self.opt.data, fp16=self.opt.half)
                    self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
                    self.filetime = os.path.getmtime(self.opt.weights)
                except Exception:
                    print("load error")
            print(modelalgdir[Path(self.opt.weights).stem])
            try:
                rea = requests.post(url=urla,
                                    data={'algorithmCode': modelalgdir[Path(self.opt.weights).stem]}).json()['data']
                con = rea[0]['confidence']
                self.opt.conf_thres = con
            except Exception:
                print('error')
            self.updatemtime = time.time()
        seen = 0
        print(f'seen = {seen}')
        windows = []
        labelnamelist = []
        with self.dt[2]:
            if self.opt.v8:
                from ultralytics.utils.ops import non_max_suppression
            else:
                from utils.general import non_max_suppression
            if Path(self.opt.weights).stem == "fall":
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
                                           self.opt.agnostic_nms, max_det=self.opt.max_det, nc=1)
            else:
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
                                           self.opt.agnostic_nms, max_det=self.opt.max_det)
        # Second-stage classifier (optional)
        if self.classify and Path(self.opt.weights).stem != 'persontre':
            if Path(self.opt.weights).stem == 'arm':
                pred = apply_classifierarm(pred, self.classifier_model, im, im0s, Path(self.opt.weights).stem)
            else:
                pred = apply_classifier1(pred, self.classifier_model, im, im0s, Path(self.opt.weights).stem)
        # Process predictions
        for i, det in enumerate(pred):  # per image
            # Every 300 s: refresh fence points, person limits, and duty/sleep
            # durations for this channel.
            if time.time() - self.updatetime > 300:
                dataele = {
                    "algorithmCode": self.dirmodel[channels[i]]['classindex'],
                    "algorithmIp": self.dirmodel[channels[i]]['algip'],
                    "channel": self.dirmodel[channels[i]]['channel']
                }
                try:
                    resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
                    resultele = resultele.split(',||')
                    resultele = tuple(resultele)
                    point = '%s:' * len(resultele) % resultele
                    if len(point[:-2]) > 1:
                        self.dirmodel[channels[i]]['point'] = point[:-2]
                except Exception:
                    print('post error')
                if Path(self.opt.weights).stem == 'personcount':
                    try:
                        resultper = requests.post(url=urlperson, data=dataele).json()['data']
                        personcountdir[channels[i]] = int(resultper)
                    except Exception:
                        print('urlpersonerror')
                if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty':
                    datatime = {
                        "algorithmCode": self.dirmodel[channels[i]]['classindex'],
                        "algorithmIp": self.dirmodel[channels[i]]['algip'],
                        "channel": self.dirmodel[channels[i]]['channel']
                    }
                    try:
                        resulttime = requests.post(url=urltime, data=datatime).json()['data']
                        self.dirmodel[channels[i]]['durtime'] = int(resulttime)
                    except Exception:
                        print('posttime error')
                self.updatetime = time.time()
            seen += 1
            if self.webcam:  # batch_size >= 1
                p, im0 = path[i], im0s[i].copy()
                s += f'{i}: '
            else:
                # Legacy single-source branch from the stock detect.py;
                # `dataset` is not defined in this class, so this path is
                # effectively dead here.
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
            p = Path(p)  # to Path
            save_path = str(self.save_dir / p.name)  # im.jpg
            s += '%gx%g ' % im.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy()  # for save_crop
            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
            flag = False
            if len(det) and Path(self.opt.weights).stem != 'duty':
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, 5].unique():
                    n = (det[:, 5] == c).sum()  # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                # Write results
                if Path(self.opt.weights).stem in ['arm', 'uniform', 'fall', 'persontre', 'belt']:
                    # confirm a person is present with the second model before applying the task rule
                    personpred = self.personmodel(im[i][None], None, None)
                    personpred = non_max_suppression(personpred, 0.7, self.opt.iou_thres, 0,
                                                     self.opt.agnostic_nms, max_det=self.opt.max_det)
                    if len(personpred[0]) == 0:
                        flag = False
                    elif Path(self.opt.weights).stem == 'other':
                        persondet = []
                        personpred = personpred[0]
                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
                        for *perxyxy, conf, cls in reversed(personpred):
                            print(perxyxy)
                            x1, y1, x3, y3 = perxyxy
                            x1, y1, x3, y3 = int(x1), int(y1), int(x3), int(y3)
                            x2, y2 = x3, y1
                            x4, y4 = x1, y3
                        flag = self.taskname.getflag(det, persondet, annotator, self.dirmodel[channels[i]]['fence'],
                                                     self.dirmodel[channels[i]]['point'], self.names,
                                                     self.dirmodel[channels[i]]['label'], channel=channels[i])
                    else:
                        persondet = []
                        personpred = personpred[0]
                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
                        for *perxyxy, conf, cls in reversed(personpred):
                            print(perxyxy)
                            if conf < 0.8:
                                continue
                            x1, y1, x3, y3 = perxyxy
                            x1, y1, x3, y3 = int(x1), int(y1), int(x3), int(y3)
                            x2, y2 = x3, y1
                            x4, y4 = x1, y3
                            persondet.append([x1, y1, x2, y2, x3, y3, x4, y4])
                        if Path(self.opt.weights).stem == "fall":
                            flag = self.taskname.getflag(det, persondet, annotator,
                                                         self.dirmodel[channels[i]]['fence'],
                                                         self.dirmodel[channels[i]]['point'], self.names,
                                                         self.dirmodel[channels[i]]['label'],
                                                         imshape=im.shape[2:], channel=channels[i])
                        else:
                            flag = self.taskname.getflag(det, persondet, annotator,
                                                         self.dirmodel[channels[i]]['fence'],
                                                         self.dirmodel[channels[i]]['point'], self.names,
                                                         self.dirmodel[channels[i]]['label'], channel=channels[i])
                else:
                    if Path(self.opt.weights).stem in ['personcount']:
                        flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
                                                     self.dirmodel[channels[i]]['point'], self.names,
                                                     self.dirmodel[channels[i]]['label'],
                                                     personcountdir[channels[i]], channel=channels[i])
                    elif Path(self.opt.weights).stem in ['persontre']:
                        flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
                                                     self.dirmodel[channels[i]]['point'], self.names,
                                                     self.dirmodel[channels[i]]['label'], 1, imc,
                                                     channel=channels[i])
                    else:
                        flag = self.taskname.getflag(det, None, annotator, self.dirmodel[channels[i]]['fence'],
                                                     self.dirmodel[channels[i]]['point'], self.names,
                                                     self.dirmodel[channels[i]]['label'], channel=channels[i])
            # Debounce: detframe is a two-slot hit/miss window; detflag arms
            # the alarm and detpretime marks when it was armed.
            if flag:
                self.dirmodel[channels[i]]['detframe'].pop(0)
                self.dirmodel[channels[i]]['detframe'].append(1)
                self.dirmodel[channels[i]]['preim'] = annotator.result()
                self.dirmodel[channels[i]]['oripreim'] = imc
                self.dirmodel[channels[i]]['posttime'] = videotime[i]
                print(self.dirmodel[channels[i]]['detframe'])
            else:
                self.dirmodel[channels[i]]['detframe'].pop(0)
                self.dirmodel[channels[i]]['detframe'].append(0)
                print(self.dirmodel[channels[i]]['detframe'])
            if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1) >= 1:
                self.dirmodel[channels[i]]['detflag'] = True
                self.dirmodel[channels[i]]['detpretime'] = time.time()
            elif self.dirmodel[channels[i]]['detframe'].count(1) == 0:
                self.dirmodel[channels[i]]['detflag'] = False
                self.dirmodel[channels[i]]['detpretime'] = float('inf')
            # Stream results
            if time.time() - self.dirmodel[channels[i]]['postpretime'] > 30 and time.time() - \
                    self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime'] and \
                    self.dirmodel[channels[i]]['detflag']:
                print('post-------------------------------------------------------------------------')
                success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
                content = encoded_image.tobytes()
                successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
                contentori = encoded_imageori.tobytes()
                filename = f'{p.stem}_{int(time.time())}.jpg'
                filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
                print(f'str(p) {p.name}')
                print(channels[i])
                # Double-check with the large multimodal model before posting
                if Path(self.opt.weights).stem in ['uniform', 'fall', 'persontre', 'helmet']:
                    postdir = {'image_base64': numpy_image_to_base64(self.dirmodel[channels[i]]['oripreim']),
                               'question': questiondir[Path(self.opt.weights).stem]}
                    response = requests.post(urlque, json=postdir).json()['data']['answer']
                    # suppress the alarm when the VLM disagrees with the detector
                    if "是" in response and Path(self.opt.weights).stem in ("uniform", "helmet"):
                        continue
                    if "否" in response and Path(self.opt.weights).stem in ("fall", "persontre"):
                        continue
                payload = {'channel': self.dirmodel[channels[i]]['channel'],
                           'classIndex': self.dirmodel[channels[i]]['classindex'],
                           'ip': self.dirmodel[channels[i]]['algip'],
                           'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
                           'videoUrl': channels[i]}
                files = [
                    ('file', (filename, content, 'image/jpeg')),
                    ('oldFile', (filenameori, contentori, 'image/jpeg')),
                ]
                try:
                    result = requests.post(url, data=payload, files=files)
                    print(result)
                except Exception:
                    print('posterror')
                self.dirmodel[channels[i]]['postpretime'] = time.time()
                self.dirmodel[channels[i]]['detflag'] = False
                # archive the original and annotated frames to disk
                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
                year = time.strftime('%Y', time.localtime(time.time()))
                month = time.strftime('%m', time.localtime(time.time()))
                day = time.strftime('%d', time.localtime(time.time()))
                savefold = Path(f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}')
                savefold.mkdir(parents=True, exist_ok=True)
                detsavefold = Path(f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}')
                detsavefold.mkdir(parents=True, exist_ok=True)
                cv2.imwrite(f'{savefold}/{timesave}.jpg', self.dirmodel[channels[i]]['oripreim'])
                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg', self.dirmodel[channels[i]]['preim'])
            self.view_img = False  # display is disabled in service mode
            if self.view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
                im1 = cv2.resize(im0, (1280, 720))
                cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
                cv2.waitKey(1)  # 1 millisecond
            # Print time (inference-only)
            print(f'channels[i]={channels[i]}')
            self.logger.info(
                f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms "
                f"{str(p)}-{Path(self.opt.weights).stem}")
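
    # load: background polling thread. Re-reads this model's row from the
    # CHANGESTREAM table every 3 s; note the cursor/connection cleanup below
    # the loop is unreachable as written.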
    def load(self):
        conn = sqlite3.connect(self.source)
        c = conn.cursor()
        while True:
            cursor = c.execute(
                "SELECT modelname, addstream, delstream, streaming from CHANGESTREAM WHERE modelname = (?)",
                (Path(self.opt.weights).stem,))
            self.contentid = cursor.fetchall()
            print(f'loadcontent={self.contentid[0][3]}')
            time.sleep(3)
        c.close()
        conn.close()
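
    # readpoint: rebuilds self.dirmodel from the task server: the channel list
    # for this algorithm plus per-channel fence points, labels, person limits,
    # and duty/sleep durations. Returns the sorted channel list; an empty list
    # tells infer() to shut the model down.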
    def readpoint(self):
        data = {
            "algorithmCode": modelalgdir[Path(self.opt.weights).stem],
            "deviceIp": None,
            'fwqCode': None
        }
        self.dirmodel = {}
        result = requests.post(url=urlt, data=data).json()['data']
        channell = []
        for info in result:
            channel = info["deviceChannel"]
            if Path(self.opt.weights).stem == "danager" and channel == '45':
                continue
            channell.append(channel)
            self.dirmodel[channel] = {}
            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"]) > 0 else 0
            if Path(self.opt.weights).stem == "uniform":
                self.dirmodel[channel]['fence'] = 1
            self.dirmodel[channel]['channel'] = info['deviceChannel']
            self.dirmodel[channel]['classindex'] = info['algorithmCode']
            self.dirmodel[channel]['ip'] = info['deviceIp']
            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
            dataele = {
                "algorithmCode": self.dirmodel[channel]['classindex'],
                "algorithmIp": self.dirmodel[channel]['algip'],
                "channel": self.dirmodel[channel]['channel']
            }
            resultele = requests.post(url=urlele, data=dataele).json()['data']['pointCollections']
            resultele = resultele.split(',||')
            resultele = tuple(resultele)
            point = '%s:' * len(resultele) % resultele  # join with ':'; trailing chars trimmed below
            if Path(self.opt.weights).stem == 'personcount':
                resultper = requests.post(url=urlperson, data=dataele).json()['data']
                personcountdir[channel] = int(resultper)
            if Path(self.opt.weights).stem == "uniform" and len(point[:-2]) <= 1:
                # fall back to a default fence when none is configured
                self.dirmodel[channel]['point'] = "150#144,1100#144,1100#550,150#550"
            else:
                self.dirmodel[channel]['point'] = point[:-2]
            self.dirmodel[channel]['preim'] = None
            self.dirmodel[channel]['oripreim'] = None
            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
            self.dirmodel[channel]['postpretime'] = 0
            self.dirmodel[channel]['detflag'] = False
            self.dirmodel[channel]['detpretime'] = float('inf')
            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty':
                datatime = {
                    "algorithmCode": self.dirmodel[channel]['classindex'],
                    "algorithmIp": self.dirmodel[channel]['algip'],
                    "channel": self.dirmodel[channel]['channel']
                }
                resulttime = requests.post(url=urltime, data=datatime).json()['data']
                self.dirmodel[channel]['durtime'] = int(resulttime)
            else:
                self.dirmodel[channel]['durtime'] = 0
            self.dirmodel[channel]['posttime'] = 0
        return sorted(channell)
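
# getframe: frame-producer process. Wraps the stream list in LoadStreamsSQLTNP
# and fans each batch out to the newest queue of every model (daytime hours
# only). Every 300 s it re-queries the task server; when the channel set has
# changed it closes the dataset, swaps in the new sources, and restarts.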
def getframe(queuelist, channelsl, source, tt, numworks, lock, numworkv):
    while True:
        print("dataloader")
        imgsz = [768, 768]
        print(f'source = {source}')
        dataset = LoadStreamsSQLTNP(channelsl, source, img_size=896,
                                    auto=True, vid_stride=20, tt=tt, numworks=numworks)
        bs = len(dataset)
        vid_path, vid_writer = [None] * bs, [None] * bs
        print(imgsz)
        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
        pretime = time.time()
        tag = 0
        sourcebase = 'project0117.db'
        for path, im0s, vid_cap, s, videotime, channels in dataset:
            if time.time() - pretime > 300:
                channellist = []
                pretime = time.time()
                data = {
                    "algorithmCode": None,
                    "deviceIp": None,
                    "fwqCode": None
                }
                try:
                    result = requests.post(url=urlt, data=data).json()['data']
                except Exception:
                    result = []
                for info in result:
                    data = {
                        "channel": info["deviceChannel"],
                        "ip": info["deviceAlgorithmIp"]
                    }
                    chaflag = any(info["deviceChannel"] in t for t in channellist)
                    if not chaflag:
                        address = requests.post(url=urlrtsp, data=data).json()['msg']
                        channellist.append((info['deviceChannel'], address))
                channelsa = []
                sourcea = []
                channellist = set(channellist)
                channellist = sorted(channellist, key=lambda x: x[0])
                for cha, add in channellist:
                    channelsa.append(cha)
                    sourcea.append(add)
                channelsl = sorted(channelsl)
                if channelsa != channelsl and len(channelsa) > 0:
                    print(f'channelsa = {channelsa}')
                    print(f'channelsl = {channelsl}')
                    dataset.close()
                    channelsl = channelsa
                    source = sourcea
                    break  # rebuild the dataset with the new channel set
            for key, value in queuelist.items():
                hour = time.localtime(time.time()).tm_hour
                if hour in range(7, 19):
                    value[-1].put((path, im0s, vid_cap, s, videotime, channels))
                    if value[-1].qsize() == 10:
                        value[-1].get()  # drop the oldest batch when the queue is full
                    else:
                        time.sleep(0.001)
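
# getmutpro: spawns numworks getframe producers and returns a dict mapping
# model name -> list of frame queues (one per producer). Each producer is
# started after a fresh bounded queue per model has been appended, so the
# child process snapshots its own queue as value[-1] inside getframe.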
def getmutpro(channels, source, streamlist, numworkv, lock, numworks=1, modellen=None):
    processlist = []
    queuelist = {}
    for i in range(numworks):
        # one fresh bounded queue per model for this producer
        for model in modellen:
            queue = Queue(maxsize=10)
            queuelist.setdefault(model, [])
            queuelist[model].append(queue)
        # one getframe producer per worker index
        process = Process(target=getframe,
                          args=(queuelist, channels, source, i, numworks, lock, numworkv))
        processlist.append(process)
        process.start()
    return queuelist
def modelfun(queue, weights, sourcedb, classes, device, classify, conf_thres, runmodel, v8=False):
    print(weights)
    detectdemo = Detect(weights=weights, source=sourcedb, classes=classes, device=device, classify=classify,
                        conf_thres=conf_thres, v8=v8)
    detectdemo.infer(queue, runmodel)
def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
    opt = parser.parse_args()
    return opt


def main(opt):
    # Leftover from the stock detect.py entry point; `run` is not defined in
    # this file and main() is never called below.
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    run(**vars(opt))
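
# Orchestration: sync tasks and RTSP addresses into SQLite, start the frame
# producers via getmutpro, then spawn one modelfun process per enabled model,
# re-scanning the database every 10 minutes for newly enabled models.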
if __name__ == '__main__':
    # torch.multiprocessing.set_start_method('spawn')
    # set_start_method('spawn')
    opt = parse_opt()
    dbpath = 'projectnew.db'
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    task(c, conn, urlt, urla)
    # refresh every channel's RTSP address from the preview service
    cursor = c.execute('select channel,algip from stream ')
    result = cursor.fetchall()
    for channel, algip in result:
        data = {
            "channel": channel,
            "ip": algip
        }
        address = requests.post(url=urlrtsp, data=data).json()['msg']
        c.execute('UPDATE STREAM set address= (?) where channel =(?)', (address, channel))
        conn.commit()
    cursor = c.execute(
        "SELECT modelname from CHANGESTREAM where modelname in ('helmet','smoke','uniform','fire','duty','sleep',"
        "'occupancy','personcar','phone','reflective','extinguisher','danager','inspection','cross','personcount',"
        "'arm','persontre','bag','fall','belt','clear')")
    content = cursor.fetchall()
    cursor = c.execute("SELECT address,channel from STREAM ")
    contenta = cursor.fetchall()
    source = []
    modellist = []
    addcha = []
    channellist = []
    for i in contenta:
        addcha.append((i[0], i[1]))
    addcha = set(addcha)
    addcha = sorted(addcha, key=lambda x: x[1])
    for add, cha in addcha:
        source.append(add)
        channellist.append(cha)
    print(addcha)
    source = list(source)
    cursor = c.execute(
        "SELECT modelname from STREAM where modelname in ('helmet','smoke','uniform','fire','duty','sleep',"
        "'occupancy','personcar','phone','reflective','extinguisher','danager','inspection','cross','personcount',"
        "'arm','persontre','bag','fall','belt','clear')")
    contentm = cursor.fetchall()
    for m in contentm:
        modellist.append(m[0])
    modellist = set(modellist)
    modellist = list(modellist)
    contentlist = []
    for i in content:
        contentlist.append(i[0])
    n = len(content)
    print(f'modelname={n}')
    print(content)
    print(source)
    streamqueue = Queue(maxsize=4)
    numworkv = Value('i', 0)
    manager = Manager()
    lock = multiprocessing.Lock()
    streamlist = manager.list()
    numworks = 7
    modellen = []
    for i in modellist:
        if i in contentlist:
            modellen.append(i)
    queuelist = getmutpro(channellist, source, streamlist, numworkv, lock, numworks, modellen)
    deid = 0
    runmodel = manager.dict()
    while True:
        # spawn a detector process for each enabled model that is not yet running
        for i in modellist:
            if i in contentlist and i not in runmodel:
                c.execute('select conf,cla from changestream where modelname = (?)', (i,))
                rea = c.fetchall()
                print(f'weights = {i}.pt')
                if i in ['duty', 'danager', 'inspection', 'cross', 'personcount']:
                    process = Process(target=modelfun, args=(
                        queuelist[i], f'{i}.pt', dbpath, [0], 0, rea[0][1], rea[0][0], runmodel, True))
                else:
                    process = Process(target=modelfun, args=(
                        queuelist[i], f'{i}.pt', dbpath, None, 0, rea[0][1], rea[0][0], runmodel, True))
                time.sleep(3)
                process.start()
                deid = deid + 1
                runmodel[i] = 1
        # re-scan the database every 10 minutes for newly enabled models
        time.sleep(600)
        task(c, conn, urlt, urla)
        cursor = c.execute(
            "SELECT modelname from CHANGESTREAM where modelname in ('helmet','smoke','uniform','fire','duty','sleep',"
            "'occupancy','personcar','phone','reflective','extinguisher','danager','inspection','cross','personcount',"
            "'arm','persontre','bag','fall','belt','clear')")
        content = cursor.fetchall()
        contentlist = []
        for con in content:
            contentlist.append(con[0])
        cursor = c.execute("SELECT address,channel from STREAM ")
        contenta = cursor.fetchall()
        source = []
        modellist = []
        addcha = []
        channellist = []
        for i in contenta:
            addcha.append((i[0], i[1]))
        addcha = set(addcha)
        addcha = sorted(addcha)
        for a, cha in addcha:
            source.append(a)
            channellist.append(cha)
        print(addcha)
        source = list(source)
        cursor = c.execute(
            "SELECT modelname from STREAM where modelname in ('helmet','smoke','uniform','fire','duty','sleep',"
            "'occupancy','personcar','phone','reflective','extinguisher','danager','inspection','cross','personcount',"
            "'arm','persontre','bag','fall','belt','clear')")
        contentm = cursor.fetchall()
        for m in contentm:
            modellist.append(m[0])
        modellist = set(modellist)
        n = len(content)
        print(f'modelname={n}')
        print(content)