first commit

xtj committed 2 months ago
commit 67ecf1a36f
100 files changed with 18610 additions and 0 deletions
  1. + 3 - 0  .idea/.gitignore
  2. + 12 - 0  .idea/code.iml
  3. + 6 - 0  .idea/inspectionProfiles/profiles_settings.xml
  4. + 7 - 0  .idea/misc.xml
  5. + 8 - 0  .idea/modules.xml
  6. + 6 - 0  .idea/vcs.xml
  7. + 226 - 0  classify/predict.py
  8. + 333 - 0  classify/train.py
  9. + 1481 - 0  classify/tutorial.ipynb
  10. + 170 - 0  classify/val.py
  11. + 74 - 0  data/Argoverse.yaml
  12. + 54 - 0  data/GlobalWheat2020.yaml
  13. + 1022 - 0  data/ImageNet.yaml
  14. + 438 - 0  data/Objects365.yaml
  15. + 53 - 0  data/SKU-110K.yaml
  16. + 100 - 0  data/VOC.yaml
  17. + 70 - 0  data/VisDrone.yaml
  18. + 116 - 0  data/coco.yaml
  19. + 101 - 0  data/coco128-seg.yaml
  20. + 101 - 0  data/coco128.yaml
  21. BIN  data/helmet.pt
  22. + 34 - 0  data/hyps/hyp.Objects365.yaml
  23. + 40 - 0  data/hyps/hyp.VOC.yaml
  24. + 35 - 0  data/hyps/hyp.no-augmentation.yaml
  25. + 34 - 0  data/hyps/hyp.scratch-high.yaml
  26. + 34 - 0  data/hyps/hyp.scratch-low.yaml
  27. + 34 - 0  data/hyps/hyp.scratch-med.yaml
  28. + 22 - 0  data/scripts/download_weights.sh
  29. + 56 - 0  data/scripts/get_coco.sh
  30. + 17 - 0  data/scripts/get_coco128.sh
  31. + 51 - 0  data/scripts/get_imagenet.sh
  32. + 18 - 0  data/smoke.yaml
  33. + 20 - 0  data/uniform.yaml
  34. + 153 - 0  data/xView.yaml
  35. + 2154 - 0  detectopencvmut0113auta.py
  36. + 2161 - 0  detectopencvmutbig.py
  37. + 1193 - 0  detectopencvmutfall0207.py
  38. + 857 - 0  detectopencvthrjump.py
  39. + 1247 - 0  detectopencvthrrun1230.py
  40. + 0 - 0  models/__init__.py
  41. BIN  models/__pycache__/__init__.cpython-311.pyc
  42. BIN  models/__pycache__/__init__.cpython-38.pyc
  43. BIN  models/__pycache__/common.cpython-311.pyc
  44. BIN  models/__pycache__/common.cpython-38.pyc
  45. BIN  models/__pycache__/experimental.cpython-38.pyc
  46. BIN  models/__pycache__/yolo.cpython-38.pyc
  47. + 871 - 0  models/common.py
  48. + 111 - 0  models/experimental.py
  49. + 59 - 0  models/hub/anchors.yaml
  50. + 51 - 0  models/hub/yolov3-spp.yaml
  51. + 41 - 0  models/hub/yolov3-tiny.yaml
  52. + 51 - 0  models/hub/yolov3.yaml
  53. + 48 - 0  models/hub/yolov5-bifpn.yaml
  54. + 42 - 0  models/hub/yolov5-fpn.yaml
  55. + 54 - 0  models/hub/yolov5-p2.yaml
  56. + 41 - 0  models/hub/yolov5-p34.yaml
  57. + 56 - 0  models/hub/yolov5-p6.yaml
  58. + 67 - 0  models/hub/yolov5-p7.yaml
  59. + 48 - 0  models/hub/yolov5-panet.yaml
  60. + 60 - 0  models/hub/yolov5l6.yaml
  61. + 60 - 0  models/hub/yolov5m6.yaml
  62. + 60 - 0  models/hub/yolov5n6.yaml
  63. + 49 - 0  models/hub/yolov5s-LeakyReLU.yaml
  64. + 48 - 0  models/hub/yolov5s-ghost.yaml
  65. + 48 - 0  models/hub/yolov5s-transformer.yaml
  66. + 60 - 0  models/hub/yolov5s6.yaml
  67. + 60 - 0  models/hub/yolov5x6.yaml
  68. + 48 - 0  models/segment/yolov5l-seg.yaml
  69. + 48 - 0  models/segment/yolov5m-seg.yaml
  70. + 48 - 0  models/segment/yolov5n-seg.yaml
  71. + 48 - 0  models/segment/yolov5s-seg.yaml
  72. + 48 - 0  models/segment/yolov5x-seg.yaml
  73. + 608 - 0  models/tf.py
  74. + 391 - 0  models/yolo.py
  75. + 48 - 0  models/yolov5l.yaml
  76. + 48 - 0  models/yolov5m.yaml
  77. + 48 - 0  models/yolov5n.yaml
  78. + 48 - 0  models/yolov5s.yaml
  79. + 48 - 0  models/yolov5x.yaml
  80. BIN  projectnew.db
  81. + 285 - 0  segment/predict.py
  82. + 666 - 0  segment/train.py
  83. + 595 - 0  segment/tutorial.ipynb
  84. + 473 - 0  segment/val.py
  85. + 86 - 0  utils/__init__.py
  86. BIN  utils/__pycache__/__init__.cpython-38.pyc
  87. BIN  utils/__pycache__/augmentations.cpython-38.pyc
  88. BIN  utils/__pycache__/autoanchor.cpython-38.pyc
  89. BIN  utils/__pycache__/autobatch.cpython-38.pyc
  90. BIN  utils/__pycache__/callbacks.cpython-38.pyc
  91. BIN  utils/__pycache__/dataloaders.cpython-38.pyc
  92. BIN  utils/__pycache__/downloads.cpython-38.pyc
  93. BIN  utils/__pycache__/general.cpython-38.pyc
  94. BIN  utils/__pycache__/loss.cpython-38.pyc
  95. BIN  utils/__pycache__/metrics.cpython-38.pyc
  96. BIN  utils/__pycache__/plots.cpython-38.pyc
  97. BIN  utils/__pycache__/renwu.cpython-38.pyc
  98. BIN  utils/__pycache__/torch_utils.cpython-38.pyc
  99. + 103 - 0  utils/activations.py
  100. + 397 - 0  utils/augmentations.py

+ 3 - 0
.idea/.gitignore

@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml

+ 12 - 0
.idea/code.iml

@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="jdk" jdkName="pytorch38" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="GOOGLE" />
+    <option name="myDocStringFormat" value="Google" />
+  </component>
+</module>

+ 6 - 0
.idea/inspectionProfiles/profiles_settings.xml

@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>

+ 7 - 0
.idea/misc.xml

@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="pytorch38" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="pytorch38" project-jdk-type="Python SDK" />
+</project>

+ 8 - 0
.idea/modules.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/code.iml" filepath="$PROJECT_DIR$/.idea/code.iml" />
+    </modules>
+  </component>
+</project>

+ 6 - 0
.idea/vcs.xml

@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>

+ 226 - 0
classify/predict.py

@@ -0,0 +1,226 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python classify/predict.py --weights yolov5s-cls.pt --source 0                               # webcam
+                                                                   img.jpg                         # image
+                                                                   vid.mp4                         # video
+                                                                   screen                          # screenshot
+                                                                   path/                           # directory
+                                                                   list.txt                        # list of images
+                                                                   list.streams                    # list of streams
+                                                                   'path/*.jpg'                    # glob
+                                                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python classify/predict.py --weights yolov5s-cls.pt                 # PyTorch
+                                           yolov5s-cls.torchscript        # TorchScript
+                                           yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                           yolov5s-cls_openvino_model     # OpenVINO
+                                           yolov5s-cls.engine             # TensorRT
+                                           yolov5s-cls.mlmodel            # CoreML (macOS-only)
+                                           yolov5s-cls_saved_model        # TensorFlow SavedModel
+                                           yolov5s-cls.pb                 # TensorFlow GraphDef
+                                           yolov5s-cls.tflite             # TensorFlow Lite
+                                           yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU
+                                           yolov5s-cls_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+import torch.nn.functional as F
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import DetectMultiBackend
+from utils.augmentations import classify_transforms
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, print_args, strip_optimizer)
+from utils.plots import Annotator
+from utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+        weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
+        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+        imgsz=(224, 224),  # inference size (height, width)
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        view_img=False,  # show results
+        save_txt=False,  # save results to *.txt
+        nosave=False,  # do not save images/videos
+        augment=False,  # augmented inference
+        visualize=False,  # visualize features
+        update=False,  # update all models
+        project=ROOT / 'runs/predict-cls',  # save results to project/name
+        name='exp',  # save results to project/name
+        exist_ok=False,  # existing project/name ok, do not increment
+        half=False,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        vid_stride=1,  # video frame-rate stride
+):
+    source = str(source)
+    save_img = not nosave and not source.endswith('.txt')  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+    screenshot = source.lower().startswith('screen')
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    # Directories
+    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    if webcam:
+        view_img = check_imshow(warn=True)
+        dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
+        bs = len(dataset)
+    elif screenshot:
+        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+    else:
+        dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
+    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    for path, im, im0s, vid_cap, s in dataset:
+        with dt[0]:
+            im = torch.Tensor(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            results = model(im)
+
+        # Post-process
+        with dt[2]:
+            pred = F.softmax(results, dim=1)  # probabilities
+
+        # Process predictions
+        for i, prob in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+
+            s += '%gx%g ' % im.shape[2:]  # print string
+            annotator = Annotator(im0, example=str(names), pil=True)
+
+            # Print results
+            top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
+            s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "
+
+            # Write results
+            text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
+            if save_img or view_img:  # Add bbox to image
+                annotator.text((32, 32), text, txt_color=(255, 255, 255))
+            if save_txt:  # Write to file
+                with open(f'{txt_path}.txt', 'a') as f:
+                    f.write(text + '\n')
+
+            # Stream results
+            im0 = annotator.result()
+            if view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                cv2.imshow(str(p), im0)
+                cv2.waitKey(1)  # 1 millisecond
+
+            # Save results (image with detections)
+            if save_img:
+                if dataset.mode == 'image':
+                    cv2.imwrite(save_path, im0)
+                else:  # 'video' or 'stream'
+                    if vid_path[i] != save_path:  # new video
+                        vid_path[i] = save_path
+                        if isinstance(vid_writer[i], cv2.VideoWriter):
+                            vid_writer[i].release()  # release previous video writer
+                        if vid_cap:  # video
+                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                        else:  # stream
+                            fps, w, h = 30, im0.shape[1], im0.shape[0]
+                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    vid_writer[i].write(im0)
+
+        # Print time (inference-only)
+        LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms')
+
+    # Print results
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)

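The classify/predict.py file added above exposes its entry point as a plain run() function, so it can also be driven from Python rather than the CLI. The following is a minimal sketch, not part of the commit, using only parameters that appear in the run() signature; it assumes the repository root is the working directory (or on sys.path) and that the yolov5s-cls.pt checkpoint is available or downloadable.

```python
# Minimal sketch: programmatic classification inference via classify/predict.py.
# Assumes the YOLOv5 repo root is on sys.path and 'yolov5s-cls.pt' can be found.
from classify.predict import run

run(
    weights='yolov5s-cls.pt',  # classification checkpoint
    source='data/images',      # file/dir/URL/glob/screen/0(webcam)
    imgsz=(224, 224),          # inference size (height, width)
    device='',                 # '' selects CUDA if available, otherwise CPU
)
# Annotated results are written under runs/predict-cls/exp* as in the CLI usage.
```
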
+ 333 - 0
classify/train.py

@@ -0,0 +1,333 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Train a YOLOv5 classifier model on a classification dataset
+
+Usage - Single-GPU training:
+    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
+
+Usage - Multi-GPU DDP training:
+    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+
+Datasets:           --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
+YOLOv5-cls models:  --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
+Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+import time
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+
+import torch
+import torch.distributed as dist
+import torch.hub as hub
+import torch.optim.lr_scheduler as lr_scheduler
+import torchvision
+from torch.cuda import amp
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from classify import val as validate
+from models.experimental import attempt_load
+from models.yolo import ClassificationModel, DetectionModel
+from utils.dataloaders import create_classification_dataloader
+from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
+                           check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
+from utils.loggers import GenericLogger
+from utils.plots import imshow_cls
+from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
+                               smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+GIT_INFO = check_git_info()
+
+
+def train(opt, device):
+    init_seeds(opt.seed + 1 + RANK, deterministic=True)
+    save_dir, data, bs, epochs, nw, imgsz, pretrained = \
+        opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \
+        opt.imgsz, str(opt.pretrained).lower() == 'true'
+    cuda = device.type != 'cpu'
+
+    # Directories
+    wdir = save_dir / 'weights'
+    wdir.mkdir(parents=True, exist_ok=True)  # make dir
+    last, best = wdir / 'last.pt', wdir / 'best.pt'
+
+    # Save run settings
+    yaml_save(save_dir / 'opt.yaml', vars(opt))
+
+    # Logger
+    logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None
+
+    # Download Dataset
+    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
+        data_dir = data if data.is_dir() else (DATASETS_DIR / data)
+        if not data_dir.is_dir():
+            LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
+            t = time.time()
+            if str(data) == 'imagenet':
+                subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True)
+            else:
+                url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
+                download(url, dir=data_dir.parent)
+            s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
+            LOGGER.info(s)
+
+    # Dataloaders
+    nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()])  # number of classes
+    trainloader = create_classification_dataloader(path=data_dir / 'train',
+                                                   imgsz=imgsz,
+                                                   batch_size=bs // WORLD_SIZE,
+                                                   augment=True,
+                                                   cache=opt.cache,
+                                                   rank=LOCAL_RANK,
+                                                   workers=nw)
+
+    test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val'  # data/test or data/val
+    if RANK in {-1, 0}:
+        testloader = create_classification_dataloader(path=test_dir,
+                                                      imgsz=imgsz,
+                                                      batch_size=bs // WORLD_SIZE * 2,
+                                                      augment=False,
+                                                      cache=opt.cache,
+                                                      rank=-1,
+                                                      workers=nw)
+
+    # Model
+    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
+        if Path(opt.model).is_file() or opt.model.endswith('.pt'):
+            model = attempt_load(opt.model, device='cpu', fuse=False)
+        elif opt.model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
+            model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
+        else:
+            m = hub.list('ultralytics/yolov5')  # + hub.list('pytorch/vision')  # models
+            raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
+        if isinstance(model, DetectionModel):
+            LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
+            model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10)  # convert to classification model
+        reshape_classifier_output(model, nc)  # update class count
+    for m in model.modules():
+        if not pretrained and hasattr(m, 'reset_parameters'):
+            m.reset_parameters()
+        if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
+            m.p = opt.dropout  # set dropout
+    for p in model.parameters():
+        p.requires_grad = True  # for training
+    model = model.to(device)
+
+    # Info
+    if RANK in {-1, 0}:
+        model.names = trainloader.dataset.classes  # attach class names
+        model.transforms = testloader.dataset.torch_transforms  # attach inference transforms
+        model_info(model)
+        if opt.verbose:
+            LOGGER.info(model)
+        images, labels = next(iter(trainloader))
+        file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg')
+        logger.log_images(file, name='Train Examples')
+        logger.log_graph(model, imgsz)  # log model
+
+    # Optimizer
+    optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)
+
+    # Scheduler
+    lrf = 0.01  # final lr (fraction of lr0)
+    # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
+    lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear
+    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
+    # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
+    #                                    final_div_factor=1 / 25 / lrf)
+
+    # EMA
+    ema = ModelEMA(model) if RANK in {-1, 0} else None
+
+    # DDP mode
+    if cuda and RANK != -1:
+        model = smart_DDP(model)
+
+    # Train
+    t0 = time.time()
+    criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing)  # loss function
+    best_fitness = 0.0
+    scaler = amp.GradScaler(enabled=cuda)
+    val = test_dir.stem  # 'val' or 'test'
+    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n'
+                f'Using {nw * WORLD_SIZE} dataloader workers\n'
+                f"Logging results to {colorstr('bold', save_dir)}\n"
+                f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
+                f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}")
+    for epoch in range(epochs):  # loop over the dataset multiple times
+        tloss, vloss, fitness = 0.0, 0.0, 0.0  # train loss, val loss, fitness
+        model.train()
+        if RANK != -1:
+            trainloader.sampler.set_epoch(epoch)
+        pbar = enumerate(trainloader)
+        if RANK in {-1, 0}:
+            pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
+        for i, (images, labels) in pbar:  # progress bar
+            images, labels = images.to(device, non_blocking=True), labels.to(device)
+
+            # Forward
+            with amp.autocast(enabled=cuda):  # stability issues when enabled
+                loss = criterion(model(images), labels)
+
+            # Backward
+            scaler.scale(loss).backward()
+
+            # Optimize
+            scaler.unscale_(optimizer)  # unscale gradients
+            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
+            scaler.step(optimizer)
+            scaler.update()
+            optimizer.zero_grad()
+            if ema:
+                ema.update(model)
+
+            if RANK in {-1, 0}:
+                # Print
+                tloss = (tloss * i + loss.item()) / (i + 1)  # update mean losses
+                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
+                pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36
+
+                # Test
+                if i == len(pbar) - 1:  # last batch
+                    top1, top5, vloss = validate.run(model=ema.ema,
+                                                     dataloader=testloader,
+                                                     criterion=criterion,
+                                                     pbar=pbar)  # test accuracy, loss
+                    fitness = top1  # define fitness as top1 accuracy
+
+        # Scheduler
+        scheduler.step()
+
+        # Log metrics
+        if RANK in {-1, 0}:
+            # Best fitness
+            if fitness > best_fitness:
+                best_fitness = fitness
+
+            # Log
+            metrics = {
+                'train/loss': tloss,
+                f'{val}/loss': vloss,
+                'metrics/accuracy_top1': top1,
+                'metrics/accuracy_top5': top5,
+                'lr/0': optimizer.param_groups[0]['lr']}  # learning rate
+            logger.log_metrics(metrics, epoch)
+
+            # Save model
+            final_epoch = epoch + 1 == epochs
+            if (not opt.nosave) or final_epoch:
+                ckpt = {
+                    'epoch': epoch,
+                    'best_fitness': best_fitness,
+                    'model': deepcopy(ema.ema).half(),  # deepcopy(de_parallel(model)).half(),
+                    'ema': None,  # deepcopy(ema.ema).half(),
+                    'updates': ema.updates,
+                    'optimizer': None,  # optimizer.state_dict(),
+                    'opt': vars(opt),
+                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
+                    'date': datetime.now().isoformat()}
+
+                # Save last, best and delete
+                torch.save(ckpt, last)
+                if best_fitness == fitness:
+                    torch.save(ckpt, best)
+                del ckpt
+
+    # Train complete
+    if RANK in {-1, 0} and final_epoch:
+        LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
+                    f"\nResults saved to {colorstr('bold', save_dir)}"
+                    f'\nPredict:         python classify/predict.py --weights {best} --source im.jpg'
+                    f'\nValidate:        python classify/val.py --weights {best} --data {data_dir}'
+                    f'\nExport:          python export.py --weights {best} --include onnx'
+                    f"\nPyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
+                    f'\nVisualize:       https://netron.app\n')
+
+        # Plot examples
+        images, labels = (x[:25] for x in next(iter(testloader)))  # first 25 images and labels
+        pred = torch.max(ema.ema(images.to(device)), 1)[1]
+        file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg')
+
+        # Log results
+        meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()}
+        logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch)
+        logger.log_model(best, epochs, metadata=meta)
+
+
+def parse_opt(known=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path')
+    parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...')
+    parser.add_argument('--epochs', type=int, default=10, help='total training epochs')
+    parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)')
+    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False')
+    parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer')
+    parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate')
+    parser.add_argument('--decay', type=float, default=5e-5, help='weight decay')
+    parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon')
+    parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head')
+    parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)')
+    parser.add_argument('--verbose', action='store_true', help='Verbose mode')
+    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+    return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt):
+    # Checks
+    if RANK in {-1, 0}:
+        print_args(vars(opt))
+        check_git_status()
+        check_requirements(ROOT / 'requirements.txt')
+
+    # DDP mode
+    device = select_device(opt.device, batch_size=opt.batch_size)
+    if LOCAL_RANK != -1:
+        assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
+        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
+        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+        torch.cuda.set_device(LOCAL_RANK)
+        device = torch.device('cuda', LOCAL_RANK)
+        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
+
+    # Parameters
+    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)  # increment run
+
+    # Train
+    train(opt, device)
+
+
+def run(**kwargs):
+    # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        setattr(opt, k, v)
+    main(opt)
+    return opt
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)

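Similarly, classify/train.py above ends with a run(**kwargs) helper whose keywords map onto the parse_opt() arguments, as noted in its own usage comment. A minimal sketch, not part of the commit, assuming a single GPU or CPU and that the imagenette160 dataset can be downloaded:

```python
# Minimal sketch: programmatic classifier training via classify/train.py's run() helper.
# Keywords map onto parse_opt() options, e.g. batch_size -> --batch-size.
from classify import train

opt = train.run(
    model='yolov5s-cls.pt',  # initial weights; torchvision names such as 'resnet50' also work
    data='imagenette160',    # dataset name or path (downloaded if missing)
    epochs=5,
    imgsz=224,
    batch_size=16,
)
print(opt.save_dir)          # results directory, e.g. runs/train-cls/exp
```
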
+ 1481 - 0
classify/tutorial.ipynb

@@ -0,0 +1,1481 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "t6MPjfT5NrKQ"
+      },
+      "source": [
+        "<div align=\"center\">\n",
+        "\n",
+        "  <a href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
+        "    <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\"></a>\n",
+        "\n",
+        "\n",
+        "<br>\n",
+        "  <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
+        "  <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/classify/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+        "  <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+        "<br>\n",
+        "\n",
+        "This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
+        "\n",
+        "</div>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "7mGmQbAO5pQb"
+      },
+      "source": [
+        "# Setup\n",
+        "\n",
+        "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "wbvMlHd_QwMG",
+        "outputId": "0806e375-610d-4ec0-c867-763dbb518279"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stderr",
+          "text": [
+            "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
+          ]
+        },
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
+          ]
+        }
+      ],
+      "source": [
+        "!git clone https://github.com/ultralytics/yolov5  # clone\n",
+        "%cd yolov5\n",
+        "%pip install -qr requirements.txt  # install\n",
+        "\n",
+        "import torch\n",
+        "import utils\n",
+        "display = utils.notebook_init()  # checks"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "4JnkELT0cIJg"
+      },
+      "source": [
+        "# 1. Predict\n",
+        "\n",
+        "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n",
+        "\n",
+        "```shell\n",
+        "python classify/predict.py --source 0  # webcam\n",
+        "                              img.jpg  # image \n",
+        "                              vid.mp4  # video\n",
+        "                              screen  # screenshot\n",
+        "                              path/  # directory\n",
+        "                              'path/*.jpg'  # glob\n",
+        "                              'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n",
+        "                              'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
+        "```"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "zR9ZbuQCH7FX",
+        "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n",
+            "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+            "\n",
+            "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n",
+            "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n",
+            "\n",
+            "Fusing layers... \n",
+            "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n",
+            "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n",
+            "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n",
+            "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n",
+            "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n"
+          ]
+        }
+      ],
+      "source": [
+        "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n",
+        "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "hkAzDWJ7cWTr"
+      },
+      "source": [
+        "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n",
+        "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/202808393-50deb439-ae1b-4246-a685-7560c9b37211.jpg\" width=\"600\">"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "0eq1SMWl6Sfn"
+      },
+      "source": [
+        "# 2. Validate\n",
+        "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "WQPtK1QYVaD_",
+        "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "--2022-11-22 19:53:40--  https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n",
+            "Resolving image-net.org (image-net.org)... 171.64.68.16\n",
+            "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n",
+            "HTTP request sent, awaiting response... 200 OK\n",
+            "Length: 6744924160 (6.3G) [application/x-tar]\n",
+            "Saving to: ‘ILSVRC2012_img_val.tar’\n",
+            "\n",
+            "ILSVRC2012_img_val. 100%[===================>]   6.28G  16.1MB/s    in 10m 52s \n",
+            "\n",
+            "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n",
+            "\n"
+          ]
+        }
+      ],
+      "source": [
+        "# Download Imagenet val (6.3G, 50000 images)\n",
+        "!bash data/scripts/get_imagenet.sh --val"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "X58w8JLpMnjH",
+        "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n",
+            "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+            "\n",
+            "Fusing layers... \n",
+            "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n",
+            "validating: 100% 391/391 [04:57<00:00,  1.31it/s]\n",
+            "                   Class      Images    top1_acc    top5_acc\n",
+            "                     all       50000       0.715       0.902\n",
+            "                   tench          50        0.94        0.98\n",
+            "                goldfish          50        0.88        0.92\n",
+            "       great white shark          50        0.78        0.96\n",
+            "             tiger shark          50        0.68        0.96\n",
+            "        hammerhead shark          50        0.82        0.92\n",
+            "            electric ray          50        0.76         0.9\n",
+            "                stingray          50         0.7         0.9\n",
+            "                    cock          50        0.78        0.92\n",
+            "                     hen          50        0.84        0.96\n",
+            "                 ostrich          50        0.98           1\n",
+            "               brambling          50         0.9        0.96\n",
+            "               goldfinch          50        0.92        0.98\n",
+            "             house finch          50        0.88        0.96\n",
+            "                   junco          50        0.94        0.98\n",
+            "          indigo bunting          50        0.86        0.88\n",
+            "          American robin          50         0.9        0.96\n",
+            "                  bulbul          50        0.84        0.96\n",
+            "                     jay          50         0.9        0.96\n",
+            "                  magpie          50        0.84        0.96\n",
+            "               chickadee          50         0.9           1\n",
+            "         American dipper          50        0.82        0.92\n",
+            "                    kite          50        0.76        0.94\n",
+            "              bald eagle          50        0.92           1\n",
+            "                 vulture          50        0.96           1\n",
+            "          great grey owl          50        0.94        0.98\n",
+            "         fire salamander          50        0.96        0.98\n",
+            "             smooth newt          50        0.58        0.94\n",
+            "                    newt          50        0.74         0.9\n",
+            "      spotted salamander          50        0.86        0.94\n",
+            "                 axolotl          50        0.86        0.96\n",
+            "       American bullfrog          50        0.78        0.92\n",
+            "               tree frog          50        0.84        0.96\n",
+            "             tailed frog          50        0.48         0.8\n",
+            "   loggerhead sea turtle          50        0.68        0.94\n",
+            "  leatherback sea turtle          50         0.5         0.8\n",
+            "              mud turtle          50        0.64        0.84\n",
+            "                terrapin          50        0.52        0.98\n",
+            "              box turtle          50        0.84        0.98\n",
+            "            banded gecko          50         0.7        0.88\n",
+            "            green iguana          50        0.76        0.94\n",
+            "          Carolina anole          50        0.58        0.96\n",
+            "desert grassland whiptail lizard          50        0.82        0.94\n",
+            "                   agama          50        0.74        0.92\n",
+            "   frilled-necked lizard          50        0.84        0.86\n",
+            "        alligator lizard          50        0.58        0.78\n",
+            "            Gila monster          50        0.72         0.8\n",
+            "   European green lizard          50        0.42         0.9\n",
+            "               chameleon          50        0.76        0.84\n",
+            "           Komodo dragon          50        0.86        0.96\n",
+            "          Nile crocodile          50         0.7        0.84\n",
+            "      American alligator          50        0.76        0.96\n",
+            "             triceratops          50         0.9        0.94\n",
+            "              worm snake          50        0.76        0.88\n",
+            "       ring-necked snake          50         0.8        0.92\n",
+            " eastern hog-nosed snake          50        0.58        0.88\n",
+            "      smooth green snake          50         0.6        0.94\n",
+            "               kingsnake          50        0.82         0.9\n",
+            "            garter snake          50        0.88        0.94\n",
+            "             water snake          50         0.7        0.94\n",
+            "              vine snake          50        0.66        0.76\n",
+            "             night snake          50        0.34        0.82\n",
+            "         boa constrictor          50         0.8        0.96\n",
+            "     African rock python          50        0.48        0.76\n",
+            "            Indian cobra          50        0.82        0.94\n",
+            "             green mamba          50        0.54        0.86\n",
+            "               sea snake          50        0.62         0.9\n",
+            "    Saharan horned viper          50        0.56        0.86\n",
+            "eastern diamondback rattlesnake          50         0.6        0.86\n",
+            "              sidewinder          50        0.28        0.86\n",
+            "               trilobite          50        0.98        0.98\n",
+            "              harvestman          50        0.86        0.94\n",
+            "                scorpion          50        0.86        0.94\n",
+            "    yellow garden spider          50        0.92        0.96\n",
+            "             barn spider          50        0.38        0.98\n",
+            "  European garden spider          50        0.62        0.98\n",
+            "    southern black widow          50        0.88        0.94\n",
+            "               tarantula          50        0.94           1\n",
+            "             wolf spider          50        0.82        0.92\n",
+            "                    tick          50        0.74        0.84\n",
+            "               centipede          50        0.68        0.82\n",
+            "            black grouse          50        0.88        0.98\n",
+            "               ptarmigan          50        0.78        0.94\n",
+            "           ruffed grouse          50        0.88           1\n",
+            "          prairie grouse          50        0.92           1\n",
+            "                 peacock          50        0.88         0.9\n",
+            "                   quail          50         0.9        0.94\n",
+            "               partridge          50        0.74        0.96\n",
+            "             grey parrot          50         0.9        0.96\n",
+            "                   macaw          50        0.88        0.98\n",
+            "sulphur-crested cockatoo          50        0.86        0.92\n",
+            "                lorikeet          50        0.96           1\n",
+            "                  coucal          50        0.82        0.88\n",
+            "               bee eater          50        0.96        0.98\n",
+            "                hornbill          50         0.9        0.96\n",
+            "             hummingbird          50        0.88        0.96\n",
+            "                 jacamar          50        0.92        0.94\n",
+            "                  toucan          50        0.84        0.94\n",
+            "                    duck          50        0.76        0.94\n",
+            "  red-breasted merganser          50        0.86        0.96\n",
+            "                   goose          50        0.74        0.96\n",
+            "              black swan          50        0.94        0.98\n",
+            "                  tusker          50        0.54        0.92\n",
+            "                 echidna          50        0.98           1\n",
+            "                platypus          50        0.72        0.84\n",
+            "                 wallaby          50        0.78        0.88\n",
+            "                   koala          50        0.84        0.92\n",
+            "                  wombat          50        0.78        0.84\n",
+            "               jellyfish          50        0.88        0.96\n",
+            "             sea anemone          50        0.72         0.9\n",
+            "             brain coral          50        0.88        0.96\n",
+            "                flatworm          50         0.8        0.98\n",
+            "                nematode          50        0.86         0.9\n",
+            "                   conch          50        0.74        0.88\n",
+            "                   snail          50        0.78        0.88\n",
+            "                    slug          50        0.74        0.82\n",
+            "                sea slug          50        0.88        0.98\n",
+            "                  chiton          50        0.88        0.98\n",
+            "      chambered nautilus          50        0.88        0.92\n",
+            "          Dungeness crab          50        0.78        0.94\n",
+            "               rock crab          50        0.68        0.86\n",
+            "            fiddler crab          50        0.64        0.86\n",
+            "           red king crab          50        0.76        0.96\n",
+            "        American lobster          50        0.78        0.96\n",
+            "           spiny lobster          50        0.74        0.88\n",
+            "                crayfish          50        0.56        0.86\n",
+            "             hermit crab          50        0.78        0.96\n",
+            "                  isopod          50        0.66        0.78\n",
+            "             white stork          50        0.88        0.96\n",
+            "             black stork          50        0.84        0.98\n",
+            "               spoonbill          50        0.96           1\n",
+            "                flamingo          50        0.94           1\n",
+            "       little blue heron          50        0.92        0.98\n",
+            "             great egret          50         0.9        0.96\n",
+            "                 bittern          50        0.86        0.94\n",
+            "            crane (bird)          50        0.62         0.9\n",
+            "                 limpkin          50        0.98           1\n",
+            "        common gallinule          50        0.92        0.96\n",
+            "           American coot          50         0.9        0.98\n",
+            "                 bustard          50        0.92        0.96\n",
+            "         ruddy turnstone          50        0.94           1\n",
+            "                  dunlin          50        0.86        0.94\n",
+            "         common redshank          50         0.9        0.96\n",
+            "               dowitcher          50        0.84        0.96\n",
+            "           oystercatcher          50        0.86        0.94\n",
+            "                 pelican          50        0.92        0.96\n",
+            "            king penguin          50        0.88        0.96\n",
+            "               albatross          50         0.9           1\n",
+            "              grey whale          50        0.84        0.92\n",
+            "            killer whale          50        0.92           1\n",
+            "                  dugong          50        0.84        0.96\n",
+            "                sea lion          50        0.82        0.92\n",
+            "               Chihuahua          50        0.66        0.84\n",
+            "           Japanese Chin          50        0.72        0.98\n",
+            "                 Maltese          50        0.76        0.94\n",
+            "               Pekingese          50        0.84        0.94\n",
+            "                Shih Tzu          50        0.74        0.96\n",
+            "    King Charles Spaniel          50        0.88        0.98\n",
+            "                Papillon          50        0.86        0.94\n",
+            "             toy terrier          50        0.48        0.94\n",
+            "     Rhodesian Ridgeback          50        0.76        0.98\n",
+            "            Afghan Hound          50        0.84           1\n",
+            "            Basset Hound          50         0.8        0.92\n",
+            "                  Beagle          50        0.82        0.96\n",
+            "              Bloodhound          50        0.48        0.72\n",
+            "      Bluetick Coonhound          50        0.86        0.94\n",
+            " Black and Tan Coonhound          50        0.54         0.8\n",
+            "Treeing Walker Coonhound          50        0.66        0.98\n",
+            "        English foxhound          50        0.32        0.84\n",
+            "       Redbone Coonhound          50        0.62        0.94\n",
+            "                  borzoi          50        0.92           1\n",
+            "         Irish Wolfhound          50        0.48        0.88\n",
+            "       Italian Greyhound          50        0.76        0.98\n",
+            "                 Whippet          50        0.74        0.92\n",
+            "            Ibizan Hound          50         0.6        0.86\n",
+            "      Norwegian Elkhound          50        0.88        0.98\n",
+            "              Otterhound          50        0.62         0.9\n",
+            "                  Saluki          50        0.72        0.92\n",
+            "      Scottish Deerhound          50        0.86        0.98\n",
+            "              Weimaraner          50        0.88        0.94\n",
+            "Staffordshire Bull Terrier          50        0.66        0.98\n",
+            "American Staffordshire Terrier          50        0.64        0.92\n",
+            "      Bedlington Terrier          50         0.9        0.92\n",
+            "          Border Terrier          50        0.86        0.92\n",
+            "      Kerry Blue Terrier          50        0.78        0.98\n",
+            "           Irish Terrier          50         0.7        0.96\n",
+            "         Norfolk Terrier          50        0.68         0.9\n",
+            "         Norwich Terrier          50        0.72           1\n",
+            "       Yorkshire Terrier          50        0.66         0.9\n",
+            "        Wire Fox Terrier          50        0.64        0.98\n",
+            "        Lakeland Terrier          50        0.74        0.92\n",
+            "        Sealyham Terrier          50        0.76         0.9\n",
+            "        Airedale Terrier          50        0.82        0.92\n",
+            "           Cairn Terrier          50        0.76         0.9\n",
+            "      Australian Terrier          50        0.48        0.84\n",
+            "  Dandie Dinmont Terrier          50        0.82        0.92\n",
+            "          Boston Terrier          50        0.92           1\n",
+            "     Miniature Schnauzer          50        0.68         0.9\n",
+            "         Giant Schnauzer          50        0.72        0.98\n",
+            "      Standard Schnauzer          50        0.74           1\n",
+            "        Scottish Terrier          50        0.76        0.96\n",
+            "         Tibetan Terrier          50        0.48           1\n",
+            "Australian Silky Terrier          50        0.66        0.96\n",
+            "Soft-coated Wheaten Terrier          50        0.74        0.96\n",
+            "West Highland White Terrier          50        0.88        0.96\n",
+            "              Lhasa Apso          50        0.68        0.96\n",
+            "   Flat-Coated Retriever          50        0.72        0.94\n",
+            "  Curly-coated Retriever          50        0.82        0.94\n",
+            "        Golden Retriever          50        0.86        0.94\n",
+            "      Labrador Retriever          50        0.82        0.94\n",
+            "Chesapeake Bay Retriever          50        0.76        0.96\n",
+            "German Shorthaired Pointer          50         0.8        0.96\n",
+            "                  Vizsla          50        0.68        0.96\n",
+            "          English Setter          50         0.7           1\n",
+            "            Irish Setter          50         0.8         0.9\n",
+            "           Gordon Setter          50        0.84        0.92\n",
+            "                Brittany          50        0.84        0.96\n",
+            "         Clumber Spaniel          50        0.92        0.96\n",
+            "English Springer Spaniel          50        0.88           1\n",
+            "  Welsh Springer Spaniel          50        0.92           1\n",
+            "         Cocker Spaniels          50         0.7        0.94\n",
+            "          Sussex Spaniel          50        0.72        0.92\n",
+            "     Irish Water Spaniel          50        0.88        0.98\n",
+            "                  Kuvasz          50        0.66         0.9\n",
+            "              Schipperke          50         0.9        0.98\n",
+            "             Groenendael          50         0.8        0.94\n",
+            "                Malinois          50        0.86        0.98\n",
+            "                  Briard          50        0.52         0.8\n",
+            "       Australian Kelpie          50         0.6        0.88\n",
+            "                Komondor          50        0.88        0.94\n",
+            "    Old English Sheepdog          50        0.94        0.98\n",
+            "       Shetland Sheepdog          50        0.74         0.9\n",
+            "                  collie          50         0.6        0.96\n",
+            "           Border Collie          50        0.74        0.96\n",
+            "    Bouvier des Flandres          50        0.78        0.94\n",
+            "              Rottweiler          50        0.88        0.96\n",
+            "     German Shepherd Dog          50         0.8        0.98\n",
+            "               Dobermann          50        0.68        0.96\n",
+            "      Miniature Pinscher          50        0.76        0.88\n",
+            "Greater Swiss Mountain Dog          50        0.68        0.94\n",
+            "    Bernese Mountain Dog          50        0.96           1\n",
+            "  Appenzeller Sennenhund          50        0.22           1\n",
+            "  Entlebucher Sennenhund          50        0.64        0.98\n",
+            "                   Boxer          50         0.7        0.92\n",
+            "             Bullmastiff          50        0.78        0.98\n",
+            "         Tibetan Mastiff          50        0.88        0.96\n",
+            "          French Bulldog          50        0.84        0.94\n",
+            "              Great Dane          50        0.54         0.9\n",
+            "             St. Bernard          50        0.92           1\n",
+            "                   husky          50        0.46        0.98\n",
+            "        Alaskan Malamute          50        0.76        0.96\n",
+            "          Siberian Husky          50        0.46        0.98\n",
+            "               Dalmatian          50        0.94        0.98\n",
+            "           Affenpinscher          50        0.78         0.9\n",
+            "                 Basenji          50        0.92        0.94\n",
+            "                     pug          50        0.94        0.98\n",
+            "              Leonberger          50           1           1\n",
+            "            Newfoundland          50        0.78        0.96\n",
+            "   Pyrenean Mountain Dog          50        0.78        0.96\n",
+            "                 Samoyed          50        0.96           1\n",
+            "              Pomeranian          50        0.98           1\n",
+            "               Chow Chow          50         0.9        0.96\n",
+            "                Keeshond          50        0.88        0.94\n",
+            "      Griffon Bruxellois          50        0.84        0.98\n",
+            "    Pembroke Welsh Corgi          50        0.82        0.94\n",
+            "    Cardigan Welsh Corgi          50        0.66        0.98\n",
+            "              Toy Poodle          50        0.52        0.88\n",
+            "        Miniature Poodle          50        0.52        0.92\n",
+            "         Standard Poodle          50         0.8           1\n",
+            "    Mexican hairless dog          50        0.88        0.98\n",
+            "               grey wolf          50        0.82        0.92\n",
+            "     Alaskan tundra wolf          50        0.78        0.98\n",
+            "                red wolf          50        0.48         0.9\n",
+            "                  coyote          50        0.64        0.86\n",
+            "                   dingo          50        0.76        0.88\n",
+            "                   dhole          50         0.9        0.98\n",
+            "        African wild dog          50        0.98           1\n",
+            "                   hyena          50        0.88        0.96\n",
+            "                 red fox          50        0.54        0.92\n",
+            "                 kit fox          50        0.72        0.98\n",
+            "              Arctic fox          50        0.94           1\n",
+            "                grey fox          50         0.7        0.94\n",
+            "               tabby cat          50        0.54        0.92\n",
+            "               tiger cat          50        0.22        0.94\n",
+            "             Persian cat          50         0.9        0.98\n",
+            "             Siamese cat          50        0.96           1\n",
+            "            Egyptian Mau          50        0.54         0.8\n",
+            "                  cougar          50         0.9           1\n",
+            "                    lynx          50        0.72        0.88\n",
+            "                 leopard          50        0.78        0.98\n",
+            "            snow leopard          50         0.9        0.98\n",
+            "                  jaguar          50         0.7        0.94\n",
+            "                    lion          50         0.9        0.98\n",
+            "                   tiger          50        0.92        0.98\n",
+            "                 cheetah          50        0.94        0.98\n",
+            "              brown bear          50        0.94        0.98\n",
+            "     American black bear          50         0.8           1\n",
+            "              polar bear          50        0.84        0.96\n",
+            "              sloth bear          50        0.72        0.92\n",
+            "                mongoose          50         0.7        0.92\n",
+            "                 meerkat          50        0.82        0.92\n",
+            "            tiger beetle          50        0.92        0.94\n",
+            "                 ladybug          50        0.86        0.94\n",
+            "           ground beetle          50        0.64        0.94\n",
+            "         longhorn beetle          50        0.62        0.88\n",
+            "             leaf beetle          50        0.64        0.98\n",
+            "             dung beetle          50        0.86        0.98\n",
+            "       rhinoceros beetle          50        0.86        0.94\n",
+            "                  weevil          50         0.9           1\n",
+            "                     fly          50        0.78        0.94\n",
+            "                     bee          50        0.68        0.94\n",
+            "                     ant          50        0.68        0.78\n",
+            "             grasshopper          50         0.5        0.92\n",
+            "                 cricket          50        0.64        0.92\n",
+            "            stick insect          50        0.64        0.92\n",
+            "               cockroach          50        0.72         0.8\n",
+            "                  mantis          50        0.64        0.86\n",
+            "                  cicada          50         0.9        0.96\n",
+            "              leafhopper          50        0.88        0.94\n",
+            "                lacewing          50        0.78        0.92\n",
+            "               dragonfly          50        0.82        0.98\n",
+            "               damselfly          50        0.82           1\n",
+            "             red admiral          50        0.94        0.96\n",
+            "                 ringlet          50        0.86        0.98\n",
+            "       monarch butterfly          50         0.9        0.92\n",
+            "             small white          50         0.9           1\n",
+            "       sulphur butterfly          50        0.92           1\n",
+            "gossamer-winged butterfly          50        0.88           1\n",
+            "                starfish          50        0.88        0.92\n",
+            "              sea urchin          50        0.84        0.94\n",
+            "            sea cucumber          50        0.66        0.84\n",
+            "       cottontail rabbit          50        0.72        0.94\n",
+            "                    hare          50        0.84        0.96\n",
+            "           Angora rabbit          50        0.94        0.98\n",
+            "                 hamster          50        0.96           1\n",
+            "               porcupine          50        0.88        0.98\n",
+            "            fox squirrel          50        0.76        0.94\n",
+            "                  marmot          50        0.92        0.96\n",
+            "                  beaver          50        0.78        0.94\n",
+            "              guinea pig          50        0.78        0.94\n",
+            "           common sorrel          50        0.96        0.98\n",
+            "                   zebra          50        0.94        0.96\n",
+            "                     pig          50         0.5        0.76\n",
+            "               wild boar          50        0.84        0.96\n",
+            "                 warthog          50        0.84        0.96\n",
+            "            hippopotamus          50        0.88        0.96\n",
+            "                      ox          50        0.48        0.94\n",
+            "           water buffalo          50        0.78        0.94\n",
+            "                   bison          50        0.88        0.96\n",
+            "                     ram          50        0.58        0.92\n",
+            "           bighorn sheep          50        0.66           1\n",
+            "             Alpine ibex          50        0.92        0.98\n",
+            "              hartebeest          50        0.94           1\n",
+            "                  impala          50        0.82        0.96\n",
+            "                 gazelle          50         0.7        0.96\n",
+            "               dromedary          50         0.9           1\n",
+            "                   llama          50        0.82        0.94\n",
+            "                  weasel          50        0.44        0.92\n",
+            "                    mink          50        0.78        0.96\n",
+            "        European polecat          50        0.46         0.9\n",
+            "     black-footed ferret          50        0.68        0.96\n",
+            "                   otter          50        0.66        0.88\n",
+            "                   skunk          50        0.96        0.96\n",
+            "                  badger          50        0.86        0.92\n",
+            "               armadillo          50        0.88         0.9\n",
+            "        three-toed sloth          50        0.96           1\n",
+            "               orangutan          50        0.78        0.92\n",
+            "                 gorilla          50        0.82        0.94\n",
+            "              chimpanzee          50        0.84        0.94\n",
+            "                  gibbon          50        0.76        0.86\n",
+            "                 siamang          50        0.68        0.94\n",
+            "                  guenon          50         0.8        0.94\n",
+            "            patas monkey          50        0.62        0.82\n",
+            "                  baboon          50         0.9        0.98\n",
+            "                 macaque          50         0.8        0.86\n",
+            "                  langur          50         0.6        0.82\n",
+            " black-and-white colobus          50        0.86         0.9\n",
+            "        proboscis monkey          50           1           1\n",
+            "                marmoset          50        0.74        0.98\n",
+            "   white-headed capuchin          50        0.72         0.9\n",
+            "           howler monkey          50        0.86        0.94\n",
+            "                    titi          50         0.5         0.9\n",
+            "Geoffroy's spider monkey          50        0.42         0.8\n",
+            "  common squirrel monkey          50        0.76        0.92\n",
+            "       ring-tailed lemur          50        0.72        0.94\n",
+            "                   indri          50         0.9        0.96\n",
+            "          Asian elephant          50        0.58        0.92\n",
+            "   African bush elephant          50         0.7        0.98\n",
+            "               red panda          50        0.94        0.94\n",
+            "             giant panda          50        0.94        0.98\n",
+            "                   snoek          50        0.74         0.9\n",
+            "                     eel          50         0.6        0.84\n",
+            "             coho salmon          50        0.84        0.96\n",
+            "             rock beauty          50        0.88        0.98\n",
+            "               clownfish          50        0.78        0.98\n",
+            "                sturgeon          50        0.68        0.94\n",
+            "                 garfish          50        0.62         0.8\n",
+            "                lionfish          50        0.96        0.96\n",
+            "              pufferfish          50        0.88        0.96\n",
+            "                  abacus          50        0.74        0.88\n",
+            "                   abaya          50        0.84        0.92\n",
+            "           academic gown          50        0.42        0.86\n",
+            "               accordion          50         0.8         0.9\n",
+            "         acoustic guitar          50         0.5        0.76\n",
+            "        aircraft carrier          50         0.8        0.96\n",
+            "                airliner          50        0.92           1\n",
+            "                 airship          50        0.76        0.82\n",
+            "                   altar          50        0.64        0.98\n",
+            "               ambulance          50        0.88        0.98\n",
+            "      amphibious vehicle          50        0.64        0.94\n",
+            "            analog clock          50        0.52        0.92\n",
+            "                  apiary          50        0.82        0.96\n",
+            "                   apron          50         0.7        0.84\n",
+            "         waste container          50         0.4         0.8\n",
+            "           assault rifle          50        0.42        0.84\n",
+            "                backpack          50        0.34        0.64\n",
+            "                  bakery          50         0.4        0.68\n",
+            "            balance beam          50         0.8        0.98\n",
+            "                 balloon          50        0.86        0.96\n",
+            "           ballpoint pen          50        0.52        0.96\n",
+            "                Band-Aid          50         0.7         0.9\n",
+            "                   banjo          50        0.84           1\n",
+            "                baluster          50        0.68        0.94\n",
+            "                 barbell          50        0.56         0.9\n",
+            "            barber chair          50         0.7        0.92\n",
+            "              barbershop          50        0.54        0.86\n",
+            "                    barn          50        0.96        0.96\n",
+            "               barometer          50        0.84        0.98\n",
+            "                  barrel          50        0.56        0.88\n",
+            "             wheelbarrow          50        0.66        0.88\n",
+            "                baseball          50        0.74        0.98\n",
+            "              basketball          50        0.88        0.98\n",
+            "                bassinet          50        0.66        0.92\n",
+            "                 bassoon          50        0.74        0.98\n",
+            "            swimming cap          50        0.62        0.88\n",
+            "              bath towel          50        0.54        0.78\n",
+            "                 bathtub          50         0.4        0.88\n",
+            "           station wagon          50        0.66        0.84\n",
+            "              lighthouse          50        0.78        0.94\n",
+            "                  beaker          50        0.52        0.68\n",
+            "            military cap          50        0.84        0.96\n",
+            "             beer bottle          50        0.66        0.88\n",
+            "              beer glass          50         0.6        0.84\n",
+            "                bell-cot          50        0.56        0.96\n",
+            "                     bib          50        0.58        0.82\n",
+            "          tandem bicycle          50        0.86        0.96\n",
+            "                  bikini          50        0.56        0.88\n",
+            "             ring binder          50        0.64        0.84\n",
+            "              binoculars          50        0.54        0.78\n",
+            "               birdhouse          50        0.86        0.94\n",
+            "               boathouse          50        0.74        0.92\n",
+            "               bobsleigh          50        0.92        0.96\n",
+            "                bolo tie          50         0.8        0.94\n",
+            "             poke bonnet          50        0.64        0.86\n",
+            "                bookcase          50        0.66        0.92\n",
+            "               bookstore          50        0.62        0.88\n",
+            "              bottle cap          50        0.58         0.7\n",
+            "                     bow          50        0.72        0.86\n",
+            "                 bow tie          50         0.7         0.9\n",
+            "                   brass          50        0.92        0.96\n",
+            "                     bra          50         0.5         0.7\n",
+            "              breakwater          50        0.62        0.86\n",
+            "             breastplate          50         0.4         0.9\n",
+            "                   broom          50         0.6        0.86\n",
+            "                  bucket          50        0.66         0.8\n",
+            "                  buckle          50         0.5        0.68\n",
+            "        bulletproof vest          50         0.5        0.78\n",
+            "        high-speed train          50        0.94        0.96\n",
+            "            butcher shop          50        0.74        0.94\n",
+            "                 taxicab          50        0.64        0.86\n",
+            "                cauldron          50        0.44        0.66\n",
+            "                  candle          50        0.48        0.74\n",
+            "                  cannon          50        0.88        0.94\n",
+            "                   canoe          50        0.94           1\n",
+            "              can opener          50        0.66        0.86\n",
+            "                cardigan          50        0.68         0.8\n",
+            "              car mirror          50        0.94        0.96\n",
+            "                carousel          50        0.94        0.98\n",
+            "                tool kit          50        0.56        0.78\n",
+            "                  carton          50        0.42         0.7\n",
+            "               car wheel          50        0.38        0.74\n",
+            "automated teller machine          50        0.76        0.94\n",
+            "                cassette          50        0.52         0.8\n",
+            "         cassette player          50        0.28         0.9\n",
+            "                  castle          50        0.78        0.88\n",
+            "               catamaran          50        0.78           1\n",
+            "               CD player          50        0.52        0.82\n",
+            "                   cello          50        0.82           1\n",
+            "            mobile phone          50        0.68        0.86\n",
+            "                   chain          50        0.38        0.66\n",
+            "        chain-link fence          50         0.7        0.84\n",
+            "              chain mail          50        0.64         0.9\n",
+            "                chainsaw          50        0.84        0.92\n",
+            "                   chest          50        0.68        0.92\n",
+            "              chiffonier          50        0.26        0.64\n",
+            "                   chime          50        0.62        0.84\n",
+            "           china cabinet          50        0.82        0.96\n",
+            "      Christmas stocking          50        0.92        0.94\n",
+            "                  church          50        0.62         0.9\n",
+            "           movie theater          50        0.58        0.88\n",
+            "                 cleaver          50        0.32        0.62\n",
+            "          cliff dwelling          50        0.88           1\n",
+            "                   cloak          50        0.32        0.64\n",
+            "                   clogs          50        0.58        0.88\n",
+            "         cocktail shaker          50        0.62         0.7\n",
+            "              coffee mug          50        0.44        0.72\n",
+            "             coffeemaker          50        0.64        0.92\n",
+            "                    coil          50        0.66        0.84\n",
+            "        combination lock          50        0.64        0.84\n",
+            "       computer keyboard          50         0.7        0.82\n",
+            "     confectionery store          50        0.54        0.86\n",
+            "          container ship          50        0.82        0.98\n",
+            "             convertible          50        0.78        0.98\n",
+            "               corkscrew          50        0.82        0.92\n",
+            "                  cornet          50        0.46        0.88\n",
+            "             cowboy boot          50        0.64         0.8\n",
+            "              cowboy hat          50        0.64        0.82\n",
+            "                  cradle          50        0.38         0.8\n",
+            "         crane (machine)          50        0.78        0.94\n",
+            "            crash helmet          50        0.92        0.96\n",
+            "                   crate          50        0.52        0.82\n",
+            "              infant bed          50        0.74           1\n",
+            "               Crock Pot          50        0.78         0.9\n",
+            "            croquet ball          50         0.9        0.96\n",
+            "                  crutch          50        0.46         0.7\n",
+            "                 cuirass          50        0.54        0.86\n",
+            "                     dam          50        0.74        0.92\n",
+            "                    desk          50         0.6        0.86\n",
+            "        desktop computer          50        0.54        0.94\n",
+            "   rotary dial telephone          50        0.88        0.94\n",
+            "                  diaper          50        0.68        0.84\n",
+            "           digital clock          50        0.54        0.76\n",
+            "           digital watch          50        0.58        0.86\n",
+            "            dining table          50        0.76         0.9\n",
+            "               dishcloth          50        0.94           1\n",
+            "              dishwasher          50        0.44        0.78\n",
+            "              disc brake          50        0.98           1\n",
+            "                    dock          50        0.54        0.94\n",
+            "                dog sled          50        0.84           1\n",
+            "                    dome          50        0.72        0.92\n",
+            "                 doormat          50        0.56        0.82\n",
+            "            drilling rig          50        0.84        0.96\n",
+            "                    drum          50        0.38        0.68\n",
+            "               drumstick          50        0.56        0.72\n",
+            "                dumbbell          50        0.62         0.9\n",
+            "              Dutch oven          50         0.7        0.84\n",
+            "            electric fan          50        0.82        0.86\n",
+            "         electric guitar          50        0.62        0.84\n",
+            "     electric locomotive          50        0.92        0.98\n",
+            "    entertainment center          50         0.9        0.98\n",
+            "                envelope          50        0.44        0.86\n",
+            "        espresso machine          50        0.72        0.94\n",
+            "             face powder          50         0.7        0.92\n",
+            "             feather boa          50         0.7        0.84\n",
+            "          filing cabinet          50        0.88        0.98\n",
+            "                fireboat          50        0.94        0.98\n",
+            "             fire engine          50        0.84         0.9\n",
+            "       fire screen sheet          50        0.62        0.76\n",
+            "                flagpole          50        0.74        0.88\n",
+            "                   flute          50        0.36        0.72\n",
+            "           folding chair          50        0.62        0.84\n",
+            "         football helmet          50        0.86        0.94\n",
+            "                forklift          50         0.8        0.92\n",
+            "                fountain          50        0.84        0.94\n",
+            "            fountain pen          50        0.76        0.92\n",
+            "         four-poster bed          50        0.78        0.94\n",
+            "             freight car          50        0.96           1\n",
+            "             French horn          50        0.76        0.92\n",
+            "              frying pan          50        0.36        0.78\n",
+            "                fur coat          50        0.84        0.96\n",
+            "           garbage truck          50         0.9        0.98\n",
+            "                gas mask          50        0.84        0.92\n",
+            "                gas pump          50         0.9        0.98\n",
+            "                  goblet          50        0.68        0.82\n",
+            "                 go-kart          50         0.9           1\n",
+            "               golf ball          50        0.84         0.9\n",
+            "               golf cart          50        0.78        0.86\n",
+            "                 gondola          50        0.98        0.98\n",
+            "                    gong          50        0.74        0.92\n",
+            "                    gown          50        0.62        0.96\n",
+            "             grand piano          50         0.7        0.96\n",
+            "              greenhouse          50         0.8        0.98\n",
+            "                  grille          50        0.72         0.9\n",
+            "           grocery store          50        0.66        0.94\n",
+            "              guillotine          50        0.86        0.92\n",
+            "                barrette          50        0.52        0.66\n",
+            "              hair spray          50         0.5        0.74\n",
+            "              half-track          50        0.78         0.9\n",
+            "                  hammer          50        0.56        0.76\n",
+            "                  hamper          50        0.64        0.84\n",
+            "              hair dryer          50        0.56        0.74\n",
+            "      hand-held computer          50        0.42        0.86\n",
+            "            handkerchief          50        0.78        0.94\n",
+            "         hard disk drive          50        0.76        0.84\n",
+            "               harmonica          50         0.7        0.88\n",
+            "                    harp          50        0.88        0.96\n",
+            "               harvester          50        0.78           1\n",
+            "                 hatchet          50        0.54        0.74\n",
+            "                 holster          50        0.66        0.84\n",
+            "            home theater          50        0.64        0.94\n",
+            "               honeycomb          50        0.56        0.88\n",
+            "                    hook          50         0.3         0.6\n",
+            "              hoop skirt          50        0.64        0.86\n",
+            "          horizontal bar          50        0.68        0.98\n",
+            "     horse-drawn vehicle          50        0.88        0.94\n",
+            "               hourglass          50        0.88        0.96\n",
+            "                    iPod          50        0.76        0.94\n",
+            "            clothes iron          50        0.82        0.88\n",
+            "         jack-o'-lantern          50        0.98        0.98\n",
+            "                   jeans          50        0.68        0.84\n",
+            "                    jeep          50        0.72         0.9\n",
+            "                 T-shirt          50        0.72        0.96\n",
+            "           jigsaw puzzle          50        0.84        0.94\n",
+            "         pulled rickshaw          50        0.86        0.94\n",
+            "                joystick          50         0.8         0.9\n",
+            "                  kimono          50        0.84        0.96\n",
+            "                knee pad          50        0.62        0.88\n",
+            "                    knot          50        0.66         0.8\n",
+            "                lab coat          50         0.8        0.96\n",
+            "                   ladle          50        0.36        0.64\n",
+            "               lampshade          50        0.48        0.84\n",
+            "         laptop computer          50        0.26        0.88\n",
+            "              lawn mower          50        0.78        0.96\n",
+            "                lens cap          50        0.46        0.72\n",
+            "             paper knife          50        0.26         0.5\n",
+            "                 library          50        0.54         0.9\n",
+            "                lifeboat          50        0.92        0.98\n",
+            "                 lighter          50        0.56        0.78\n",
+            "               limousine          50        0.76        0.92\n",
+            "             ocean liner          50        0.88        0.94\n",
+            "                lipstick          50        0.74         0.9\n",
+            "            slip-on shoe          50        0.74        0.92\n",
+            "                  lotion          50         0.5        0.86\n",
+            "                 speaker          50        0.52        0.68\n",
+            "                   loupe          50        0.32        0.52\n",
+            "                 sawmill          50        0.72         0.9\n",
+            "        magnetic compass          50        0.52        0.82\n",
+            "                mail bag          50        0.68        0.92\n",
+            "                 mailbox          50        0.82        0.92\n",
+            "                  tights          50        0.22        0.94\n",
+            "               tank suit          50        0.24         0.9\n",
+            "           manhole cover          50        0.96        0.98\n",
+            "                  maraca          50        0.74         0.9\n",
+            "                 marimba          50        0.84        0.94\n",
+            "                    mask          50        0.44        0.82\n",
+            "                   match          50        0.66         0.9\n",
+            "                 maypole          50        0.96           1\n",
+            "                    maze          50         0.8        0.96\n",
+            "           measuring cup          50        0.54        0.76\n",
+            "          medicine chest          50         0.6        0.84\n",
+            "                megalith          50         0.8        0.92\n",
+            "              microphone          50        0.52         0.7\n",
+            "          microwave oven          50        0.48        0.72\n",
+            "        military uniform          50        0.62        0.84\n",
+            "                milk can          50        0.68        0.82\n",
+            "                 minibus          50         0.7           1\n",
+            "               miniskirt          50        0.46        0.76\n",
+            "                 minivan          50        0.38         0.8\n",
+            "                 missile          50         0.4        0.84\n",
+            "                  mitten          50        0.76        0.88\n",
+            "             mixing bowl          50         0.8        0.92\n",
+            "             mobile home          50        0.54        0.78\n",
+            "                 Model T          50        0.92        0.96\n",
+            "                   modem          50        0.58        0.86\n",
+            "               monastery          50        0.44         0.9\n",
+            "                 monitor          50         0.4        0.86\n",
+            "                   moped          50        0.56        0.94\n",
+            "                  mortar          50        0.68        0.94\n",
+            "     square academic cap          50         0.5        0.84\n",
+            "                  mosque          50         0.9           1\n",
+            "            mosquito net          50         0.9        0.98\n",
+            "                 scooter          50         0.9        0.98\n",
+            "           mountain bike          50        0.78        0.96\n",
+            "                    tent          50        0.88        0.96\n",
+            "          computer mouse          50        0.42        0.82\n",
+            "               mousetrap          50        0.76        0.88\n",
+            "              moving van          50         0.4        0.72\n",
+            "                  muzzle          50         0.5        0.72\n",
+            "                    nail          50        0.68        0.74\n",
+            "              neck brace          50        0.56        0.68\n",
+            "                necklace          50        0.86           1\n",
+            "                  nipple          50         0.7        0.88\n",
+            "       notebook computer          50        0.34        0.84\n",
+            "                 obelisk          50         0.8        0.92\n",
+            "                    oboe          50         0.6        0.84\n",
+            "                 ocarina          50         0.8        0.86\n",
+            "                odometer          50        0.96           1\n",
+            "              oil filter          50        0.58        0.82\n",
+            "                   organ          50        0.82         0.9\n",
+            "            oscilloscope          50         0.9        0.96\n",
+            "               overskirt          50         0.2         0.7\n",
+            "            bullock cart          50         0.7        0.94\n",
+            "             oxygen mask          50        0.46        0.84\n",
+            "                  packet          50         0.5        0.78\n",
+            "                  paddle          50        0.56        0.94\n",
+            "            paddle wheel          50        0.86        0.96\n",
+            "                 padlock          50        0.74        0.78\n",
+            "              paintbrush          50        0.62         0.8\n",
+            "                 pajamas          50        0.56        0.92\n",
+            "                  palace          50        0.64        0.96\n",
+            "               pan flute          50        0.84        0.86\n",
+            "             paper towel          50        0.66        0.84\n",
+            "               parachute          50        0.92        0.94\n",
+            "           parallel bars          50        0.62        0.96\n",
+            "              park bench          50        0.74         0.9\n",
+            "           parking meter          50        0.84        0.92\n",
+            "           passenger car          50         0.5        0.82\n",
+            "                   patio          50        0.58        0.84\n",
+            "                payphone          50        0.74        0.92\n",
+            "                pedestal          50        0.52         0.9\n",
+            "             pencil case          50        0.64        0.92\n",
+            "        pencil sharpener          50        0.52        0.78\n",
+            "                 perfume          50         0.7         0.9\n",
+            "              Petri dish          50         0.6         0.8\n",
+            "             photocopier          50        0.88        0.98\n",
+            "                plectrum          50         0.7        0.84\n",
+            "             Pickelhaube          50        0.72        0.86\n",
+            "            picket fence          50        0.84        0.94\n",
+            "            pickup truck          50        0.64        0.92\n",
+            "                    pier          50        0.52        0.82\n",
+            "              piggy bank          50        0.82        0.94\n",
+            "             pill bottle          50        0.76        0.86\n",
+            "                  pillow          50        0.76         0.9\n",
+            "          ping-pong ball          50        0.84        0.88\n",
+            "                pinwheel          50        0.76        0.88\n",
+            "             pirate ship          50        0.76        0.94\n",
+            "                 pitcher          50        0.46        0.84\n",
+            "              hand plane          50        0.84        0.94\n",
+            "             planetarium          50        0.88        0.98\n",
+            "             plastic bag          50        0.36        0.62\n",
+            "              plate rack          50        0.52        0.78\n",
+            "                    plow          50        0.78        0.88\n",
+            "                 plunger          50        0.42         0.7\n",
+            "         Polaroid camera          50        0.84        0.92\n",
+            "                    pole          50        0.38        0.74\n",
+            "              police van          50        0.76        0.94\n",
+            "                  poncho          50        0.58        0.86\n",
+            "          billiard table          50         0.8        0.88\n",
+            "             soda bottle          50        0.56        0.94\n",
+            "                     pot          50        0.78        0.92\n",
+            "          potter's wheel          50         0.9        0.94\n",
+            "             power drill          50        0.42        0.72\n",
+            "              prayer rug          50         0.7        0.86\n",
+            "                 printer          50        0.54        0.86\n",
+            "                  prison          50         0.7         0.9\n",
+            "              projectile          50        0.28         0.9\n",
+            "               projector          50        0.62        0.84\n",
+            "             hockey puck          50        0.92        0.96\n",
+            "            punching bag          50         0.6        0.68\n",
+            "                   purse          50        0.42        0.78\n",
+            "                   quill          50        0.68        0.84\n",
+            "                   quilt          50        0.64         0.9\n",
+            "                race car          50        0.72        0.92\n",
+            "                  racket          50        0.72         0.9\n",
+            "                radiator          50        0.66        0.76\n",
+            "                   radio          50        0.64        0.92\n",
+            "         radio telescope          50         0.9        0.96\n",
+            "             rain barrel          50         0.8        0.98\n",
+            "    recreational vehicle          50        0.84        0.94\n",
+            "                    reel          50        0.72        0.82\n",
+            "           reflex camera          50        0.72        0.92\n",
+            "            refrigerator          50         0.7         0.9\n",
+            "          remote control          50         0.7        0.88\n",
+            "              restaurant          50         0.5        0.66\n",
+            "                revolver          50        0.82           1\n",
+            "                   rifle          50        0.38         0.7\n",
+            "           rocking chair          50        0.62        0.84\n",
+            "              rotisserie          50        0.88        0.92\n",
+            "                  eraser          50        0.54        0.76\n",
+            "              rugby ball          50        0.86        0.94\n",
+            "                   ruler          50        0.68        0.86\n",
+            "            running shoe          50        0.78        0.94\n",
+            "                    safe          50        0.82        0.92\n",
+            "              safety pin          50         0.4        0.62\n",
+            "             salt shaker          50        0.66         0.9\n",
+            "                  sandal          50        0.66        0.86\n",
+            "                  sarong          50        0.64        0.86\n",
+            "               saxophone          50        0.66        0.88\n",
+            "                scabbard          50        0.76        0.92\n",
+            "          weighing scale          50        0.58        0.78\n",
+            "              school bus          50        0.92           1\n",
+            "                schooner          50        0.84           1\n",
+            "              scoreboard          50         0.9        0.96\n",
+            "              CRT screen          50        0.14         0.7\n",
+            "                   screw          50         0.9        0.98\n",
+            "             screwdriver          50         0.3        0.58\n",
+            "               seat belt          50        0.88        0.94\n",
+            "          sewing machine          50        0.76         0.9\n",
+            "                  shield          50        0.56        0.82\n",
+            "              shoe store          50        0.78        0.96\n",
+            "                   shoji          50         0.8        0.92\n",
+            "         shopping basket          50        0.52        0.88\n",
+            "           shopping cart          50        0.76        0.92\n",
+            "                  shovel          50        0.62        0.84\n",
+            "              shower cap          50         0.7        0.84\n",
+            "          shower curtain          50        0.64        0.82\n",
+            "                     ski          50        0.74        0.92\n",
+            "                ski mask          50        0.72        0.88\n",
+            "            sleeping bag          50        0.68         0.8\n",
+            "              slide rule          50        0.72        0.88\n",
+            "            sliding door          50        0.44        0.78\n",
+            "            slot machine          50        0.94        0.98\n",
+            "                 snorkel          50        0.86        0.98\n",
+            "              snowmobile          50        0.88           1\n",
+            "                snowplow          50        0.84        0.98\n",
+            "          soap dispenser          50        0.56        0.86\n",
+            "             soccer ball          50        0.86        0.96\n",
+            "                    sock          50        0.62        0.76\n",
+            " solar thermal collector          50        0.72        0.96\n",
+            "                sombrero          50         0.6        0.84\n",
+            "               soup bowl          50        0.56        0.94\n",
+            "               space bar          50        0.34        0.88\n",
+            "            space heater          50        0.52        0.74\n",
+            "           space shuttle          50        0.82        0.96\n",
+            "                 spatula          50         0.3         0.6\n",
+            "               motorboat          50        0.86           1\n",
+            "              spider web          50         0.7         0.9\n",
+            "                 spindle          50        0.86        0.98\n",
+            "              sports car          50         0.6        0.94\n",
+            "               spotlight          50        0.26         0.6\n",
+            "                   stage          50        0.68        0.86\n",
+            "        steam locomotive          50        0.94           1\n",
+            "     through arch bridge          50        0.84        0.96\n",
+            "              steel drum          50        0.82         0.9\n",
+            "             stethoscope          50         0.6        0.82\n",
+            "                   scarf          50         0.5        0.92\n",
+            "              stone wall          50        0.76         0.9\n",
+            "               stopwatch          50        0.58         0.9\n",
+            "                   stove          50        0.46        0.74\n",
+            "                strainer          50        0.64        0.84\n",
+            "                    tram          50        0.88        0.96\n",
+            "               stretcher          50         0.6         0.8\n",
+            "                   couch          50         0.8        0.96\n",
+            "                   stupa          50        0.88        0.88\n",
+            "               submarine          50        0.72        0.92\n",
+            "                    suit          50         0.4        0.78\n",
+            "                 sundial          50        0.58        0.74\n",
+            "                sunglass          50        0.14        0.58\n",
+            "              sunglasses          50        0.28        0.58\n",
+            "               sunscreen          50        0.32         0.7\n",
+            "       suspension bridge          50         0.6        0.94\n",
+            "                     mop          50        0.74        0.92\n",
+            "              sweatshirt          50        0.28        0.66\n",
+            "                swimsuit          50        0.52        0.82\n",
+            "                   swing          50        0.76        0.84\n",
+            "                  switch          50        0.56        0.76\n",
+            "                 syringe          50        0.62        0.82\n",
+            "              table lamp          50         0.6        0.88\n",
+            "                    tank          50         0.8        0.96\n",
+            "             tape player          50        0.46        0.76\n",
+            "                  teapot          50        0.84           1\n",
+            "              teddy bear          50        0.82        0.94\n",
+            "              television          50         0.6         0.9\n",
+            "             tennis ball          50         0.7        0.94\n",
+            "           thatched roof          50        0.88         0.9\n",
+            "           front curtain          50         0.8        0.92\n",
+            "                 thimble          50         0.6         0.8\n",
+            "       threshing machine          50        0.56        0.88\n",
+            "                  throne          50        0.72        0.82\n",
+            "               tile roof          50        0.72        0.94\n",
+            "                 toaster          50        0.66        0.84\n",
+            "            tobacco shop          50        0.42         0.7\n",
+            "             toilet seat          50        0.62        0.88\n",
+            "                   torch          50        0.64        0.84\n",
+            "              totem pole          50        0.92        0.98\n",
+            "               tow truck          50        0.62        0.88\n",
+            "               toy store          50         0.6        0.94\n",
+            "                 tractor          50        0.76        0.98\n",
+            "      semi-trailer truck          50        0.78        0.92\n",
+            "                    tray          50        0.46        0.64\n",
+            "             trench coat          50        0.54        0.72\n",
+            "                tricycle          50        0.72        0.94\n",
+            "                trimaran          50         0.7        0.98\n",
+            "                  tripod          50        0.58        0.86\n",
+            "          triumphal arch          50        0.92        0.98\n",
+            "              trolleybus          50         0.9           1\n",
+            "                trombone          50        0.54        0.88\n",
+            "                     tub          50        0.24        0.82\n",
+            "               turnstile          50        0.84        0.94\n",
+            "     typewriter keyboard          50        0.68        0.98\n",
+            "                umbrella          50        0.52         0.7\n",
+            "                unicycle          50        0.74        0.96\n",
+            "           upright piano          50        0.76         0.9\n",
+            "          vacuum cleaner          50        0.62         0.9\n",
+            "                    vase          50         0.5        0.78\n",
+            "                   vault          50        0.76        0.92\n",
+            "                  velvet          50         0.2        0.42\n",
+            "         vending machine          50         0.9           1\n",
+            "                vestment          50        0.54        0.82\n",
+            "                 viaduct          50        0.78        0.86\n",
+            "                  violin          50        0.68        0.78\n",
+            "              volleyball          50        0.86           1\n",
+            "             waffle iron          50        0.72        0.88\n",
+            "              wall clock          50        0.54        0.88\n",
+            "                  wallet          50        0.52         0.9\n",
+            "                wardrobe          50        0.68        0.88\n",
+            "       military aircraft          50         0.9        0.98\n",
+            "                    sink          50        0.72        0.96\n",
+            "         washing machine          50        0.78        0.94\n",
+            "            water bottle          50        0.54        0.74\n",
+            "               water jug          50        0.22        0.74\n",
+            "             water tower          50         0.9        0.96\n",
+            "             whiskey jug          50        0.64        0.74\n",
+            "                 whistle          50        0.72        0.84\n",
+            "                     wig          50        0.84         0.9\n",
+            "           window screen          50        0.68         0.8\n",
+            "            window shade          50        0.52        0.76\n",
+            "             Windsor tie          50        0.22        0.66\n",
+            "             wine bottle          50        0.42        0.82\n",
+            "                    wing          50        0.54        0.96\n",
+            "                     wok          50        0.46        0.82\n",
+            "            wooden spoon          50        0.58         0.8\n",
+            "                    wool          50        0.32        0.82\n",
+            "        split-rail fence          50        0.74         0.9\n",
+            "               shipwreck          50        0.84        0.96\n",
+            "                    yawl          50        0.78        0.96\n",
+            "                    yurt          50        0.84           1\n",
+            "                 website          50        0.98           1\n",
+            "              comic book          50        0.62         0.9\n",
+            "               crossword          50        0.84        0.88\n",
+            "            traffic sign          50        0.78         0.9\n",
+            "           traffic light          50         0.8        0.94\n",
+            "             dust jacket          50        0.72        0.94\n",
+            "                    menu          50        0.82        0.96\n",
+            "                   plate          50        0.44        0.88\n",
+            "               guacamole          50         0.8        0.92\n",
+            "                consomme          50        0.54        0.88\n",
+            "                 hot pot          50        0.86        0.98\n",
+            "                  trifle          50        0.92        0.98\n",
+            "               ice cream          50        0.68        0.94\n",
+            "                 ice pop          50        0.62        0.84\n",
+            "                baguette          50        0.62        0.88\n",
+            "                   bagel          50        0.64        0.92\n",
+            "                 pretzel          50        0.72        0.88\n",
+            "            cheeseburger          50         0.9           1\n",
+            "                 hot dog          50        0.74        0.94\n",
+            "           mashed potato          50        0.74         0.9\n",
+            "                 cabbage          50        0.84        0.96\n",
+            "                broccoli          50         0.9        0.96\n",
+            "             cauliflower          50        0.82           1\n",
+            "                zucchini          50        0.74         0.9\n",
+            "        spaghetti squash          50         0.8        0.96\n",
+            "            acorn squash          50        0.82        0.96\n",
+            "        butternut squash          50         0.7        0.94\n",
+            "                cucumber          50         0.6        0.96\n",
+            "               artichoke          50        0.84        0.94\n",
+            "             bell pepper          50        0.84        0.98\n",
+            "                 cardoon          50        0.88        0.94\n",
+            "                mushroom          50        0.38        0.92\n",
+            "            Granny Smith          50         0.9        0.96\n",
+            "              strawberry          50         0.6        0.88\n",
+            "                  orange          50         0.7        0.92\n",
+            "                   lemon          50        0.78        0.98\n",
+            "                     fig          50        0.82        0.96\n",
+            "               pineapple          50        0.86        0.96\n",
+            "                  banana          50        0.84        0.96\n",
+            "               jackfruit          50         0.9        0.98\n",
+            "           custard apple          50        0.86        0.96\n",
+            "             pomegranate          50        0.82        0.98\n",
+            "                     hay          50         0.8        0.92\n",
+            "               carbonara          50        0.88        0.94\n",
+            "         chocolate syrup          50        0.46        0.84\n",
+            "                   dough          50         0.4         0.6\n",
+            "                meatloaf          50        0.58        0.84\n",
+            "                   pizza          50        0.84        0.96\n",
+            "                 pot pie          50        0.68         0.9\n",
+            "                 burrito          50         0.8        0.98\n",
+            "                red wine          50        0.54        0.82\n",
+            "                espresso          50        0.64        0.88\n",
+            "                     cup          50        0.38         0.7\n",
+            "                  eggnog          50        0.38         0.7\n",
+            "                     alp          50        0.54        0.88\n",
+            "                  bubble          50         0.8        0.96\n",
+            "                   cliff          50        0.64           1\n",
+            "              coral reef          50        0.72        0.96\n",
+            "                  geyser          50        0.94           1\n",
+            "               lakeshore          50        0.54        0.88\n",
+            "              promontory          50        0.58        0.94\n",
+            "                   shoal          50         0.6        0.96\n",
+            "                seashore          50        0.44        0.78\n",
+            "                  valley          50        0.72        0.94\n",
+            "                 volcano          50        0.78        0.96\n",
+            "         baseball player          50        0.72        0.94\n",
+            "              bridegroom          50        0.72        0.88\n",
+            "             scuba diver          50         0.8           1\n",
+            "                rapeseed          50        0.94        0.98\n",
+            "                   daisy          50        0.96        0.98\n",
+            "   yellow lady's slipper          50           1           1\n",
+            "                    corn          50         0.4        0.88\n",
+            "                   acorn          50        0.92        0.98\n",
+            "                rose hip          50        0.92        0.98\n",
+            "     horse chestnut seed          50        0.94        0.98\n",
+            "            coral fungus          50        0.96        0.96\n",
+            "                  agaric          50        0.82        0.94\n",
+            "               gyromitra          50        0.98           1\n",
+            "      stinkhorn mushroom          50         0.8        0.94\n",
+            "              earth star          50        0.98           1\n",
+            "        hen-of-the-woods          50         0.8        0.96\n",
+            "                  bolete          50        0.74        0.94\n",
+            "                     ear          50        0.48        0.94\n",
+            "            toilet paper          50        0.36        0.68\n",
+            "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n",
+            "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n"
+          ]
+        }
+      ],
+      "source": [
+        "# Validate YOLOv5s on Imagenet val\n",
+        "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ZY2VXXXu74w5"
+      },
+      "source": [
+        "# 3. Train\n",
+        "\n",
+        "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\"/></a></p>\n",
+        "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
+        "<br><br>\n",
+        "\n",
+        "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n",
+        "\n",
+        "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
+        "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
+        "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n",
+        "<br><br>\n",
+        "\n",
+        "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
+        "\n",
+        "## Train on Custom Data with Roboflow 🌟 NEW\n",
+        "\n",
+        "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
+        "\n",
+        "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n",
+        "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n",
+        "<br>\n",
+        "\n",
+        "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"480\" src=\"https://user-images.githubusercontent.com/26833433/202802162-92e60571-ab58-4409-948d-b31fddcd3c6f.png\"/></a></p>Label images lightning fast (including with model-assisted labeling)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "i3oKtE4g-aNn"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n",
+        "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n",
+        "\n",
+        "if logger == 'Comet':\n",
+        "  %pip install -q comet_ml\n",
+        "  import comet_ml; comet_ml.init()\n",
+        "elif logger == 'ClearML':\n",
+        "  %pip install -q clearml\n",
+        "  import clearml; clearml.browser_login()\n",
+        "elif logger == 'TensorBoard':\n",
+        "  %load_ext tensorboard\n",
+        "  %tensorboard --logdir runs/train"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "1NcFxRcFdJ_O",
+        "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n",
+            "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
+            "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+            "\n",
+            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n",
+            "\n",
+            "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n",
+            "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n",
+            "100% 103M/103M [00:00<00:00, 347MB/s] \n",
+            "Unzipping /content/datasets/imagenette160.zip...\n",
+            "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n",
+            "\n",
+            "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n",
+            "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n",
+            "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n",
+            "Image sizes 224 train, 224 test\n",
+            "Using 1 dataloader workers\n",
+            "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n",
+            "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n",
+            "\n",
+            "     Epoch   GPU_mem  train_loss    val_loss    top1_acc    top5_acc\n",
+            "       1/5     1.47G        1.05       0.974       0.828       0.975: 100% 148/148 [00:38<00:00,  3.82it/s]\n",
+            "       2/5     1.73G       0.895       0.766       0.911       0.994: 100% 148/148 [00:36<00:00,  4.03it/s]\n",
+            "       3/5     1.73G        0.82       0.704       0.934       0.996: 100% 148/148 [00:35<00:00,  4.20it/s]\n",
+            "       4/5     1.73G       0.766       0.664       0.951       0.998: 100% 148/148 [00:36<00:00,  4.05it/s]\n",
+            "       5/5     1.73G       0.724       0.634       0.959       0.997: 100% 148/148 [00:37<00:00,  3.94it/s]\n",
+            "\n",
+            "Training complete (0.052 hours)\n",
+            "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n",
+            "Predict:         python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n",
+            "Validate:        python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n",
+            "Export:          python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n",
+            "PyTorch Hub:     model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n",
+            "Visualize:       https://netron.app\n",
+            "\n"
+          ]
+        }
+      ],
+      "source": [
+        "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n",
+        "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "15glLzbQx5u0"
+      },
+      "source": [
+        "# 4. Visualize"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "nWOsI5wJR1o3"
+      },
+      "source": [
+        "## Comet Logging and Visualization 🌟 NEW\n",
+        "\n",
+        "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
+        "\n",
+        "Getting started is easy:\n",
+        "```shell\n",
+        "pip install comet_ml  # 1. install\n",
+        "export COMET_API_KEY=<Your API Key>  # 2. paste API key\n",
+        "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\n",
+        "```\n",
+        "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
+        "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
+        "\n",
+        "<a href=\"https://bit.ly/yolov5-readme-comet2\">\n",
+        "<img alt=\"Comet Dashboard\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\" width=\"1280\"/></a>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Lay2WsTjNJzP"
+      },
+      "source": [
+        "## ClearML Logging and Automation 🌟 NEW\n",
+        "\n",
+        "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
+        "\n",
+        "- `pip install clearml`\n",
+        "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
+        "\n",
+        "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
+        "\n",
+        "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
+        "\n",
+        "<a href=\"https://cutt.ly/yolov5-notebook-clearml\">\n",
+        "<img alt=\"ClearML Experiment Management UI\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\" width=\"1280\"/></a>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "-WPvRbS5Swl6"
+      },
+      "source": [
+        "## Local Logging\n",
+        "\n",
+        "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
+        "\n",
+        "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n",
+        "\n",
+        "<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Zelyeqbyt3GD"
+      },
+      "source": [
+        "# Environments\n",
+        "\n",
+        "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
+        "\n",
+        "- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+        "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
+        "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
+        "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "6Qu7Iesl0p54"
+      },
+      "source": [
+        "# Status\n",
+        "\n",
+        "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n",
+        "\n",
+        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "IEijrePND_2I"
+      },
+      "source": [
+        "# Appendix\n",
+        "\n",
+        "Additional content below."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "GMusP4OAxFu6"
+      },
+      "outputs": [],
+      "source": [
+        "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
+        "import torch\n",
+        "\n",
+        "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # yolov5n - yolov5x6 or custom\n",
+        "im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\n",
+        "results = model(im)  # inference\n",
+        "results.print()  # or .show(), .save(), .crop(), .pandas(), etc."
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "name": "YOLOv5 Classification Tutorial",
+      "provenance": []
+    },
+    "kernelspec": {
+      "display_name": "Python 3 (ipykernel)",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.7.12"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
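The Local Logging section of the notebook above notes that each training run writes its metrics to an incrementing run directory (for classification, `runs/train-cls/exp`, `exp2`, ...) via TensorBoard and CSV loggers. Below is a minimal sketch for inspecting such a run locally; the run path and the `results.csv` filename are assumptions taken from the tutorial output and may differ in your setup.

```python
# Hedged sketch: peek at the metrics CSV written to a local run directory.
# 'runs/train-cls/exp' and 'results.csv' are assumptions based on the tutorial output above.
from pathlib import Path

import pandas as pd

run_dir = Path('runs/train-cls/exp')      # assumed run directory from the training cell above
csv_file = run_dir / 'results.csv'        # assumed CSV logger output name
if csv_file.exists():
    df = pd.read_csv(csv_file)
    df.columns = [c.strip() for c in df.columns]  # strip padded column names, if any
    print(df.tail())                              # last few epochs of train/val metrics
else:
    print(f'{csv_file} not found - train a model first (see the Train section above)')
```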

+ 170 - 0
classify/val.py

@@ -0,0 +1,170 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Validate a trained YOLOv5 classification model on a classification dataset
+
+Usage:
+    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
+    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet
+
+Usage - formats:
+    $ python classify/val.py --weights yolov5s-cls.pt                 # PyTorch
+                                       yolov5s-cls.torchscript        # TorchScript
+                                       yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                       yolov5s-cls_openvino_model     # OpenVINO
+                                       yolov5s-cls.engine             # TensorRT
+                                       yolov5s-cls.mlmodel            # CoreML (macOS-only)
+                                       yolov5s-cls_saved_model        # TensorFlow SavedModel
+                                       yolov5s-cls.pb                 # TensorFlow GraphDef
+                                       yolov5s-cls.tflite             # TensorFlow Lite
+                                       yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU
+                                       yolov5s-cls_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import DetectMultiBackend
+from utils.dataloaders import create_classification_dataloader
+from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
+                           increment_path, print_args)
+from utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+    data=ROOT / '../datasets/mnist',  # dataset dir
+    weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
+    batch_size=128,  # batch size
+    imgsz=224,  # inference size (pixels)
+    device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+    workers=8,  # max dataloader workers (per RANK in DDP mode)
+    verbose=False,  # verbose output
+    project=ROOT / 'runs/val-cls',  # save to project/name
+    name='exp',  # save to project/name
+    exist_ok=False,  # existing project/name ok, do not increment
+    half=False,  # use FP16 half-precision inference
+    dnn=False,  # use OpenCV DNN for ONNX inference
+    model=None,
+    dataloader=None,
+    criterion=None,
+    pbar=None,
+):
+    # Initialize/load model and set device
+    training = model is not None
+    if training:  # called by train.py
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
+        half &= device.type != 'cpu'  # half precision only supported on CUDA
+        model.half() if half else model.float()
+    else:  # called directly
+        device = select_device(device, batch_size=batch_size)
+
+        # Directories
+        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+        save_dir.mkdir(parents=True, exist_ok=True)  # make dir
+
+        # Load model
+        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
+        imgsz = check_img_size(imgsz, s=stride)  # check image size
+        half = model.fp16  # FP16 supported on limited backends with CUDA
+        if engine:
+            batch_size = model.batch_size
+        else:
+            device = model.device
+            if not (pt or jit):
+                batch_size = 1  # export.py models default to batch-size 1
+                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
+
+        # Dataloader
+        data = Path(data)
+        test_dir = data / 'test' if (data / 'test').exists() else data / 'val'  # data/test or data/val
+        dataloader = create_classification_dataloader(path=test_dir,
+                                                      imgsz=imgsz,
+                                                      batch_size=batch_size,
+                                                      augment=False,
+                                                      rank=-1,
+                                                      workers=workers)
+
+    model.eval()
+    pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
+    n = len(dataloader)  # number of batches
+    action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
+    desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}'
+    bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
+    with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
+        for images, labels in bar:
+            with dt[0]:
+                images, labels = images.to(device, non_blocking=True), labels.to(device)
+
+            with dt[1]:
+                y = model(images)
+
+            with dt[2]:
+                pred.append(y.argsort(1, descending=True)[:, :5])
+                targets.append(labels)
+                if criterion:
+                    loss += criterion(y, labels)
+
+    loss /= n
+    pred, targets = torch.cat(pred), torch.cat(targets)
+    correct = (targets[:, None] == pred).float()
+    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
+    top1, top5 = acc.mean(0).tolist()
+
+    if pbar:
+        pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}'
+    if verbose:  # all classes
+        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
+        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
+        for i, c in model.names.items():
+            acc_i = acc[targets == i]
+            top1i, top5i = acc_i.mean(0).tolist()
+            LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}')
+
+        # Print results
+        t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt)  # speeds per image
+        shape = (1, 3, imgsz, imgsz)
+        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
+
+    return top1, top5, loss
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
+    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
+    parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    opt = parser.parse_args()
+    print_args(vars(opt))
+    return opt
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)
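The core of `classify/val.py` above is the top-1/top-5 accuracy computation: the five highest-scoring class indices per image are compared against the target label. Below is a standalone sketch of that step with random logits standing in for model output (the shapes are illustrative assumptions, not values from the script).

```python
# Standalone sketch of the top-1 / top-5 accuracy computation from classify/val.py above.
# Random logits stand in for the model output y; shapes are illustrative assumptions.
import torch

torch.manual_seed(0)
num_images, num_classes = 8, 10
y = torch.randn(num_images, num_classes)                 # model output logits
targets = torch.randint(0, num_classes, (num_images,))   # ground-truth class indices

pred = y.argsort(1, descending=True)[:, :5]              # top-5 predicted classes per image
correct = (targets[:, None] == pred).float()             # 1.0 where a prediction matches the target
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # per-image (top1, top5)
top1, top5 = acc.mean(0).tolist()
print(f'top1={top1:.3f}  top5={top5:.3f}')
```

Note that `run()` can also be called from Python with keyword arguments mirroring the CLI flags, e.g. `run(data='../datasets/imagenet', weights='yolov5s-cls.pt', imgsz=224, half=True)`, which is exactly what `main()` does via `run(**vars(opt))`.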

+ 74 - 0
data/Argoverse.yaml

@@ -0,0 +1,74 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
+# Example usage: python train.py --data Argoverse.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── Argoverse  ← downloads here (31.3 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/Argoverse  # dataset root dir
+train: Argoverse-1.1/images/train/  # train images (relative to 'path') 39384 images
+val: Argoverse-1.1/images/val/  # val images (relative to 'path') 15062 images
+test: Argoverse-1.1/images/test/  # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
+
+# Classes
+names:
+  0: person
+  1: bicycle
+  2: car
+  3: motorcycle
+  4: bus
+  5: truck
+  6: traffic_light
+  7: stop_sign
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  import json
+
+  from tqdm import tqdm
+  from utils.general import download, Path
+
+
+  def argoverse2yolo(set):
+      labels = {}
+      a = json.load(open(set, "rb"))
+      for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
+          img_id = annot['image_id']
+          img_name = a['images'][img_id]['name']
+          img_label_name = f'{img_name[:-3]}txt'
+
+          cls = annot['category_id']  # instance class id
+          x_center, y_center, width, height = annot['bbox']
+          x_center = (x_center + width / 2) / 1920.0  # offset and scale
+          y_center = (y_center + height / 2) / 1200.0  # offset and scale
+          width /= 1920.0  # scale
+          height /= 1200.0  # scale
+
+          img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
+          if not img_dir.exists():
+              img_dir.mkdir(parents=True, exist_ok=True)
+
+          k = str(img_dir / img_label_name)
+          if k not in labels:
+              labels[k] = []
+          labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
+
+      for k in labels:
+          with open(k, "w") as f:
+              f.writelines(labels[k])
+
+
+  # Download
+  dir = Path(yaml['path'])  # dataset root dir
+  urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
+  download(urls, dir=dir, delete=False)
+
+  # Convert
+  annotations_dir = 'Argoverse-HD/annotations/'
+  (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images')  # rename 'tracking' to 'images'
+  for d in "train.json", "val.json":
+      argoverse2yolo(dir / annotations_dir / d)  # convert Argoverse annotations to YOLO labels
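The `argoverse2yolo` helper above converts Argoverse-HD's COCO-style boxes (top-left corner plus width/height, in pixels) into YOLO's normalized center format, dividing by the 1920×1200 frame size of the ring-front-center camera. A minimal sketch of that conversion for a single box follows; the example values are made up.

```python
# Minimal sketch of the box conversion performed in the download script above:
# [x_top_left, y_top_left, width, height] in pixels -> normalized [x_center, y_center, width, height].
def coco_to_yolo(box, img_w=1920.0, img_h=1200.0):
    x, y, w, h = box
    x_center = (x + w / 2) / img_w   # offset to the box center, then scale to 0-1
    y_center = (y + h / 2) / img_h
    return x_center, y_center, w / img_w, h / img_h

print(coco_to_yolo([860.0, 575.0, 120.0, 50.0]))  # example box near the image center (values made up)
```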

+ 54 - 0
data/GlobalWheat2020.yaml

@@ -0,0 +1,54 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
+# Example usage: python train.py --data GlobalWheat2020.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── GlobalWheat2020  ← downloads here (7.0 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/GlobalWheat2020  # dataset root dir
+train: # train images (relative to 'path') 3422 images
+  - images/arvalis_1
+  - images/arvalis_2
+  - images/arvalis_3
+  - images/ethz_1
+  - images/rres_1
+  - images/inrae_1
+  - images/usask_1
+val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
+  - images/ethz_1
+test: # test images (optional) 1276 images
+  - images/utokyo_1
+  - images/utokyo_2
+  - images/nau_1
+  - images/uq_1
+
+# Classes
+names:
+  0: wheat_head
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  from utils.general import download, Path
+
+
+  # Download
+  dir = Path(yaml['path'])  # dataset root dir
+  urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
+          'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
+  download(urls, dir=dir)
+
+  # Make Directories
+  for p in 'annotations', 'images', 'labels':
+      (dir / p).mkdir(parents=True, exist_ok=True)
+
+  # Move
+  for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
+           'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
+      (dir / p).rename(dir / 'images' / p)  # move to /images
+      f = (dir / p).with_suffix('.json')  # json file
+      if f.exists():
+          f.rename((dir / 'annotations' / p).with_suffix('.json'))  # move to /annotations
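GlobalWheat2020.yaml above illustrates that a split (`train`, `val`, `test`) may be a single directory or a list of directories, all relative to `path`. Below is a small sketch, assuming a local copy of the YAML file, that resolves each entry and checks whether the directories exist; the file location is an assumption.

```python
# Hedged sketch: resolve the train/val/test entries of a dataset YAML such as GlobalWheat2020.yaml.
# The location 'data/GlobalWheat2020.yaml' is an assumption - point it at your local copy.
from pathlib import Path

import yaml

with open('data/GlobalWheat2020.yaml', errors='ignore') as f:
    cfg = yaml.safe_load(f)

root = Path(cfg['path'])
for split in ('train', 'val', 'test'):
    entry = cfg.get(split)
    dirs = entry if isinstance(entry, list) else [entry]  # a split may be one dir or a list of dirs
    for d in (d for d in dirs if d):
        p = root / d
        print(f"{split}: {p} ({'found' if p.exists() else 'missing'})")
```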

+ 1022 - 0
data/ImageNet.yaml

@@ -0,0 +1,1022 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University
+# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
+# Example usage: python classify/train.py --data imagenet
+# parent
+# ├── yolov5
+# └── datasets
+#     └── imagenet  ← downloads here (144 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/imagenet  # dataset root dir
+train: train  # train images (relative to 'path') 1281167 images
+val: val  # val images (relative to 'path') 50000 images
+test:  # test images (optional)
+
+# Classes
+names:
+  0: tench
+  1: goldfish
+  2: great white shark
+  3: tiger shark
+  4: hammerhead shark
+  5: electric ray
+  6: stingray
+  7: cock
+  8: hen
+  9: ostrich
+  10: brambling
+  11: goldfinch
+  12: house finch
+  13: junco
+  14: indigo bunting
+  15: American robin
+  16: bulbul
+  17: jay
+  18: magpie
+  19: chickadee
+  20: American dipper
+  21: kite
+  22: bald eagle
+  23: vulture
+  24: great grey owl
+  25: fire salamander
+  26: smooth newt
+  27: newt
+  28: spotted salamander
+  29: axolotl
+  30: American bullfrog
+  31: tree frog
+  32: tailed frog
+  33: loggerhead sea turtle
+  34: leatherback sea turtle
+  35: mud turtle
+  36: terrapin
+  37: box turtle
+  38: banded gecko
+  39: green iguana
+  40: Carolina anole
+  41: desert grassland whiptail lizard
+  42: agama
+  43: frilled-necked lizard
+  44: alligator lizard
+  45: Gila monster
+  46: European green lizard
+  47: chameleon
+  48: Komodo dragon
+  49: Nile crocodile
+  50: American alligator
+  51: triceratops
+  52: worm snake
+  53: ring-necked snake
+  54: eastern hog-nosed snake
+  55: smooth green snake
+  56: kingsnake
+  57: garter snake
+  58: water snake
+  59: vine snake
+  60: night snake
+  61: boa constrictor
+  62: African rock python
+  63: Indian cobra
+  64: green mamba
+  65: sea snake
+  66: Saharan horned viper
+  67: eastern diamondback rattlesnake
+  68: sidewinder
+  69: trilobite
+  70: harvestman
+  71: scorpion
+  72: yellow garden spider
+  73: barn spider
+  74: European garden spider
+  75: southern black widow
+  76: tarantula
+  77: wolf spider
+  78: tick
+  79: centipede
+  80: black grouse
+  81: ptarmigan
+  82: ruffed grouse
+  83: prairie grouse
+  84: peacock
+  85: quail
+  86: partridge
+  87: grey parrot
+  88: macaw
+  89: sulphur-crested cockatoo
+  90: lorikeet
+  91: coucal
+  92: bee eater
+  93: hornbill
+  94: hummingbird
+  95: jacamar
+  96: toucan
+  97: duck
+  98: red-breasted merganser
+  99: goose
+  100: black swan
+  101: tusker
+  102: echidna
+  103: platypus
+  104: wallaby
+  105: koala
+  106: wombat
+  107: jellyfish
+  108: sea anemone
+  109: brain coral
+  110: flatworm
+  111: nematode
+  112: conch
+  113: snail
+  114: slug
+  115: sea slug
+  116: chiton
+  117: chambered nautilus
+  118: Dungeness crab
+  119: rock crab
+  120: fiddler crab
+  121: red king crab
+  122: American lobster
+  123: spiny lobster
+  124: crayfish
+  125: hermit crab
+  126: isopod
+  127: white stork
+  128: black stork
+  129: spoonbill
+  130: flamingo
+  131: little blue heron
+  132: great egret
+  133: bittern
+  134: crane (bird)
+  135: limpkin
+  136: common gallinule
+  137: American coot
+  138: bustard
+  139: ruddy turnstone
+  140: dunlin
+  141: common redshank
+  142: dowitcher
+  143: oystercatcher
+  144: pelican
+  145: king penguin
+  146: albatross
+  147: grey whale
+  148: killer whale
+  149: dugong
+  150: sea lion
+  151: Chihuahua
+  152: Japanese Chin
+  153: Maltese
+  154: Pekingese
+  155: Shih Tzu
+  156: King Charles Spaniel
+  157: Papillon
+  158: toy terrier
+  159: Rhodesian Ridgeback
+  160: Afghan Hound
+  161: Basset Hound
+  162: Beagle
+  163: Bloodhound
+  164: Bluetick Coonhound
+  165: Black and Tan Coonhound
+  166: Treeing Walker Coonhound
+  167: English foxhound
+  168: Redbone Coonhound
+  169: borzoi
+  170: Irish Wolfhound
+  171: Italian Greyhound
+  172: Whippet
+  173: Ibizan Hound
+  174: Norwegian Elkhound
+  175: Otterhound
+  176: Saluki
+  177: Scottish Deerhound
+  178: Weimaraner
+  179: Staffordshire Bull Terrier
+  180: American Staffordshire Terrier
+  181: Bedlington Terrier
+  182: Border Terrier
+  183: Kerry Blue Terrier
+  184: Irish Terrier
+  185: Norfolk Terrier
+  186: Norwich Terrier
+  187: Yorkshire Terrier
+  188: Wire Fox Terrier
+  189: Lakeland Terrier
+  190: Sealyham Terrier
+  191: Airedale Terrier
+  192: Cairn Terrier
+  193: Australian Terrier
+  194: Dandie Dinmont Terrier
+  195: Boston Terrier
+  196: Miniature Schnauzer
+  197: Giant Schnauzer
+  198: Standard Schnauzer
+  199: Scottish Terrier
+  200: Tibetan Terrier
+  201: Australian Silky Terrier
+  202: Soft-coated Wheaten Terrier
+  203: West Highland White Terrier
+  204: Lhasa Apso
+  205: Flat-Coated Retriever
+  206: Curly-coated Retriever
+  207: Golden Retriever
+  208: Labrador Retriever
+  209: Chesapeake Bay Retriever
+  210: German Shorthaired Pointer
+  211: Vizsla
+  212: English Setter
+  213: Irish Setter
+  214: Gordon Setter
+  215: Brittany
+  216: Clumber Spaniel
+  217: English Springer Spaniel
+  218: Welsh Springer Spaniel
+  219: Cocker Spaniels
+  220: Sussex Spaniel
+  221: Irish Water Spaniel
+  222: Kuvasz
+  223: Schipperke
+  224: Groenendael
+  225: Malinois
+  226: Briard
+  227: Australian Kelpie
+  228: Komondor
+  229: Old English Sheepdog
+  230: Shetland Sheepdog
+  231: collie
+  232: Border Collie
+  233: Bouvier des Flandres
+  234: Rottweiler
+  235: German Shepherd Dog
+  236: Dobermann
+  237: Miniature Pinscher
+  238: Greater Swiss Mountain Dog
+  239: Bernese Mountain Dog
+  240: Appenzeller Sennenhund
+  241: Entlebucher Sennenhund
+  242: Boxer
+  243: Bullmastiff
+  244: Tibetan Mastiff
+  245: French Bulldog
+  246: Great Dane
+  247: St. Bernard
+  248: husky
+  249: Alaskan Malamute
+  250: Siberian Husky
+  251: Dalmatian
+  252: Affenpinscher
+  253: Basenji
+  254: pug
+  255: Leonberger
+  256: Newfoundland
+  257: Pyrenean Mountain Dog
+  258: Samoyed
+  259: Pomeranian
+  260: Chow Chow
+  261: Keeshond
+  262: Griffon Bruxellois
+  263: Pembroke Welsh Corgi
+  264: Cardigan Welsh Corgi
+  265: Toy Poodle
+  266: Miniature Poodle
+  267: Standard Poodle
+  268: Mexican hairless dog
+  269: grey wolf
+  270: Alaskan tundra wolf
+  271: red wolf
+  272: coyote
+  273: dingo
+  274: dhole
+  275: African wild dog
+  276: hyena
+  277: red fox
+  278: kit fox
+  279: Arctic fox
+  280: grey fox
+  281: tabby cat
+  282: tiger cat
+  283: Persian cat
+  284: Siamese cat
+  285: Egyptian Mau
+  286: cougar
+  287: lynx
+  288: leopard
+  289: snow leopard
+  290: jaguar
+  291: lion
+  292: tiger
+  293: cheetah
+  294: brown bear
+  295: American black bear
+  296: polar bear
+  297: sloth bear
+  298: mongoose
+  299: meerkat
+  300: tiger beetle
+  301: ladybug
+  302: ground beetle
+  303: longhorn beetle
+  304: leaf beetle
+  305: dung beetle
+  306: rhinoceros beetle
+  307: weevil
+  308: fly
+  309: bee
+  310: ant
+  311: grasshopper
+  312: cricket
+  313: stick insect
+  314: cockroach
+  315: mantis
+  316: cicada
+  317: leafhopper
+  318: lacewing
+  319: dragonfly
+  320: damselfly
+  321: red admiral
+  322: ringlet
+  323: monarch butterfly
+  324: small white
+  325: sulphur butterfly
+  326: gossamer-winged butterfly
+  327: starfish
+  328: sea urchin
+  329: sea cucumber
+  330: cottontail rabbit
+  331: hare
+  332: Angora rabbit
+  333: hamster
+  334: porcupine
+  335: fox squirrel
+  336: marmot
+  337: beaver
+  338: guinea pig
+  339: common sorrel
+  340: zebra
+  341: pig
+  342: wild boar
+  343: warthog
+  344: hippopotamus
+  345: ox
+  346: water buffalo
+  347: bison
+  348: ram
+  349: bighorn sheep
+  350: Alpine ibex
+  351: hartebeest
+  352: impala
+  353: gazelle
+  354: dromedary
+  355: llama
+  356: weasel
+  357: mink
+  358: European polecat
+  359: black-footed ferret
+  360: otter
+  361: skunk
+  362: badger
+  363: armadillo
+  364: three-toed sloth
+  365: orangutan
+  366: gorilla
+  367: chimpanzee
+  368: gibbon
+  369: siamang
+  370: guenon
+  371: patas monkey
+  372: baboon
+  373: macaque
+  374: langur
+  375: black-and-white colobus
+  376: proboscis monkey
+  377: marmoset
+  378: white-headed capuchin
+  379: howler monkey
+  380: titi
+  381: Geoffroy's spider monkey
+  382: common squirrel monkey
+  383: ring-tailed lemur
+  384: indri
+  385: Asian elephant
+  386: African bush elephant
+  387: red panda
+  388: giant panda
+  389: snoek
+  390: eel
+  391: coho salmon
+  392: rock beauty
+  393: clownfish
+  394: sturgeon
+  395: garfish
+  396: lionfish
+  397: pufferfish
+  398: abacus
+  399: abaya
+  400: academic gown
+  401: accordion
+  402: acoustic guitar
+  403: aircraft carrier
+  404: airliner
+  405: airship
+  406: altar
+  407: ambulance
+  408: amphibious vehicle
+  409: analog clock
+  410: apiary
+  411: apron
+  412: waste container
+  413: assault rifle
+  414: backpack
+  415: bakery
+  416: balance beam
+  417: balloon
+  418: ballpoint pen
+  419: Band-Aid
+  420: banjo
+  421: baluster
+  422: barbell
+  423: barber chair
+  424: barbershop
+  425: barn
+  426: barometer
+  427: barrel
+  428: wheelbarrow
+  429: baseball
+  430: basketball
+  431: bassinet
+  432: bassoon
+  433: swimming cap
+  434: bath towel
+  435: bathtub
+  436: station wagon
+  437: lighthouse
+  438: beaker
+  439: military cap
+  440: beer bottle
+  441: beer glass
+  442: bell-cot
+  443: bib
+  444: tandem bicycle
+  445: bikini
+  446: ring binder
+  447: binoculars
+  448: birdhouse
+  449: boathouse
+  450: bobsleigh
+  451: bolo tie
+  452: poke bonnet
+  453: bookcase
+  454: bookstore
+  455: bottle cap
+  456: bow
+  457: bow tie
+  458: brass
+  459: bra
+  460: breakwater
+  461: breastplate
+  462: broom
+  463: bucket
+  464: buckle
+  465: bulletproof vest
+  466: high-speed train
+  467: butcher shop
+  468: taxicab
+  469: cauldron
+  470: candle
+  471: cannon
+  472: canoe
+  473: can opener
+  474: cardigan
+  475: car mirror
+  476: carousel
+  477: tool kit
+  478: carton
+  479: car wheel
+  480: automated teller machine
+  481: cassette
+  482: cassette player
+  483: castle
+  484: catamaran
+  485: CD player
+  486: cello
+  487: mobile phone
+  488: chain
+  489: chain-link fence
+  490: chain mail
+  491: chainsaw
+  492: chest
+  493: chiffonier
+  494: chime
+  495: china cabinet
+  496: Christmas stocking
+  497: church
+  498: movie theater
+  499: cleaver
+  500: cliff dwelling
+  501: cloak
+  502: clogs
+  503: cocktail shaker
+  504: coffee mug
+  505: coffeemaker
+  506: coil
+  507: combination lock
+  508: computer keyboard
+  509: confectionery store
+  510: container ship
+  511: convertible
+  512: corkscrew
+  513: cornet
+  514: cowboy boot
+  515: cowboy hat
+  516: cradle
+  517: crane (machine)
+  518: crash helmet
+  519: crate
+  520: infant bed
+  521: Crock Pot
+  522: croquet ball
+  523: crutch
+  524: cuirass
+  525: dam
+  526: desk
+  527: desktop computer
+  528: rotary dial telephone
+  529: diaper
+  530: digital clock
+  531: digital watch
+  532: dining table
+  533: dishcloth
+  534: dishwasher
+  535: disc brake
+  536: dock
+  537: dog sled
+  538: dome
+  539: doormat
+  540: drilling rig
+  541: drum
+  542: drumstick
+  543: dumbbell
+  544: Dutch oven
+  545: electric fan
+  546: electric guitar
+  547: electric locomotive
+  548: entertainment center
+  549: envelope
+  550: espresso machine
+  551: face powder
+  552: feather boa
+  553: filing cabinet
+  554: fireboat
+  555: fire engine
+  556: fire screen sheet
+  557: flagpole
+  558: flute
+  559: folding chair
+  560: football helmet
+  561: forklift
+  562: fountain
+  563: fountain pen
+  564: four-poster bed
+  565: freight car
+  566: French horn
+  567: frying pan
+  568: fur coat
+  569: garbage truck
+  570: gas mask
+  571: gas pump
+  572: goblet
+  573: go-kart
+  574: golf ball
+  575: golf cart
+  576: gondola
+  577: gong
+  578: gown
+  579: grand piano
+  580: greenhouse
+  581: grille
+  582: grocery store
+  583: guillotine
+  584: barrette
+  585: hair spray
+  586: half-track
+  587: hammer
+  588: hamper
+  589: hair dryer
+  590: hand-held computer
+  591: handkerchief
+  592: hard disk drive
+  593: harmonica
+  594: harp
+  595: harvester
+  596: hatchet
+  597: holster
+  598: home theater
+  599: honeycomb
+  600: hook
+  601: hoop skirt
+  602: horizontal bar
+  603: horse-drawn vehicle
+  604: hourglass
+  605: iPod
+  606: clothes iron
+  607: jack-o'-lantern
+  608: jeans
+  609: jeep
+  610: T-shirt
+  611: jigsaw puzzle
+  612: pulled rickshaw
+  613: joystick
+  614: kimono
+  615: knee pad
+  616: knot
+  617: lab coat
+  618: ladle
+  619: lampshade
+  620: laptop computer
+  621: lawn mower
+  622: lens cap
+  623: paper knife
+  624: library
+  625: lifeboat
+  626: lighter
+  627: limousine
+  628: ocean liner
+  629: lipstick
+  630: slip-on shoe
+  631: lotion
+  632: speaker
+  633: loupe
+  634: sawmill
+  635: magnetic compass
+  636: mail bag
+  637: mailbox
+  638: tights
+  639: tank suit
+  640: manhole cover
+  641: maraca
+  642: marimba
+  643: mask
+  644: match
+  645: maypole
+  646: maze
+  647: measuring cup
+  648: medicine chest
+  649: megalith
+  650: microphone
+  651: microwave oven
+  652: military uniform
+  653: milk can
+  654: minibus
+  655: miniskirt
+  656: minivan
+  657: missile
+  658: mitten
+  659: mixing bowl
+  660: mobile home
+  661: Model T
+  662: modem
+  663: monastery
+  664: monitor
+  665: moped
+  666: mortar
+  667: square academic cap
+  668: mosque
+  669: mosquito net
+  670: scooter
+  671: mountain bike
+  672: tent
+  673: computer mouse
+  674: mousetrap
+  675: moving van
+  676: muzzle
+  677: nail
+  678: neck brace
+  679: necklace
+  680: nipple
+  681: notebook computer
+  682: obelisk
+  683: oboe
+  684: ocarina
+  685: odometer
+  686: oil filter
+  687: organ
+  688: oscilloscope
+  689: overskirt
+  690: bullock cart
+  691: oxygen mask
+  692: packet
+  693: paddle
+  694: paddle wheel
+  695: padlock
+  696: paintbrush
+  697: pajamas
+  698: palace
+  699: pan flute
+  700: paper towel
+  701: parachute
+  702: parallel bars
+  703: park bench
+  704: parking meter
+  705: passenger car
+  706: patio
+  707: payphone
+  708: pedestal
+  709: pencil case
+  710: pencil sharpener
+  711: perfume
+  712: Petri dish
+  713: photocopier
+  714: plectrum
+  715: Pickelhaube
+  716: picket fence
+  717: pickup truck
+  718: pier
+  719: piggy bank
+  720: pill bottle
+  721: pillow
+  722: ping-pong ball
+  723: pinwheel
+  724: pirate ship
+  725: pitcher
+  726: hand plane
+  727: planetarium
+  728: plastic bag
+  729: plate rack
+  730: plow
+  731: plunger
+  732: Polaroid camera
+  733: pole
+  734: police van
+  735: poncho
+  736: billiard table
+  737: soda bottle
+  738: pot
+  739: potter's wheel
+  740: power drill
+  741: prayer rug
+  742: printer
+  743: prison
+  744: projectile
+  745: projector
+  746: hockey puck
+  747: punching bag
+  748: purse
+  749: quill
+  750: quilt
+  751: race car
+  752: racket
+  753: radiator
+  754: radio
+  755: radio telescope
+  756: rain barrel
+  757: recreational vehicle
+  758: reel
+  759: reflex camera
+  760: refrigerator
+  761: remote control
+  762: restaurant
+  763: revolver
+  764: rifle
+  765: rocking chair
+  766: rotisserie
+  767: eraser
+  768: rugby ball
+  769: ruler
+  770: running shoe
+  771: safe
+  772: safety pin
+  773: salt shaker
+  774: sandal
+  775: sarong
+  776: saxophone
+  777: scabbard
+  778: weighing scale
+  779: school bus
+  780: schooner
+  781: scoreboard
+  782: CRT screen
+  783: screw
+  784: screwdriver
+  785: seat belt
+  786: sewing machine
+  787: shield
+  788: shoe store
+  789: shoji
+  790: shopping basket
+  791: shopping cart
+  792: shovel
+  793: shower cap
+  794: shower curtain
+  795: ski
+  796: ski mask
+  797: sleeping bag
+  798: slide rule
+  799: sliding door
+  800: slot machine
+  801: snorkel
+  802: snowmobile
+  803: snowplow
+  804: soap dispenser
+  805: soccer ball
+  806: sock
+  807: solar thermal collector
+  808: sombrero
+  809: soup bowl
+  810: space bar
+  811: space heater
+  812: space shuttle
+  813: spatula
+  814: motorboat
+  815: spider web
+  816: spindle
+  817: sports car
+  818: spotlight
+  819: stage
+  820: steam locomotive
+  821: through arch bridge
+  822: steel drum
+  823: stethoscope
+  824: scarf
+  825: stone wall
+  826: stopwatch
+  827: stove
+  828: strainer
+  829: tram
+  830: stretcher
+  831: couch
+  832: stupa
+  833: submarine
+  834: suit
+  835: sundial
+  836: sunglass
+  837: sunglasses
+  838: sunscreen
+  839: suspension bridge
+  840: mop
+  841: sweatshirt
+  842: swimsuit
+  843: swing
+  844: switch
+  845: syringe
+  846: table lamp
+  847: tank
+  848: tape player
+  849: teapot
+  850: teddy bear
+  851: television
+  852: tennis ball
+  853: thatched roof
+  854: front curtain
+  855: thimble
+  856: threshing machine
+  857: throne
+  858: tile roof
+  859: toaster
+  860: tobacco shop
+  861: toilet seat
+  862: torch
+  863: totem pole
+  864: tow truck
+  865: toy store
+  866: tractor
+  867: semi-trailer truck
+  868: tray
+  869: trench coat
+  870: tricycle
+  871: trimaran
+  872: tripod
+  873: triumphal arch
+  874: trolleybus
+  875: trombone
+  876: tub
+  877: turnstile
+  878: typewriter keyboard
+  879: umbrella
+  880: unicycle
+  881: upright piano
+  882: vacuum cleaner
+  883: vase
+  884: vault
+  885: velvet
+  886: vending machine
+  887: vestment
+  888: viaduct
+  889: violin
+  890: volleyball
+  891: waffle iron
+  892: wall clock
+  893: wallet
+  894: wardrobe
+  895: military aircraft
+  896: sink
+  897: washing machine
+  898: water bottle
+  899: water jug
+  900: water tower
+  901: whiskey jug
+  902: whistle
+  903: wig
+  904: window screen
+  905: window shade
+  906: Windsor tie
+  907: wine bottle
+  908: wing
+  909: wok
+  910: wooden spoon
+  911: wool
+  912: split-rail fence
+  913: shipwreck
+  914: yawl
+  915: yurt
+  916: website
+  917: comic book
+  918: crossword
+  919: traffic sign
+  920: traffic light
+  921: dust jacket
+  922: menu
+  923: plate
+  924: guacamole
+  925: consomme
+  926: hot pot
+  927: trifle
+  928: ice cream
+  929: ice pop
+  930: baguette
+  931: bagel
+  932: pretzel
+  933: cheeseburger
+  934: hot dog
+  935: mashed potato
+  936: cabbage
+  937: broccoli
+  938: cauliflower
+  939: zucchini
+  940: spaghetti squash
+  941: acorn squash
+  942: butternut squash
+  943: cucumber
+  944: artichoke
+  945: bell pepper
+  946: cardoon
+  947: mushroom
+  948: Granny Smith
+  949: strawberry
+  950: orange
+  951: lemon
+  952: fig
+  953: pineapple
+  954: banana
+  955: jackfruit
+  956: custard apple
+  957: pomegranate
+  958: hay
+  959: carbonara
+  960: chocolate syrup
+  961: dough
+  962: meatloaf
+  963: pizza
+  964: pot pie
+  965: burrito
+  966: red wine
+  967: espresso
+  968: cup
+  969: eggnog
+  970: alp
+  971: bubble
+  972: cliff
+  973: coral reef
+  974: geyser
+  975: lakeshore
+  976: promontory
+  977: shoal
+  978: seashore
+  979: valley
+  980: volcano
+  981: baseball player
+  982: bridegroom
+  983: scuba diver
+  984: rapeseed
+  985: daisy
+  986: yellow lady's slipper
+  987: corn
+  988: acorn
+  989: rose hip
+  990: horse chestnut seed
+  991: coral fungus
+  992: agaric
+  993: gyromitra
+  994: stinkhorn mushroom
+  995: earth star
+  996: hen-of-the-woods
+  997: bolete
+  998: ear
+  999: toilet paper
+
+
+# Download script/URL (optional)
+download: data/scripts/get_imagenet.sh

+ 438 - 0
data/Objects365.yaml

@@ -0,0 +1,438 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Objects365 dataset https://www.objects365.org/ by Megvii
+# Example usage: python train.py --data Objects365.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── Objects365  ← downloads here (712 GB = 367G data + 345G zips)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/Objects365  # dataset root dir
+train: images/train  # train images (relative to 'path') 1742289 images
+val: images/val # val images (relative to 'path') 80000 images
+test:  # test images (optional)
+
+# Classes
+names:
+  0: Person
+  1: Sneakers
+  2: Chair
+  3: Other Shoes
+  4: Hat
+  5: Car
+  6: Lamp
+  7: Glasses
+  8: Bottle
+  9: Desk
+  10: Cup
+  11: Street Lights
+  12: Cabinet/shelf
+  13: Handbag/Satchel
+  14: Bracelet
+  15: Plate
+  16: Picture/Frame
+  17: Helmet
+  18: Book
+  19: Gloves
+  20: Storage box
+  21: Boat
+  22: Leather Shoes
+  23: Flower
+  24: Bench
+  25: Potted Plant
+  26: Bowl/Basin
+  27: Flag
+  28: Pillow
+  29: Boots
+  30: Vase
+  31: Microphone
+  32: Necklace
+  33: Ring
+  34: SUV
+  35: Wine Glass
+  36: Belt
+  37: Monitor/TV
+  38: Backpack
+  39: Umbrella
+  40: Traffic Light
+  41: Speaker
+  42: Watch
+  43: Tie
+  44: Trash bin Can
+  45: Slippers
+  46: Bicycle
+  47: Stool
+  48: Barrel/bucket
+  49: Van
+  50: Couch
+  51: Sandals
+  52: Basket
+  53: Drum
+  54: Pen/Pencil
+  55: Bus
+  56: Wild Bird
+  57: High Heels
+  58: Motorcycle
+  59: Guitar
+  60: Carpet
+  61: Cell Phone
+  62: Bread
+  63: Camera
+  64: Canned
+  65: Truck
+  66: Traffic cone
+  67: Cymbal
+  68: Lifesaver
+  69: Towel
+  70: Stuffed Toy
+  71: Candle
+  72: Sailboat
+  73: Laptop
+  74: Awning
+  75: Bed
+  76: Faucet
+  77: Tent
+  78: Horse
+  79: Mirror
+  80: Power outlet
+  81: Sink
+  82: Apple
+  83: Air Conditioner
+  84: Knife
+  85: Hockey Stick
+  86: Paddle
+  87: Pickup Truck
+  88: Fork
+  89: Traffic Sign
+  90: Balloon
+  91: Tripod
+  92: Dog
+  93: Spoon
+  94: Clock
+  95: Pot
+  96: Cow
+  97: Cake
+  98: Dinning Table
+  99: Sheep
+  100: Hanger
+  101: Blackboard/Whiteboard
+  102: Napkin
+  103: Other Fish
+  104: Orange/Tangerine
+  105: Toiletry
+  106: Keyboard
+  107: Tomato
+  108: Lantern
+  109: Machinery Vehicle
+  110: Fan
+  111: Green Vegetables
+  112: Banana
+  113: Baseball Glove
+  114: Airplane
+  115: Mouse
+  116: Train
+  117: Pumpkin
+  118: Soccer
+  119: Skiboard
+  120: Luggage
+  121: Nightstand
+  122: Tea pot
+  123: Telephone
+  124: Trolley
+  125: Head Phone
+  126: Sports Car
+  127: Stop Sign
+  128: Dessert
+  129: Scooter
+  130: Stroller
+  131: Crane
+  132: Remote
+  133: Refrigerator
+  134: Oven
+  135: Lemon
+  136: Duck
+  137: Baseball Bat
+  138: Surveillance Camera
+  139: Cat
+  140: Jug
+  141: Broccoli
+  142: Piano
+  143: Pizza
+  144: Elephant
+  145: Skateboard
+  146: Surfboard
+  147: Gun
+  148: Skating and Skiing shoes
+  149: Gas stove
+  150: Donut
+  151: Bow Tie
+  152: Carrot
+  153: Toilet
+  154: Kite
+  155: Strawberry
+  156: Other Balls
+  157: Shovel
+  158: Pepper
+  159: Computer Box
+  160: Toilet Paper
+  161: Cleaning Products
+  162: Chopsticks
+  163: Microwave
+  164: Pigeon
+  165: Baseball
+  166: Cutting/chopping Board
+  167: Coffee Table
+  168: Side Table
+  169: Scissors
+  170: Marker
+  171: Pie
+  172: Ladder
+  173: Snowboard
+  174: Cookies
+  175: Radiator
+  176: Fire Hydrant
+  177: Basketball
+  178: Zebra
+  179: Grape
+  180: Giraffe
+  181: Potato
+  182: Sausage
+  183: Tricycle
+  184: Violin
+  185: Egg
+  186: Fire Extinguisher
+  187: Candy
+  188: Fire Truck
+  189: Billiards
+  190: Converter
+  191: Bathtub
+  192: Wheelchair
+  193: Golf Club
+  194: Briefcase
+  195: Cucumber
+  196: Cigar/Cigarette
+  197: Paint Brush
+  198: Pear
+  199: Heavy Truck
+  200: Hamburger
+  201: Extractor
+  202: Extension Cord
+  203: Tong
+  204: Tennis Racket
+  205: Folder
+  206: American Football
+  207: earphone
+  208: Mask
+  209: Kettle
+  210: Tennis
+  211: Ship
+  212: Swing
+  213: Coffee Machine
+  214: Slide
+  215: Carriage
+  216: Onion
+  217: Green beans
+  218: Projector
+  219: Frisbee
+  220: Washing Machine/Drying Machine
+  221: Chicken
+  222: Printer
+  223: Watermelon
+  224: Saxophone
+  225: Tissue
+  226: Toothbrush
+  227: Ice cream
+  228: Hot-air balloon
+  229: Cello
+  230: French Fries
+  231: Scale
+  232: Trophy
+  233: Cabbage
+  234: Hot dog
+  235: Blender
+  236: Peach
+  237: Rice
+  238: Wallet/Purse
+  239: Volleyball
+  240: Deer
+  241: Goose
+  242: Tape
+  243: Tablet
+  244: Cosmetics
+  245: Trumpet
+  246: Pineapple
+  247: Golf Ball
+  248: Ambulance
+  249: Parking meter
+  250: Mango
+  251: Key
+  252: Hurdle
+  253: Fishing Rod
+  254: Medal
+  255: Flute
+  256: Brush
+  257: Penguin
+  258: Megaphone
+  259: Corn
+  260: Lettuce
+  261: Garlic
+  262: Swan
+  263: Helicopter
+  264: Green Onion
+  265: Sandwich
+  266: Nuts
+  267: Speed Limit Sign
+  268: Induction Cooker
+  269: Broom
+  270: Trombone
+  271: Plum
+  272: Rickshaw
+  273: Goldfish
+  274: Kiwi fruit
+  275: Router/modem
+  276: Poker Card
+  277: Toaster
+  278: Shrimp
+  279: Sushi
+  280: Cheese
+  281: Notepaper
+  282: Cherry
+  283: Pliers
+  284: CD
+  285: Pasta
+  286: Hammer
+  287: Cue
+  288: Avocado
+  289: Hamimelon
+  290: Flask
+  291: Mushroom
+  292: Screwdriver
+  293: Soap
+  294: Recorder
+  295: Bear
+  296: Eggplant
+  297: Board Eraser
+  298: Coconut
+  299: Tape Measure/Ruler
+  300: Pig
+  301: Showerhead
+  302: Globe
+  303: Chips
+  304: Steak
+  305: Crosswalk Sign
+  306: Stapler
+  307: Camel
+  308: Formula 1
+  309: Pomegranate
+  310: Dishwasher
+  311: Crab
+  312: Hoverboard
+  313: Meat ball
+  314: Rice Cooker
+  315: Tuba
+  316: Calculator
+  317: Papaya
+  318: Antelope
+  319: Parrot
+  320: Seal
+  321: Butterfly
+  322: Dumbbell
+  323: Donkey
+  324: Lion
+  325: Urinal
+  326: Dolphin
+  327: Electric Drill
+  328: Hair Dryer
+  329: Egg tart
+  330: Jellyfish
+  331: Treadmill
+  332: Lighter
+  333: Grapefruit
+  334: Game board
+  335: Mop
+  336: Radish
+  337: Baozi
+  338: Target
+  339: French
+  340: Spring Rolls
+  341: Monkey
+  342: Rabbit
+  343: Pencil Case
+  344: Yak
+  345: Red Cabbage
+  346: Binoculars
+  347: Asparagus
+  348: Barbell
+  349: Scallop
+  350: Noddles
+  351: Comb
+  352: Dumpling
+  353: Oyster
+  354: Table Tennis paddle
+  355: Cosmetics Brush/Eyeliner Pencil
+  356: Chainsaw
+  357: Eraser
+  358: Lobster
+  359: Durian
+  360: Okra
+  361: Lipstick
+  362: Cosmetics Mirror
+  363: Curling
+  364: Table Tennis
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  from tqdm import tqdm
+
+  from utils.general import Path, check_requirements, download, np, xyxy2xywhn
+
+  check_requirements('pycocotools>=2.0')
+  from pycocotools.coco import COCO
+
+  # Make Directories
+  dir = Path(yaml['path'])  # dataset root dir
+  for p in 'images', 'labels':
+      (dir / p).mkdir(parents=True, exist_ok=True)
+      for q in 'train', 'val':
+          (dir / p / q).mkdir(parents=True, exist_ok=True)
+
+  # Train, Val Splits
+  for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
+      print(f"Processing {split} in {patches} patches ...")
+      images, labels = dir / 'images' / split, dir / 'labels' / split
+
+      # Download
+      url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
+      if split == 'train':
+          download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False)  # annotations json
+          download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
+      elif split == 'val':
+          download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False)  # annotations json
+          download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
+          download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
+
+      # Move
+      for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
+          f.rename(images / f.name)  # move to /images/{split}
+
+      # Labels
+      coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
+      names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
+      for cid, cat in enumerate(names):
+          catIds = coco.getCatIds(catNms=[cat])
+          imgIds = coco.getImgIds(catIds=catIds)
+          for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
+              width, height = im["width"], im["height"]
+              path = Path(im["file_name"])  # image filename
+              try:
+                  with open(labels / path.with_suffix('.txt').name, 'a') as file:
+                      annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=False)
+                      for a in coco.loadAnns(annIds):
+                          x, y, w, h = a['bbox']  # bounding box in xywh (xy top-left corner)
+                          xyxy = np.array([x, y, x + w, y + h])[None]  # pixels(1,4)
+                          x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0]  # normalized and clipped
+                          file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
+              except Exception as e:
+                  print(e)

+ 53 - 0
data/SKU-110K.yaml

@@ -0,0 +1,53 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
+# Example usage: python train.py --data SKU-110K.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── SKU-110K  ← downloads here (13.6 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/SKU-110K  # dataset root dir
+train: train.txt  # train images (relative to 'path')  8219 images
+val: val.txt  # val images (relative to 'path')  588 images
+test: test.txt  # test images (optional)  2936 images
+
+# Classes
+names:
+  0: object
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  import shutil
+  from tqdm import tqdm
+  from utils.general import np, pd, Path, download, xyxy2xywh
+
+
+  # Download
+  dir = Path(yaml['path'])  # dataset root dir
+  parent = Path(dir.parent)  # download dir
+  urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
+  download(urls, dir=parent, delete=False)
+
+  # Rename directories
+  if dir.exists():
+      shutil.rmtree(dir)
+  (parent / 'SKU110K_fixed').rename(dir)  # rename dir
+  (dir / 'labels').mkdir(parents=True, exist_ok=True)  # create labels dir
+
+  # Convert labels
+  names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height'  # column names
+  for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
+      x = pd.read_csv(dir / 'annotations' / d, names=names).values  # annotations
+      images, unique_images = x[:, 0], np.unique(x[:, 0])
+      with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
+          f.writelines(f'./images/{s}\n' for s in unique_images)
+      for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
+          cls = 0  # single-class dataset
+          with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
+              for r in x[images == im]:
+                  w, h = r[6], r[7]  # image width, height
+                  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0]  # instance
+                  f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n")  # write label

+ 100 - 0
data/VOC.yaml

@@ -0,0 +1,100 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
+# Example usage: python train.py --data VOC.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── VOC  ← downloads here (2.8 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/VOC
+train: # train images (relative to 'path')  16551 images
+  - images/train2012
+  - images/train2007
+  - images/val2012
+  - images/val2007
+val: # val images (relative to 'path')  4952 images
+  - images/test2007
+test: # test images (optional)
+  - images/test2007
+
+# Classes
+names:
+  0: aeroplane
+  1: bicycle
+  2: bird
+  3: boat
+  4: bottle
+  5: bus
+  6: car
+  7: cat
+  8: chair
+  9: cow
+  10: diningtable
+  11: dog
+  12: horse
+  13: motorbike
+  14: person
+  15: pottedplant
+  16: sheep
+  17: sofa
+  18: train
+  19: tvmonitor
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  import xml.etree.ElementTree as ET
+
+  from tqdm import tqdm
+  from utils.general import download, Path
+
+
+  def convert_label(path, lb_path, year, image_id):
+      def convert_box(size, box):
+          dw, dh = 1. / size[0], 1. / size[1]
+          x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
+          return x * dw, y * dh, w * dw, h * dh
+
+      in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
+      out_file = open(lb_path, 'w')
+      tree = ET.parse(in_file)
+      root = tree.getroot()
+      size = root.find('size')
+      w = int(size.find('width').text)
+      h = int(size.find('height').text)
+
+      names = list(yaml['names'].values())  # names list
+      for obj in root.iter('object'):
+          cls = obj.find('name').text
+          if cls in names and int(obj.find('difficult').text) != 1:
+              xmlbox = obj.find('bndbox')
+              bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
+              cls_id = names.index(cls)  # class id
+              out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
+
+
+  # Download
+  dir = Path(yaml['path'])  # dataset root dir
+  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
+  urls = [f'{url}VOCtrainval_06-Nov-2007.zip',  # 446MB, 5012 images
+          f'{url}VOCtest_06-Nov-2007.zip',  # 438MB, 4953 images
+          f'{url}VOCtrainval_11-May-2012.zip']  # 1.95GB, 17126 images
+  download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)
+
+  # Convert
+  path = dir / 'images/VOCdevkit'
+  for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
+      imgs_path = dir / 'images' / f'{image_set}{year}'
+      lbs_path = dir / 'labels' / f'{image_set}{year}'
+      imgs_path.mkdir(exist_ok=True, parents=True)
+      lbs_path.mkdir(exist_ok=True, parents=True)
+
+      with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
+          image_ids = f.read().strip().split()
+      for id in tqdm(image_ids, desc=f'{image_set}{year}'):
+          f = path / f'VOC{year}/JPEGImages/{id}.jpg'  # old img path
+          lb_path = (lbs_path / f.name).with_suffix('.txt')  # new label path
+          f.rename(imgs_path / f.name)  # move image
+          convert_label(path, lb_path, year, id)  # convert labels to YOLO format

+ 70 - 0
data/VisDrone.yaml

@@ -0,0 +1,70 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
+# Example usage: python train.py --data VisDrone.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── VisDrone  ← downloads here (2.3 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/VisDrone  # dataset root dir
+train: VisDrone2019-DET-train/images  # train images (relative to 'path')  6471 images
+val: VisDrone2019-DET-val/images  # val images (relative to 'path')  548 images
+test: VisDrone2019-DET-test-dev/images  # test images (optional)  1610 images
+
+# Classes
+names:
+  0: pedestrian
+  1: people
+  2: bicycle
+  3: car
+  4: van
+  5: truck
+  6: tricycle
+  7: awning-tricycle
+  8: bus
+  9: motor
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  from utils.general import download, os, Path
+
+  def visdrone2yolo(dir):
+      from PIL import Image
+      from tqdm import tqdm
+
+      def convert_box(size, box):
+          # Convert VisDrone box to YOLO xywh box
+          dw = 1. / size[0]
+          dh = 1. / size[1]
+          return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
+
+      (dir / 'labels').mkdir(parents=True, exist_ok=True)  # make labels directory
+      pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
+      for f in pbar:
+          img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
+          lines = []
+          with open(f, 'r') as file:  # read annotation.txt
+              for row in [x.split(',') for x in file.read().strip().splitlines()]:
+                  if row[4] == '0':  # VisDrone 'ignored regions' class 0
+                      continue
+                  cls = int(row[5]) - 1
+                  box = convert_box(img_size, tuple(map(int, row[:4])))
+                  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
+                  with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
+                      fl.writelines(lines)  # write label.txt
+
+
+  # Download
+  dir = Path(yaml['path'])  # dataset root dir
+  urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
+          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
+          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
+          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
+  download(urls, dir=dir, curl=True, threads=4)
+
+  # Convert
+  for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
+      visdrone2yolo(dir / d)  # convert VisDrone annotations to YOLO labels

+ 116 - 0
data/coco.yaml

@@ -0,0 +1,116 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# COCO 2017 dataset http://cocodataset.org by Microsoft
+# Example usage: python train.py --data coco.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco  ← downloads here (20.1 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/coco  # dataset root dir
+train: train2017.txt  # train images (relative to 'path') 118287 images
+val: val2017.txt  # val images (relative to 'path') 5000 images
+test: test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
+
+# Classes
+names:
+  0: person
+  1: bicycle
+  2: car
+  3: motorcycle
+  4: airplane
+  5: bus
+  6: train
+  7: truck
+  8: boat
+  9: traffic light
+  10: fire hydrant
+  11: stop sign
+  12: parking meter
+  13: bench
+  14: bird
+  15: cat
+  16: dog
+  17: horse
+  18: sheep
+  19: cow
+  20: elephant
+  21: bear
+  22: zebra
+  23: giraffe
+  24: backpack
+  25: umbrella
+  26: handbag
+  27: tie
+  28: suitcase
+  29: frisbee
+  30: skis
+  31: snowboard
+  32: sports ball
+  33: kite
+  34: baseball bat
+  35: baseball glove
+  36: skateboard
+  37: surfboard
+  38: tennis racket
+  39: bottle
+  40: wine glass
+  41: cup
+  42: fork
+  43: knife
+  44: spoon
+  45: bowl
+  46: banana
+  47: apple
+  48: sandwich
+  49: orange
+  50: broccoli
+  51: carrot
+  52: hot dog
+  53: pizza
+  54: donut
+  55: cake
+  56: chair
+  57: couch
+  58: potted plant
+  59: bed
+  60: dining table
+  61: toilet
+  62: tv
+  63: laptop
+  64: mouse
+  65: remote
+  66: keyboard
+  67: cell phone
+  68: microwave
+  69: oven
+  70: toaster
+  71: sink
+  72: refrigerator
+  73: book
+  74: clock
+  75: vase
+  76: scissors
+  77: teddy bear
+  78: hair drier
+  79: toothbrush
+
+
+# Download script/URL (optional)
+download: |
+  from utils.general import download, Path
+
+
+  # Download labels
+  segments = False  # segment or box labels
+  dir = Path(yaml['path'])  # dataset root dir
+  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
+  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels
+  download(urls, dir=dir.parent)
+
+  # Download data
+  urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images
+          'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images
+          'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)
+  download(urls, dir=dir / 'images', threads=3)

+ 101 - 0
data/coco128-seg.yaml

@@ -0,0 +1,101 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
+# Example usage: python train.py --data coco128.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco128-seg  ← downloads here (7 MB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/coco128-seg  # dataset root dir
+train: images/train2017  # train images (relative to 'path') 128 images
+val: images/train2017  # val images (relative to 'path') 128 images
+test:  # test images (optional)
+
+# Classes
+names:
+  0: person
+  1: bicycle
+  2: car
+  3: motorcycle
+  4: airplane
+  5: bus
+  6: train
+  7: truck
+  8: boat
+  9: traffic light
+  10: fire hydrant
+  11: stop sign
+  12: parking meter
+  13: bench
+  14: bird
+  15: cat
+  16: dog
+  17: horse
+  18: sheep
+  19: cow
+  20: elephant
+  21: bear
+  22: zebra
+  23: giraffe
+  24: backpack
+  25: umbrella
+  26: handbag
+  27: tie
+  28: suitcase
+  29: frisbee
+  30: skis
+  31: snowboard
+  32: sports ball
+  33: kite
+  34: baseball bat
+  35: baseball glove
+  36: skateboard
+  37: surfboard
+  38: tennis racket
+  39: bottle
+  40: wine glass
+  41: cup
+  42: fork
+  43: knife
+  44: spoon
+  45: bowl
+  46: banana
+  47: apple
+  48: sandwich
+  49: orange
+  50: broccoli
+  51: carrot
+  52: hot dog
+  53: pizza
+  54: donut
+  55: cake
+  56: chair
+  57: couch
+  58: potted plant
+  59: bed
+  60: dining table
+  61: toilet
+  62: tv
+  63: laptop
+  64: mouse
+  65: remote
+  66: keyboard
+  67: cell phone
+  68: microwave
+  69: oven
+  70: toaster
+  71: sink
+  72: refrigerator
+  73: book
+  74: clock
+  75: vase
+  76: scissors
+  77: teddy bear
+  78: hair drier
+  79: toothbrush
+
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/coco128-seg.zip

+ 101 - 0
data/coco128.yaml

@@ -0,0 +1,101 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
+# Example usage: python train.py --data coco128.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco128  ← downloads here (7 MB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/coco128  # dataset root dir
+train: images/train2017  # train images (relative to 'path') 128 images
+val: images/train2017  # val images (relative to 'path') 128 images
+test:  # test images (optional)
+
+# Classes
+names:
+  0: person
+  1: bicycle
+  2: car
+  3: motorcycle
+  4: airplane
+  5: bus
+  6: train
+  7: truck
+  8: boat
+  9: traffic light
+  10: fire hydrant
+  11: stop sign
+  12: parking meter
+  13: bench
+  14: bird
+  15: cat
+  16: dog
+  17: horse
+  18: sheep
+  19: cow
+  20: elephant
+  21: bear
+  22: zebra
+  23: giraffe
+  24: backpack
+  25: umbrella
+  26: handbag
+  27: tie
+  28: suitcase
+  29: frisbee
+  30: skis
+  31: snowboard
+  32: sports ball
+  33: kite
+  34: baseball bat
+  35: baseball glove
+  36: skateboard
+  37: surfboard
+  38: tennis racket
+  39: bottle
+  40: wine glass
+  41: cup
+  42: fork
+  43: knife
+  44: spoon
+  45: bowl
+  46: banana
+  47: apple
+  48: sandwich
+  49: orange
+  50: broccoli
+  51: carrot
+  52: hot dog
+  53: pizza
+  54: donut
+  55: cake
+  56: chair
+  57: couch
+  58: potted plant
+  59: bed
+  60: dining table
+  61: toilet
+  62: tv
+  63: laptop
+  64: mouse
+  65: remote
+  66: keyboard
+  67: cell phone
+  68: microwave
+  69: oven
+  70: toaster
+  71: sink
+  72: refrigerator
+  73: book
+  74: clock
+  75: vase
+  76: scissors
+  77: teddy bear
+  78: hair drier
+  79: toothbrush
+
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/coco128.zip

BIN
data/helmet.pt


+ 34 - 0
data/hyps/hyp.Objects365.yaml

@@ -0,0 +1,34 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Hyperparameters for Objects365 training
+# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
+# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
+
+lr0: 0.00258
+lrf: 0.17
+momentum: 0.779
+weight_decay: 0.00058
+warmup_epochs: 1.33
+warmup_momentum: 0.86
+warmup_bias_lr: 0.0711
+box: 0.0539
+cls: 0.299
+cls_pw: 0.825
+obj: 0.632
+obj_pw: 1.0
+iou_t: 0.2
+anchor_t: 3.44
+anchors: 3.2
+fl_gamma: 0.0
+hsv_h: 0.0188
+hsv_s: 0.704
+hsv_v: 0.36
+degrees: 0.0
+translate: 0.0902
+scale: 0.491
+shear: 0.0
+perspective: 0.0
+flipud: 0.0
+fliplr: 0.5
+mosaic: 1.0
+mixup: 0.0
+copy_paste: 0.0

+ 40 - 0
data/hyps/hyp.VOC.yaml

@@ -0,0 +1,40 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Hyperparameters for VOC training
+# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
+# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
+
+# YOLOv5 Hyperparameter Evolution Results
+# Best generation: 467
+# Last generation: 996
+#    metrics/precision,       metrics/recall,      metrics/mAP_0.5, metrics/mAP_0.5:0.95,         val/box_loss,         val/obj_loss,         val/cls_loss
+#              0.87729,              0.85125,              0.91286,              0.72664,            0.0076739,            0.0042529,            0.0013865
+
+lr0: 0.00334
+lrf: 0.15135
+momentum: 0.74832
+weight_decay: 0.00025
+warmup_epochs: 3.3835
+warmup_momentum: 0.59462
+warmup_bias_lr: 0.18657
+box: 0.02
+cls: 0.21638
+cls_pw: 0.5
+obj: 0.51728
+obj_pw: 0.67198
+iou_t: 0.2
+anchor_t: 3.3744
+fl_gamma: 0.0
+hsv_h: 0.01041
+hsv_s: 0.54703
+hsv_v: 0.27739
+degrees: 0.0
+translate: 0.04591
+scale: 0.75544
+shear: 0.0
+perspective: 0.0
+flipud: 0.0
+fliplr: 0.5
+mosaic: 0.85834
+mixup: 0.04266
+copy_paste: 0.0
+anchors: 3.412

+ 35 - 0
data/hyps/hyp.no-augmentation.yaml

@@ -0,0 +1,35 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Hyperparameters when using Albumentations frameworks
+# python train.py --hyp hyp.no-augmentation.yaml
+# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples
+
+lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
+lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
+momentum: 0.937  # SGD momentum/Adam beta1
+weight_decay: 0.0005  # optimizer weight decay 5e-4
+warmup_epochs: 3.0  # warmup epochs (fractions ok)
+warmup_momentum: 0.8  # warmup initial momentum
+warmup_bias_lr: 0.1  # warmup initial bias lr
+box: 0.05  # box loss gain
+cls: 0.3  # cls loss gain
+cls_pw: 1.0  # cls BCELoss positive_weight
+obj: 0.7  # obj loss gain (scale with pixels)
+obj_pw: 1.0  # obj BCELoss positive_weight
+iou_t: 0.20  # IoU training threshold
+anchor_t: 4.0  # anchor-multiple threshold
+# anchors: 3  # anchors per output layer (0 to ignore)
+# these parameters are all zero because augmentation is delegated to the Albumentations framework
+fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
+hsv_h: 0  # image HSV-Hue augmentation (fraction)
+hsv_s: 0  # image HSV-Saturation augmentation (fraction)
+hsv_v: 0  # image HSV-Value augmentation (fraction)
+degrees: 0.0  # image rotation (+/- deg)
+translate: 0  # image translation (+/- fraction)
+scale: 0  # image scale (+/- gain)
+shear: 0  # image shear (+/- deg)
+perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
+flipud: 0.0  # image flip up-down (probability)
+fliplr: 0.0  # image flip left-right (probability)
+mosaic: 0.0  # image mosaic (probability)
+mixup: 0.0  # image mixup (probability)
+copy_paste: 0.0  # segment copy-paste (probability)

+ 34 - 0
data/hyps/hyp.scratch-high.yaml

@@ -0,0 +1,34 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Hyperparameters for high-augmentation COCO training from scratch
+# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
+# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
+
+lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
+lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
+momentum: 0.937  # SGD momentum/Adam beta1
+weight_decay: 0.0005  # optimizer weight decay 5e-4
+warmup_epochs: 3.0  # warmup epochs (fractions ok)
+warmup_momentum: 0.8  # warmup initial momentum
+warmup_bias_lr: 0.1  # warmup initial bias lr
+box: 0.05  # box loss gain
+cls: 0.3  # cls loss gain
+cls_pw: 1.0  # cls BCELoss positive_weight
+obj: 0.7  # obj loss gain (scale with pixels)
+obj_pw: 1.0  # obj BCELoss positive_weight
+iou_t: 0.20  # IoU training threshold
+anchor_t: 4.0  # anchor-multiple threshold
+# anchors: 3  # anchors per output layer (0 to ignore)
+fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
+hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
+hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
+hsv_v: 0.4  # image HSV-Value augmentation (fraction)
+degrees: 0.0  # image rotation (+/- deg)
+translate: 0.1  # image translation (+/- fraction)
+scale: 0.9  # image scale (+/- gain)
+shear: 0.0  # image shear (+/- deg)
+perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
+flipud: 0.0  # image flip up-down (probability)
+fliplr: 0.5  # image flip left-right (probability)
+mosaic: 1.0  # image mosaic (probability)
+mixup: 0.1  # image mixup (probability)
+copy_paste: 0.1  # segment copy-paste (probability)

+ 34 - 0
data/hyps/hyp.scratch-low.yaml

@@ -0,0 +1,34 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Hyperparameters for low-augmentation COCO training from scratch
+# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
+# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
+
+lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
+lrf: 0.01  # final OneCycleLR learning rate (lr0 * lrf)
+momentum: 0.937  # SGD momentum/Adam beta1
+weight_decay: 0.0005  # optimizer weight decay 5e-4
+warmup_epochs: 3.0  # warmup epochs (fractions ok)
+warmup_momentum: 0.8  # warmup initial momentum
+warmup_bias_lr: 0.1  # warmup initial bias lr
+box: 0.05  # box loss gain
+cls: 0.5  # cls loss gain
+cls_pw: 1.0  # cls BCELoss positive_weight
+obj: 1.0  # obj loss gain (scale with pixels)
+obj_pw: 1.0  # obj BCELoss positive_weight
+iou_t: 0.20  # IoU training threshold
+anchor_t: 4.0  # anchor-multiple threshold
+# anchors: 3  # anchors per output layer (0 to ignore)
+fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
+hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
+hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
+hsv_v: 0.4  # image HSV-Value augmentation (fraction)
+degrees: 0.0  # image rotation (+/- deg)
+translate: 0.1  # image translation (+/- fraction)
+scale: 0.5  # image scale (+/- gain)
+shear: 0.0  # image shear (+/- deg)
+perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
+flipud: 0.0  # image flip up-down (probability)
+fliplr: 0.5  # image flip left-right (probability)
+mosaic: 1.0  # image mosaic (probability)
+mixup: 0.0  # image mixup (probability)
+copy_paste: 0.0  # segment copy-paste (probability)

+ 34 - 0
data/hyps/hyp.scratch-med.yaml

@@ -0,0 +1,34 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Hyperparameters for medium-augmentation COCO training from scratch
+# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
+# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
+
+lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
+lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
+momentum: 0.937  # SGD momentum/Adam beta1
+weight_decay: 0.0005  # optimizer weight decay 5e-4
+warmup_epochs: 3.0  # warmup epochs (fractions ok)
+warmup_momentum: 0.8  # warmup initial momentum
+warmup_bias_lr: 0.1  # warmup initial bias lr
+box: 0.05  # box loss gain
+cls: 0.3  # cls loss gain
+cls_pw: 1.0  # cls BCELoss positive_weight
+obj: 0.7  # obj loss gain (scale with pixels)
+obj_pw: 1.0  # obj BCELoss positive_weight
+iou_t: 0.20  # IoU training threshold
+anchor_t: 4.0  # anchor-multiple threshold
+# anchors: 3  # anchors per output layer (0 to ignore)
+fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
+hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
+hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
+hsv_v: 0.4  # image HSV-Value augmentation (fraction)
+degrees: 0.0  # image rotation (+/- deg)
+translate: 0.1  # image translation (+/- fraction)
+scale: 0.9  # image scale (+/- gain)
+shear: 0.0  # image shear (+/- deg)
+perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
+flipud: 0.0  # image flip up-down (probability)
+fliplr: 0.5  # image flip left-right (probability)
+mosaic: 1.0  # image mosaic (probability)
+mixup: 0.1  # image mixup (probability)
+copy_paste: 0.0  # segment copy-paste (probability)

+ 22 - 0
data/scripts/download_weights.sh

@@ -0,0 +1,22 @@
+#!/bin/bash
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Download latest models from https://github.com/ultralytics/yolov5/releases
+# Example usage: bash data/scripts/download_weights.sh
+# parent
+# └── yolov5
+#     ├── yolov5s.pt  ← downloads here
+#     ├── yolov5m.pt
+#     └── ...
+
+python - <<EOF
+from utils.downloads import attempt_download
+
+p5 = list('nsmlx')  # P5 models
+p6 = [f'{x}6' for x in p5]  # P6 models
+cls = [f'{x}-cls' for x in p5]  # classification models
+seg = [f'{x}-seg' for x in p5]  # segmentation models
+
+for x in p5 + p6 + cls + seg:
+    attempt_download(f'weights/yolov5{x}.pt')
+
+EOF

+ 56 - 0
data/scripts/get_coco.sh

@@ -0,0 +1,56 @@
+#!/bin/bash
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Download COCO 2017 dataset http://cocodataset.org
+# Example usage: bash data/scripts/get_coco.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco  ← downloads here
+
+# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments
+if [ "$#" -gt 0 ]; then
+  for opt in "$@"; do
+    case "${opt}" in
+    --train) train=true ;;
+    --val) val=true ;;
+    --test) test=true ;;
+    --segments) segments=true ;;
+    esac
+  done
+else
+  train=true
+  val=true
+  test=false
+  segments=false
+fi
+
+# Download/unzip labels
+d='../datasets' # unzip directory
+url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
+if [ "$segments" == "true" ]; then
+  f='coco2017labels-segments.zip' # 168 MB
+else
+  f='coco2017labels.zip' # 46 MB
+fi
+echo 'Downloading' $url$f ' ...'
+curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+
+# Download/unzip images
+d='../datasets/coco/images' # unzip directory
+url=http://images.cocodataset.org/zips/
+if [ "$train" == "true" ]; then
+  f='train2017.zip' # 19G, 118k images
+  echo 'Downloading' $url$f '...'
+  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+fi
+if [ "$val" == "true" ]; then
+  f='val2017.zip' # 1G, 5k images
+  echo 'Downloading' $url$f '...'
+  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+fi
+if [ "$test" == "true" ]; then
+  f='test2017.zip' # 7G, 41k images (optional)
+  echo 'Downloading' $url$f '...'
+  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+fi
+wait # finish background tasks

+ 17 - 0
data/scripts/get_coco128.sh

@@ -0,0 +1,17 @@
+#!/bin/bash
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
+# Example usage: bash data/scripts/get_coco128.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco128  ← downloads here
+
+# Download/unzip images and labels
+d='../datasets' # unzip directory
+url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
+f='coco128.zip' # or 'coco128-segments.zip', 68 MB
+echo 'Downloading' $url$f ' ...'
+curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+
+wait # finish background tasks

+ 51 - 0
data/scripts/get_imagenet.sh

@@ -0,0 +1,51 @@
+#!/bin/bash
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Download ILSVRC2012 ImageNet dataset https://image-net.org
+# Example usage: bash data/scripts/get_imagenet.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── imagenet  ← downloads here
+
+# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val
+if [ "$#" -gt 0 ]; then
+  for opt in "$@"; do
+    case "${opt}" in
+    --train) train=true ;;
+    --val) val=true ;;
+    esac
+  done
+else
+  train=true
+  val=true
+fi
+
+# Make dir
+d='../datasets/imagenet' # unzip directory
+mkdir -p $d && cd $d
+
+# Download/unzip train
+if [ "$train" == "true" ]; then
+  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images
+  mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
+  tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
+  find . -name "*.tar" | while read NAME; do
+    mkdir -p "${NAME%.tar}"
+    tar -xf "${NAME}" -C "${NAME%.tar}"
+    rm -f "${NAME}"
+  done
+  cd ..
+fi
+
+# Download/unzip val
+if [ "$val" == "true" ]; then
+  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images
+  mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar
+  wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs
+fi
+
+# Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail)
+# rm train/n04266014/n04266014_10835.JPEG
+
+# TFRecords (optional)
+# wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt

+ 18 - 0
data/smoke.yaml

@@ -0,0 +1,18 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Custom smoke-detection dataset (single class: smoke)
+# Example usage: python train.py --data smoke.yaml
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: /mnt/project/yolodemo/dataset/smoke  # dataset root dir
+train: images  # train images (relative to 'path')
+val: images  # val images (relative to 'path')
+test:  # test images (optional)
+
+# Classes
+names:
+  0: smoke

+ 20 - 0
data/uniform.yaml

@@ -0,0 +1,20 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Custom work-uniform dataset (classes: other, reflective-vest, uniform)
+# Example usage: python train.py --data uniform.yaml
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: /mnt/project/yolodemo/dataset/uniform  # dataset root dir
+train: images  # train images (relative to 'path')
+val: images  # val images (relative to 'path')
+test:  # test images (optional)
+
+# Classes
+names:
+  0: other
+  1: reflective-vest
+  2: uniform

+ 153 - 0
data/xView.yaml

@@ -0,0 +1,153 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
+# --------  DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command!  --------
+# Example usage: python train.py --data xView.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── xView  ← downloads here (20.7 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/xView  # dataset root dir
+train: images/autosplit_train.txt  # train images (relative to 'path') 90% of 847 train images
+val: images/autosplit_val.txt  # val images (relative to 'path') 10% of 847 train images
+
+# Classes
+names:
+  0: Fixed-wing Aircraft
+  1: Small Aircraft
+  2: Cargo Plane
+  3: Helicopter
+  4: Passenger Vehicle
+  5: Small Car
+  6: Bus
+  7: Pickup Truck
+  8: Utility Truck
+  9: Truck
+  10: Cargo Truck
+  11: Truck w/Box
+  12: Truck Tractor
+  13: Trailer
+  14: Truck w/Flatbed
+  15: Truck w/Liquid
+  16: Crane Truck
+  17: Railway Vehicle
+  18: Passenger Car
+  19: Cargo Car
+  20: Flat Car
+  21: Tank car
+  22: Locomotive
+  23: Maritime Vessel
+  24: Motorboat
+  25: Sailboat
+  26: Tugboat
+  27: Barge
+  28: Fishing Vessel
+  29: Ferry
+  30: Yacht
+  31: Container Ship
+  32: Oil Tanker
+  33: Engineering Vehicle
+  34: Tower crane
+  35: Container Crane
+  36: Reach Stacker
+  37: Straddle Carrier
+  38: Mobile Crane
+  39: Dump Truck
+  40: Haul Truck
+  41: Scraper/Tractor
+  42: Front loader/Bulldozer
+  43: Excavator
+  44: Cement Mixer
+  45: Ground Grader
+  46: Hut/Tent
+  47: Shed
+  48: Building
+  49: Aircraft Hangar
+  50: Damaged Building
+  51: Facility
+  52: Construction Site
+  53: Vehicle Lot
+  54: Helipad
+  55: Storage Tank
+  56: Shipping container lot
+  57: Shipping Container
+  58: Pylon
+  59: Tower
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  import json
+  import os
+  from pathlib import Path
+
+  import numpy as np
+  from PIL import Image
+  from tqdm import tqdm
+
+  from utils.dataloaders import autosplit
+  from utils.general import download, xyxy2xywhn
+
+
+  def convert_labels(fname=Path('xView/xView_train.geojson')):
+      # Convert xView geoJSON labels to YOLO format
+      path = fname.parent
+      with open(fname) as f:
+          print(f'Loading {fname}...')
+          data = json.load(f)
+
+      # Make dirs
+      labels = Path(path / 'labels' / 'train')
+      os.system(f'rm -rf {labels}')
+      labels.mkdir(parents=True, exist_ok=True)
+
+      # xView classes 11-94 to 0-59
+      xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
+                           12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
+                           29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
+                           47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
+
+      shapes = {}
+      for feature in tqdm(data['features'], desc=f'Converting {fname}'):
+          p = feature['properties']
+          if p['bounds_imcoords']:
+              id = p['image_id']
+              file = path / 'train_images' / id
+              if file.exists():  # 1395.tif missing
+                  try:
+                      box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
+                      assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
+                      cls = p['type_id']
+                      cls = xview_class2index[int(cls)]  # xView class to 0-59
+                      assert 59 >= cls >= 0, f'incorrect class index {cls}'
+
+                      # Write YOLO label
+                      if id not in shapes:
+                          shapes[id] = Image.open(file).size
+                      box = xyxy2xywhn(box[None].astype(np.float64), w=shapes[id][0], h=shapes[id][1], clip=True)
+                      with open((labels / id).with_suffix('.txt'), 'a') as f:
+                          f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n")  # write label.txt
+                  except Exception as e:
+                      print(f'WARNING: skipping one label for {file}: {e}')
+
+
+  # Download manually from https://challenge.xviewdataset.org
+  dir = Path(yaml['path'])  # dataset root dir
+  # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip',  # train labels
+  #         'https://d307kc0mrhucc3.cloudfront.net/train_images.zip',  # 15G, 847 train images
+  #         'https://d307kc0mrhucc3.cloudfront.net/val_images.zip']  # 5G, 282 val images (no labels)
+  # download(urls, dir=dir, delete=False)
+
+  # Convert labels
+  convert_labels(dir / 'xView_train.geojson')
+
+  # Move images
+  images = Path(dir / 'images')
+  images.mkdir(parents=True, exist_ok=True)
+  Path(dir / 'train_images').rename(dir / 'images' / 'train')
+  Path(dir / 'val_images').rename(dir / 'images' / 'val')
+
+  # Split
+  autosplit(dir / 'images' / 'train')

+ 2154 - 0
detectopencvmut0113auta.py

@@ -0,0 +1,2154 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+"""
+
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+
+
+Usage - sources:
+
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+
+                                                     img.jpg                         # image
+
+                                                     vid.mp4                         # video
+
+                                                     screen                          # screenshot
+
+                                                     path/                           # directory
+
+                                                     list.txt                        # list of images
+
+                                                     list.streams                    # list of streams
+
+                                                     'path/*.jpg'                    # glob
+
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+
+
+Usage - formats:
+
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+
+                                 yolov5s.torchscript        # TorchScript
+
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+
+                                 yolov5s_openvino_model     # OpenVINO
+
+                                 yolov5s.engine             # TensorRT
+
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+
+                                 yolov5s.pb                 # TensorFlow GraphDef
+
+                                 yolov5s.tflite             # TensorFlow Lite
+
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+
+                                 yolov5s_paddle_model       # PaddlePaddle
+
+"""
+
+import matplotlib.path as mat
+
+import requests
+
+import argparse
+
+import os
+
+import platform
+
+import sqlite3
+
+import sys
+
+import threading
+
+import time
+
+from pathlib import Path
+
+import signal
+
+import torch
+
+import torch.nn.functional as F  # used by clapre() below for softmax
+
+from concurrent.futures import ThreadPoolExecutor
+
+from concurrent.futures import ProcessPoolExecutor
+
+from multiprocessing import Process,Manager,Value
+
+from multiprocessing import Queue
+
+from multiprocessing import set_start_method
+
+import multiprocessing
+
+import multiprocessing as mp
+
+import numpy as np
+
+from torchvision import transforms
+
+FILE = Path(__file__).resolve()
+
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+
+if str(ROOT) not in sys.path:
+
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+
+
+from models.common import DetectMultiBackend
+
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLT,LoadStreamsSQLTN
+
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,apply_classifier1,apply_classifieruniform,compute_IOU,task,apply_classifierarm)
+
+from utils.plots import Annotator, colors, save_one_box
+
+from utils.torch_utils import select_device, smart_inference_mode
+
+from utils.renwu import newHelmet,newUniform,Fall,Personcount,Arm,Bag,Cross,Extinguisher,Persontre,Danager
+
+#from testpool import func1,TestA
+
+
+
+# def my_handler(signum, frame):
+
+#     exit(0)
+
+#url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+
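+# Platform REST endpoints (internal network). Judging by their names, they are used to upload alarm
+# images, fetch preview/RTSP URLs, task and algorithm lists, electronic-fence data, time windows and
+# person-count limits; the exact semantics are assumptions based on the endpoint names.
+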
+url = "http://172.19.152.231/open/api/operate/upload"
+
+urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
+
+urlt = "http://172.19.152.231/open/api/operate/taskList"
+
+urla = "http://172.19.152.231/open/api/operate/algorithmList"
+
+urlele = "http://172.19.152.231/open/api/operate/fence"
+
+urltime = "http://172.19.152.231/open/api/operate/getTime"
+
+urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
+
+#modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
+
+#algmodel = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
+
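+# Mappings between algorithm ids, human-readable task names and the label set each detector outputs;
+# modelalgdir (built in the loop below) is the reverse name -> id lookup.
+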
+modelnamedir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'duty','98':'face','55':'oil','52':'jingdian','53':'rope','54':'personcar','39':'inspection','11':'reflective','12':'phone','66':'extinguisher','67':'tizi','68':'menjin','35':'arm','36':'persontre','33':'bag'}
+
+modellabeldir = {'0':'head,person','8':'person','10':'black_work_clothes,blue_work_clothes,person','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'person','98':'face','55':'oil','52':'person,hand,ball','53':'rope','54':'person','39':'person','11':'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other','12':'phone','66':'extinguisher','67':'person,tizi','68':'person','35':'barearm,arm','36':'person,foot,cart,bag,box','33':'handbox,handbag'}
+
+modelalgdir = {}
+
+personcountdir = {}
+
+for key,value in modelnamedir.items():
+
+    modelalgdir[value] = key
+
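+# Route each task name to its post-processing handler class imported from utils.renwu.
+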
+taskmap = {'helmet':newHelmet,'uniform':newUniform,'fall':Fall,'personcount':Personcount,'arm':Arm,'bag':Bag,'cross':Cross,'extinguisher':Extinguisher,'persontre':Persontre,'danager':Danager}
+
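+# ImageNet mean/std and a 224x224 resize used to build the `test` transform, presumably the
+# pre-processing for the secondary classification model.
+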
+mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
+
+test = transforms.Compose([transforms.Resize((224,224)),
+
+        #transforms.CenterCrop(224),
+
+        transforms.ToTensor(),
+
+        transforms.Normalize(mean=mean, std=std)
+
+                           ])
+
+def clapre(modelcla,claimg,clapoint):
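+
+    # Stack the cropped detections into one batch, run the classification model on GPU 0, softmax
+    # the logits and return the boxes (clapoint) whose predicted class index is below 5 (assumed to
+    # be the alarm-relevant classes); returns None when nothing qualifies.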
+
+    imgten = torch.stack(claimg,dim=0)
+
+    clapoint = torch.stack(clapoint,dim=0)
+
+    imgten = imgten.to(0)
+
+    result = modelcla(imgten)
+
+    result = F.softmax(result)
+
+    print(result)
+
+    index = result.argmax(1)
+
+    index = index.cpu().numpy()
+
+    index = np.argwhere(index<5)
+
+    index = index.reshape(-1)
+
+    print(index)
+
+    if len(index)>0:
+
+        print(clapoint[index])
+
+        return clapoint[index]
+
+    else:
+
+        return None
+
+
+
+class YoloOpt:
+
+    def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
+
+                 imgsz=(640,640),
+
+                 conf_thres=0.25,
+
+                 iou_thres=0.45,
+
+                 max_det=1000,
+
+                 device='',
+
+                 view_img=False,
+
+                 save_txt=False,
+
+                 save_conf=False,
+
+                 save_crop=False,
+
+                 nosave=True,
+
+                 classes=None,
+
+                 agnostic_nms=False,
+
+                 augment=False,
+
+                 visualize=False,
+
+                 update=False,
+
+                 project=ROOT / 'runs/detect',
+
+                 name='exp',
+
+                 exist_ok=False,
+
+                 line_thickness=1,
+
+                 hide_labels=False,
+
+                 hide_conf=False,
+
+                 half=False,
+
+                 dnn=False,
+
+                 vid_stride=10,
+
+                 classify=False,
+
+                 v8=False):
+
+
+
+        self.weights = weights  # path to the model weights file
+
+        self.source = source  # input source to run inference on
+
+        self.data = data
+
+        if imgsz is None:
+
+            self.imgsz = (640, 640)
+
+        else:
+
+            self.imgsz = imgsz  # input image size, default (640, 640)
+
+        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
+
+        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
+
+        self.device = device  # device to run on (original note: only the CPU path was wrapped)
+
+        self.view_img = view_img  # show images/videos after prediction, default False
+
+        self.classes = classes  # keep only a subset of classes; default keeps all
+
+        self.agnostic_nms = agnostic_nms  # class-agnostic NMS (suppress overlapping boxes across classes), default False
+
+        self.augment = augment  # augmented inference (TTA / multi-scale), may improve accuracy
+
+        self.update = update  # if True, run strip_optimizer on all models to strip optimizer state from the .pt files, default False
+
+        self.exist_ok = exist_ok  # if True, reuse an existing project/name directory instead of incrementing it, default False
+
+        self.project = project  # directory for saving run results/logs (not used in this program)
+
+        self.name = name  # experiment name (also not used in this program)
+
+        self.max_det = max_det
+
+        self.save_txt = save_txt
+
+        self.save_conf= save_conf
+
+        self.save_crop= save_crop
+
+        self.nosave = nosave
+
+        self.visualize = visualize
+
+        self.line_thickness = line_thickness
+
+        self.hide_labels = hide_labels
+
+        self.hide_conf = hide_conf
+
+        self.half = half
+
+        self.dnn = dnn
+
+        self.vid_stride = vid_stride
+
+        self.classify = classify
+
+        self.v8 = v8
+
+class Detect:
+
+    def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classes=None,device=None,classify=False,conf_thres=0.25,v8=False):
+
+        print(f'detectweights = {weights}')
+
+        if v8:
+
+            from ultralytics.nn.autobackend import AutoBackend
+
+            from ultralytics.utils.ops import non_max_suppression
+
+        else:
+
+            from utils.general import non_max_suppression
+
+        self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
+
+        self.source = str(self.opt.source)
+
+        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
+
+        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+
+        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+
+        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
+
+        screenshot = self.source.lower().startswith('screen')
+
+        if is_url and is_file:
+
+            self.source = check_file(self.source)  # download
+
+        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
+
+        #self.save_dir = self.save_dir / Path(self.opt.weights).stem
+
+        #self.save_dir.mkdir(parents=True, exist_ok=True)
+
+        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+        print(f'device = {self.opt.device}')
+
+        device = select_device(self.opt.device)
+
+        if v8:
+
+            self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+            if Path(weights).stem in ['arm', 'uniform']:
+
+                if Path(weights).stem == 'arm':
+
+                    self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+                elif Path(weights).stem == 'uniform':
+
+                    self.personmodel = AutoBackend('yolo11m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+        else:
+
+            self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+            if Path(weights).stem in ['helmet','arm']:
+
+                self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)        
+
+        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+
+        self.classify = classify
+
+        if self.classify:
+
+            #if Path(weights).stem =='uniform':
+
+            #    self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt",device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+            #else:
+            if Path(weights).stem != "arm":
+                classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
+
+                self.classifier_model = classifier_model.to(device)
+
+                self.classifier_model.eval()
+            else:
+                self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt",device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+        self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
+
+        self.model.warmup(imgsz=(1 , 3, *self.imgsz))
+
+        self.readpoint()
+
+        print(self.imgsz)
+
+        self.updatetime = time.time()
+
+        self.updatemtime = time.time()
+
+        self.filetime = os.path.getmtime(self.opt.weights)
+
+        self.taskname = taskmap[Path(self.opt.weights).stem]()
+
+        bs = 1  # batch_size
+
+        if self.webcam:
+
+            #self.view_img = check_imshow(warn=True)
+
+            self.view_img = False
+
+            # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+
+            # bs = len(dataset)
+
+        elif screenshot:
+
+            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
+
+        else:
+
+            dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
+
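+        # start a daemon thread that keeps polling the sqlite CHANGESTREAM table for this model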
+        t1 = threading.Thread(target=self.load,daemon=True)
+
+        t1.start()
+
+    @smart_inference_mode()
+
+    def infer(self,queue,runmodel):
+
+            pretime = time.time()
+
+            seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
+
+            #
+
+            # print ("数据库打开成功")
+
+            while True:
+
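+                # run only between 07:00 and 19:59; every 300 s re-read the channel list and exit
+                # this worker if the model no longer has any task assigned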
+                if time.localtime().tm_hour not in range(7,20):
+
+                    time.sleep(30)
+
+                    continue
+
+                #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+
+                if time.time()-pretime>300:
+
+                    ret = self.readpoint()
+
+                    pretime = time.time()
+
+                    if not ret:
+
+                        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        runmodel.pop(Path(self.opt.weights).stem)
+
+                        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        break
+
+                print(f'queuelen = {len(queue)}')
+
+                for que in queue:
+
+                    if que.qsize() == 0:
+
+                        print('queuezero')
+
+                        time.sleep(0.01)
+
+                    if que.qsize() > 0:
+
+                        #if time.time()-pretime>300:
+
+                        #    ret = self.readpoint()
+
+                        #    pretime = time.time()
+
+                        #    if not ret:
+
+                        #        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        #        runmodel.pop(Path(self.opt.weights).stem)
+
+                        #        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        #        break
+
+                        setframe = que.get()
+
+                    # print('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')
+
+                    #if setframe is not None
+
+                        path, im, im0s, vid_cap, s, videotime ,channels = setframe
+
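+                        # the producer queue carries frames for every channel; keep only the ones
+                        # this model is configured for (the keys of self.dirmodel)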
+                        algchannel = list(self.dirmodel.keys())
+
+                        print(algchannel)
+
+                        print(path)
+
+                        algchannel = np.array(algchannel)
+
+                        channelsnp = np.array(channels)
+
+                        algindex = np.where(np.in1d(channelsnp, algchannel))[0]
+
+                        algindex = list(algindex)
+
+
+
+                        path = np.array(path)
+
+                        path = path[algindex]
+
+                        path = path.tolist()
+
+                        channels = np.array(channels)
+
+                        channels = channels[algindex]
+
+                        channels = channels.tolist()
+
+                   # print(algindex)
+
+                        if len(algindex)==0:
+
+                            continue
+
+                    #for ia in algindex:
+
+                    #    print(type(im0s[ia]))
+
+                    #    print(im0s[ia].shape)
+
+                        im = im[algindex]
+
+                    #for ia in algindex:
+
+                    #    print(type(ia))
+
+                        try: 
+
+                            im0s = np.asarray(im0s)
+
+                        except Exception:
+
+                            im0s = np.asarray(im0s,dtype=object)
+
+                        print(im0s.shape)
+
+                        im0s = im0s[algindex]
+
+                    # im0s = im0s.tolist()
+
+                        print(f'algindex = {algindex}')
+
+                        print(f'im0s ={im0s[0].shape}')
+
+                        videotime = np.array(videotime)
+
+                        videotime = videotime[algindex]
+
+                        videotime = tuple(map(tuple, videotime))
+
+                    # global tag
+
+                    # if self.contentid[0][3] == 1 :
+
+                    #    dataset.close()
+
+                    #    print('newstreaming=', self.contentid[0][3])
+
+                    #    conn = sqlite3.connect(self.source)
+
+                    #    c = conn.cursor()
+
+                    #    c.execute("UPDATE CHANGESTREAM set streamimg = 0 , addstream=0,delstream=0 where modelname= (?)",(Path(self.opt.weights).stem,))
+
+                    #    print(123)
+
+                    #    conn.commit()
+
+                    #    c.close()
+
+                    #    conn.close()
+
+                    #    print('opencv1')
+
+                    # cv2.destroyAllWindows()
+
+                    #    print('opencv')
+
+                    #    break
+
+                    # else:
+
+                    #    print('nonewstreaming=', self.contentid[0][3])
+
+                        with self.dt[0]:
+
+                            im = torch.from_numpy(im).to(self.model.device)
+
+                            im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
+
+                            im /= 255  # 0 - 255 to 0.0 - 1.0
+
+                            if len(im.shape) == 3:
+
+                                im = im[None]  # expand for batch dim
+
+
+
+                    # Inference
+
+                        with self.dt[1]:
+
+                            visualize = increment_path(self.save_dir / Path(path).stem,
+
+                                                   mkdir=True) if self.opt.visualize else False
+
+                        #print('error')
+
+                        # print(self.model)
+
+                            pred = self.model(im, augment=self.opt.augment, visualize=visualize)
+
+                        self.postprocess(pred, path, im0s, im, s, videotime,channels)
+
+               # print(f'predshape= {')
+
+
+
+            # NMS
+
+                #processlist = []
+
+            #for i in range(3):
+
+            #    process = Process(target=self.postprocess,args=(pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s))
+
+            #    process = Process(target=self.preprocess)
+
+            #    process.start()
+
+            #    processlist.append(process)
+
+            #for j in processlist:
+
+            #    j.join()
+
+            #with ProcessPoolExecutor(3) as ppool:
+
+                #for i in range(3):
+
+            #        print('hello')
+
+                    #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+
+                    #ppool.submit(func1, '张三', i)
+
+                    #ppool.submit(self.preprocess)
+
+            #self.postprocess(pred, path, im0s, dataset, im, s)
+
+            #process = Process(target=self.postprocess, args=(pred, path, im0s, dataset, im, s))
+
+                #self.postprocess(pred, path, im0s, im, s,videotime)
+
+                #process.start()
+
+    #def preprocess(self):
+
+    #    print('preprocess-----------------------------------------------')
+
+    def postprocess(self, pred, path, im0s, im, s, videotime, channels):
+
+        
+
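+        # every 300 s: reload the weights if the .pt file changed on disk and refresh this
+        # algorithm's confidence threshold from the management platform (urla)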
+        if time.time()-self.updatemtime>300:
+
+            if self.filetime !=os.path.getmtime(self.opt.weights):
+
+                device = select_device(self.opt.device)
+
+                print("load new load")
+
+                self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+                self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+
+                self.filetime = os.path.getmtime(self.opt.weights)
+
+            #try:
+
+            #if  modelalgdir[Path(self.opt.weights).stem]!='0':
+
+            print(modelalgdir[Path(self.opt.weights).stem])
+
+            try:
+
+                rea = requests.post(url=urla,data={'algorithmCode':modelalgdir[Path(self.opt.weights).stem]}).json()['data']
+
+                con = rea[0]['confidence']
+
+                self.opt.conf_thres = con
+
+            except Exception:
+
+                print('error')
+
+            #else:
+
+            #    self.opt.conf_thres = 0.25
+
+            #except Exception:
+
+                #print('posturlaerror')
+
+            self.updatemtime = time.time()
+
+
+
+        seen = 0
+
+        # dt = (Profile(), Profile(), Profile())
+
+        print(f'seen = {seen}')
+
+        windows = []
+
+        if Path(self.opt.weights).stem:
+
+            labelnamelist = []
+
+        with self.dt[2]:
+
+            #print(f'cropshape={pred.shape}')
+
+            if self.opt.v8:
+
+                from ultralytics.utils.ops import non_max_suppression
+
+            else:
+
+                from utils.general import non_max_suppression
+
+            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
+
+                                       self.opt.agnostic_nms, max_det=self.opt.max_det)
+
+
+
+            # Second-stage classifier (optional)
+
+            # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+            if self.classify and Path(self.opt.weights).stem!='persontre':
+
+                if Path(self.opt.weights).stem == 'arm':
+
+                    pred = apply_classifierarm(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
+
+                else:
+
+                    pred = apply_classifier1(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
+
+            # Process predictions
+
+            #print(f'predshape={pred.shape}')
+
+        for i, det in enumerate(pred):  # per image
+
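+            # every 300 s per channel: refresh the electronic-fence polygon (urlele) and, for
+            # personcount / sleep / duty models, the person limit (urlperson) or duration (urltime)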
+            if time.time()-self.updatetime>300:
+
+                dataele = {
+
+                    "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+
+                    "algorithmIp":self.dirmodel[channels[i]]['algip'],
+
+                    "channel":self.dirmodel[channels[i]]['channel']
+
+                }
+
+                try:
+
+                    resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+
+                    resultele = resultele.split(',||')
+
+                    resultele = tuple(resultele)
+
+                    point = '%s:'*len(resultele) %resultele
+
+                    if len(point[:-2])>1:
+
+                        self.dirmodel[channels[i]]['point'] = point[:-2]
+
+                except Exception:
+
+                    print('post error')
+
+                if Path(self.opt.weights).stem == 'personcount':
+
+                    try:
+
+                        resultper = requests.post(url=urlperson,data=dataele).json()['data']
+
+                        personcountdir[channels[i]] = int(resultper)
+
+                    except Exception:
+
+                        print('urlpersonerror')
+
+                if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
+
+                    datatime= {
+
+                                    "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+
+                                    "algorithmIp":self.dirmodel[channels[i]]['algip'],
+
+                                    "channel":self.dirmodel[channels[i]]['channel']
+
+                                }
+
+                    try:
+
+                        resulttime = requests.post(url=urltime,data=dataele).json()['data']
+
+                        self.dirmodel[channels[i]]['durtime'] = int(resulttime)
+
+                    except Exception:
+
+                        print('posttime error')
+
+                self.updatetime = time.time()
+
+            seen += 1
+
+            if self.webcam:  # batch_size >= 1
+
+                p, im0 = path[i], im0s[i].copy()
+
+                s += f'{i}: '
+
+            else:
+
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+
+
+            p = Path(p)  # to Path
+
+
+
+            save_path = str(self.save_dir / p.name)  # im.jpg
+
+            #txt_path = str(self.save_dir / 'labels' / p.stem) + (
+
+               # '' #if dataset.mode == 'image' else f'_{frame}')  # im.txt
+
+            s += '%gx%g ' % im.shape[2:]  # print string
+
+            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+
+            imc = im0.copy()  # for save_crop
+
+            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
+
+            flag = False
+
+            if len(det) and Path(self.opt.weights).stem != 'duty':
+
+                #flag = True
+
+                # Rescale boxes from img_size to im0 size
+
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+
+
+                # Print results
+
+                for c in det[:, 5].unique():
+
+                    n = (det[:, 5] == c).sum()  # detections per class
+
+                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+
+
+                # Write results
+
+                if Path(self.opt.weights).stem in ['arm', 'uniform']:
+
+                    personpred = self.personmodel(im[i][None], None, None)
+
+                    personpred = non_max_suppression(personpred, 0.7, self.opt.iou_thres, 0,
+
+                                                     self.opt.agnostic_nms, max_det=self.opt.max_det)
+
+                    if len(personpred[0])==0:
+
+                       flag = False
+
+                    elif Path(self.opt.weights).stem == 'other':
+
+                        persondet = []
+
+                        personpred =  personpred[0]
+
+                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+
+                        for *perxyxy,conf,cls in reversed(personpred):
+
+                            print(perxyxy)
+
+                            x1,y1,x3,y3 = perxyxy
+
+                            x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
+
+                            x2,y2 = x3,y1
+
+                            x4,y4 = x1,y3
+
+                        flag = self.taskname.getflag(det, persondet,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'])
+
+    
+
+                    else:
+
+                        persondet = []
+
+                        personpred =  personpred[0]
+
+                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+
+                        for *perxyxy,conf,cls in reversed(personpred):
+
+                            print(perxyxy)
+
+                            if conf<0.8:
+
+                                continue
+
+                            x1,y1,x3,y3 = perxyxy
+
+                            x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
+
+                            x2,y2 = x3,y1
+
+                            x4,y4 = x1,y3
+
+                            persondet.append([x1,y1,x2,y2,x3,y3,x4,y4])
+
+                        flag = self.taskname.getflag(det,persondet,annotator,self.dirmodel[channels[i]]['fence'],self.dirmodel[channels[i]]['point'],self.names,self.dirmodel[channels[i]]['label'])
+
+                else:
+
+                    if Path(self.opt.weights).stem in ['personcount']:
+
+                        flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'],personcountdir[channels[i]])
+
+                    elif Path(self.opt.weights).stem in ['persontre']:
+
+                        flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'],1,imc)
+
+                    else:
+
+                        flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'])
+
+            if flag:
+
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+
+                self.dirmodel[channels[i]]['detframe'].append(1)
+
+                self.dirmodel[channels[i]]['preim'] = annotator.result()
+
+                self.dirmodel[channels[i]]['oripreim'] = imc
+
+                self.dirmodel[channels[i]]['posttime'] = videotime[i]
+
+                print(self.dirmodel[channels[i]]['detframe'])
+
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+
+            else:
+
+                #print(f'deti= {i}')
+
+                #print(detframe[i])
+
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+
+                self.dirmodel[channels[i]]['detframe'].append(0)
+
+                print(self.dirmodel[channels[i]]['detframe'])
+
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+
+                #print(detframe[i])
+
+            # Stream results
+
+            #im0 = annotator.result()
+
+            #print(f'i = {i}')
+
+            #print(channels[i])
+
+            #print(postpretime[i])
+
+            #print(detframe[i])
+
+            if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1)>=1:
+
+                self.dirmodel[channels[i]]['detflag'] = True
+
+                self.dirmodel[channels[i]]['detpretime'] = time.time()
+
+            elif self.dirmodel[channels[i]]['detframe'].count(1)==0 :
+
+                self.dirmodel[channels[i]]['detflag'] = False
+
+                self.dirmodel[channels[i]]['detpretime'] = float('inf')
+
+            # Stream results
+
+            #im0 = annotator.result()
+
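+            # upload an alarm at most once every 30 s per channel, and only after the detection has
+            # persisted longer than the channel's configured duration (durtime)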
+            if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and time.time() - self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime']  and self.dirmodel[channels[i]]['detflag']:
+
+            #print()
+
+            #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
+
+                #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
+
+                #print(self.dirmodel[channels[i]]['detflag'])
+
+                print('post-------------------------------------------------------------------------')
+
+                #time.sleep(30)
+
+             #print(time.time() - postpretime[i])
+
+                #print('111111111111111111111111111111111111111111111111')
+
+                #print(dirmodel[channels[i]]['preim'].shape)
+
+                success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
+
+                content = encoded_image.tobytes()
+
+                successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
+
+                contentori = encoded_imageori.tobytes()
+
+                filename = f'{p.stem}_{int(time.time())}.jpg'
+
+                filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
+
+                print(f'str(p) {p.name}')
+
+                print(channels[i])
+
+                payload = {'channel': self.dirmodel[channels[i]]['channel'],
+
+                                   'classIndex': self.dirmodel[channels[i]]['classindex'],
+
+                                   'ip': self.dirmodel[channels[i]]['algip'],
+
+                                   'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
+
+                                   'videoUrl': channels[i]}
+
+                files = [
+
+                            ('file', (filename, content, 'image/jpeg')),
+
+                            ('oldFile', (filenameori, contentori, 'image/jpeg')),
+
+                        ]
+
+                try:
+
+                    result = requests.post(url, data=payload, files=files)
+
+                    print(result)
+
+                except Exception:
+
+                    print('posterror')
+
+                #time.sleep(3000)
+
+                self.dirmodel[channels[i]]['postpretime'] = time.time()
+
+                self.dirmodel[channels[i]]['detflag'] = False
+
+                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
+
+                year = time.strftime('%Y',time.localtime(time.time()))
+
+                month = time.strftime('%m',time.localtime(time.time()))
+
+                day = time.strftime('%d',time.localtime(time.time()))
+
+                savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+
+                savefold = Path(savefold)
+
+                savefold.mkdir(parents=True,exist_ok=True)
+
+                detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+
+                detsavefold = Path(detsavefold)
+
+                detsavefold.mkdir(parents=True,exist_ok=True)
+
+                cv2.imwrite(f'{savefold}/{timesave}.jpg',self.dirmodel[channels[i]]['oripreim'])
+
+                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',self.dirmodel[channels[i]]['preim'])
+
+            #if self.dirmodel[channels[i]]['detframe'].count(1)==0:
+
+            #    self.dirmodel[channels[i]]['detflag'] = False
+
+                #time.sleep(1)
+
+
+
+            self.view_img = False
+
+            if self.view_img:
+
+                if platform.system() == 'Linux' and p not in windows:
+
+                    windows.append(p)
+
+                    cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
+
+                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+
+                    cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
+
+                im1 = cv2.resize(im0, (1280, 720))
+
+                cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
+
+                cv2.waitKey(1)  # 1 millisecond
+
+
+
+            # Save results (image with detections)
+
+
+
+            # Print time (inference-only)
+
+            print(f'channels[i]={channels[i]}')
+
+            LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
+
+
+
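+    # load() runs in the daemon thread started in __init__: it polls this model's row in the
+    # CHANGESTREAM table every 3 s so stream additions/removals can be noticed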
+    def load(self):
+
+        conn = sqlite3.connect(self.source)
+
+        c = conn.cursor()
+
+        while True:
+
+            #
+
+            # print ("数据库打开成功")
+
+
+
+            cursor = c.execute(
+
+                "SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
+
+            # content = cursor.fetchall()
+
+            # if content[0][1] ==1 or content[0][2] ==1:
+
+            #     c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
+
+            #     print("updata changestream")
+
+            #     conn.commit()
+
+            # cursor = c.execute(
+
+            # "SELECT modelname, addstream,delstream,streamimg  from CHANGESTREAM WHERE modelname='yolov5s'")
+
+            self.contentid = cursor.fetchall()
+
+            #global tag
+
+            #tag = Value('i', self.contentid[0][3])
+
+            #print(tag.value==1)
+
+            print(f'loadcontent={self.contentid[0][3]}')
+
+            time.sleep(3)
+
+        c.close()
+
+        conn.close()
+
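+    # readpoint() pulls this model's task list from the platform (urlt) and rebuilds self.dirmodel:
+    # one entry per camera channel holding its fence, point polygon, labels, limits and timers;
+    # it returns the sorted list of configured channels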
+    def readpoint(self):
+
+        data = {
+
+                    "algorithmCode": modelalgdir[Path(self.opt.weights).stem],
+
+                    "deviceIp":None,
+
+                    'fwqCode':None
+
+                }
+
+        self.dirmodel = {}
+
+        result = requests.post(url=urlt,data=data).json()['data']
+
+        channell=[]
+
+        for info in result: 
+
+        #content = cursor.fetchall()
+
+        #self.dirmodel = {}
+
+        #for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
+
+            #address = f'{address[:-1]}0'
+
+            channel = info["deviceChannel"]
+
+            if Path(self.opt.weights).stem == "danager" and channel =='45':
+
+                continue
+
+            channell.append(channel)
+
+            self.dirmodel[channel] = {}
+
+            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
+
+            if Path(self.opt.weights).stem == "uniform":
+
+                self.dirmodel[channel]['fence'] = 1
+
+            #self.dirmodel[channel]['point'] = point
+
+            self.dirmodel[channel]['channel'] = info['deviceChannel']
+
+            self.dirmodel[channel]['classindex'] = info['algorithmCode']
+
+            self.dirmodel[channel]['ip'] = info['deviceIp']
+
+            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
+
+            dataele = {
+
+                    "algorithmCode": self.dirmodel[channel]['classindex'],
+
+                    "algorithmIp":self.dirmodel[channel]['algip'],
+
+                    "channel":self.dirmodel[channel]['channel']
+
+                }
+
+            resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+
+            resultele = resultele.split(',||')
+
+            resultele = tuple(resultele)
+
+            point = '%s:'*len(resultele) %resultele
+
+            if Path(self.opt.weights).stem == 'personcount':
+
+                resultper = requests.post(url=urlperson,data=dataele).json()['data']
+
+                personcountdir[channel] = int(resultper)
+
+            if (Path(self.opt.weights).stem == "uniform" or Path(self.opt.weights).stem == "fall") and len(point[:-2])<=1:
+
+                self.dirmodel[channel]['point'] = "150#144,1100#144,1100#550,150#550"
+
+            else:
+
+                self.dirmodel[channel]['point'] = point[:-2]
+
+            self.dirmodel[channel]['preim'] = None
+
+            self.dirmodel[channel]['oripreim'] = None
+
+            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
+
+            self.dirmodel[channel]['postpretime'] = 0
+
+            self.dirmodel[channel]['detflag'] = False
+
+            self.dirmodel[channel]['detpretime'] = float('inf')
+
+            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
+
+            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
+
+                datatime= {
+
+                        "algorithmCode": self.dirmodel[channel]['classindex'],
+
+                        "algorithmIp":self.dirmodel[channel]['algip'],
+
+                        "channel":self.dirmodel[channel]['channel']
+
+                        }
+
+                resulttime = requests.post(url=urltime,data=dataele).json()['data']
+
+                self.dirmodel[channel]['durtime'] = int(resulttime)
+
+            else:
+
+                self.dirmodel[channel]['durtime'] = 0
+
+            self.dirmodel[channel]['posttime'] = 0
+
+        print(self.dirmodel)
+
+        return sorted(channell)
+
+    #    str = str.split(":")
+
+    #    lista = []
+
+    #    for liststr in str:
+
+    #        if len(liststr) > 0:
+
+    #            li = liststr.split(',')
+
+    #            listpoint = []
+
+    #            for i, j in zip(li[::2], li[1::2]):
+
+    #                listpoint.append((i, j))
+
+    #            lista.append(listpoint)
+
+    #    return listpoint
+
+
+
+
+
+#def preprocess():
+
+#        print('preprocess-----------------------------------------------')
+
+
+
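+# getframe() is the frame producer: it opens a LoadStreamsSQLTN loader over all channels, pushes each
+# batch into every model's queue, and every 300 s re-queries the platform; when the channel set has
+# changed it closes the loader and restarts with the new sources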
+def getframe(queuelist,channelsl,source,tt,numworks,lock,numworkv):
+
+    while True:
+
+        print("dataloader")
+
+        imgsz = [768, 768]
+
+        print(f'source = {source}')
+
+        dataset = LoadStreamsSQLTN(channelsl,source, img_size=832,
+
+                                      auto=True, vid_stride=20, tt=tt,numworks = numworks)
+
+        bs = len(dataset)
+
+        vid_path, vid_writer = [None] * bs, [None] * bs
+
+        # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
+
+        # self.postpretime = [0]*bs
+
+        # Run inference
+
+
+
+        #imgsz = (1 , 3, *self.imgsz)
+
+        print(imgsz)
+
+        #self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
+
+        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+
+
+
+        #
+
+        # print ("数据库打开成功")
+
+        pretime = time.time()
+
+        tag = 0
+
+        sourcebase = 'project0117.db'
+
+        for path, im, im0s, vid_cap, s, videotime,channels in dataset:
+
+            # print('*'*21)
+
+            # global tag
+
+            # print('datasetvideo')
+
+            # if time.time()-pretime > 300:
+
+            #     pretime = time.time()
+
+
+
+            #     conn = sqlite3.connect(sourcebase)
+
+            #     c = conn.cursor()
+
+            #     cursor = c.execute("SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= 'helmet'")
+
+            #     contentid = cursor.fetchall()
+
+            #     tag = contentid[0][3]
+
+            # if tag == 1:
+
+            #     lock.acquire()
+
+            #     numworkv.value += 1
+
+            #     dataset.close()
+
+            #     if numworkv.value==3:
+
+            #         print('newstreaming=', tag)
+
+            #         conn = sqlite3.connect(source)
+
+            #         c = conn.cursor()
+
+            #         c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname='helmet'")
+
+            #         print(123)
+
+            #         conn.commit()
+
+            #         c.close()
+
+            #         conn.close()
+
+            #     lock.release()
+
+            #     print('opencv1')
+
+            #     # cv2.destroyAllWindows()
+
+            #     print('opencv')
+
+            #     break
+
+            # else:
+
+            #     print('nonewstreaming=', tag)
+
+            if time.time()-pretime > 300:
+
+                channellist = []
+
+                pretime = time.time()
+
+                data = {
+
+                    "algorithmCode": None,
+
+                    "deviceIp":None,
+
+                    "fwqCode":None
+
+
+
+                }
+
+                try:
+
+                    result = requests.post(url=urlt,data=data).json()['data']
+
+                except Exception:
+
+                    result = []
+
+                for info in result:
+
+                    data = {
+
+                        "channel": info["deviceChannel"],
+
+                    "ip": info["deviceAlgorithmIp"]
+
+                    }
+
+                    chaflag = any(info["deviceChannel"] in t for t in channellist)
+
+        #personcountdir[channel] = num
+
+                    if not chaflag:
+
+                        address = requests.post(url=urlrtsp,data=data).json()['msg']
+
+                        channellist.append((info['deviceChannel'],address))
+
+                channelsa  = []
+
+                sourcea = []
+
+                channellist = set(channellist)
+
+                channellist = sorted(channellist,key=lambda x:x[0])
+
+                #channellist = set(channellist)
+
+                for cha,add in channellist:
+
+                    channelsa.append(cha)
+
+                    sourcea.append(add)
+
+                channelsl = sorted(channelsl)
+
+                #channelsa = sorted(channelsa)
+
+                if channelsa!=channelsl and len(channelsa)>0:
+
+                    print(f'channelsa = {channelsa}')
+
+                    print(f'channelsl = {channelsl}')
+
+                    dataset.close()
+
+                    channelsl = channelsa
+
+                    source = sourcea
+
+                    break
+
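+            # fan the batch out to this producer's queue for each model, but only between 07:00 and
+            # 17:59; a queue that reaches 10 items has its oldest entry popped so the producer does not back up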
+            for key,value in queuelist.items():
+                hour = time.localtime(time.time()).tm_hour
+                if hour in range(7,18):
+                    value[-1].put((path, im, im0s, vid_cap, s, videotime,channels))
+
+                    value[-1].get() if value[-1].qsize() == 10 else time.sleep(0.001)
+
+
+
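+# getmutpro() starts `numworks` producer processes and gives every model one Queue(maxsize=10) per
+# producer; the dict of per-model queue lists is returned for the consumer processes to read from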
+def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1,modellen=None):
+
+    processlist = []
+
+    queuelist = {}
+
+    for i in range(numworks):
+
+        for model in modellen:    
+
+            queue = Queue(maxsize=10)
+
+            queuelist.setdefault(model,[])
+
+            queuelist[model].append(queue)
+
+        process = Process(target=getframe,
+
+                        args=(queuelist, channels,source, i,numworks,lock,numworkv))
+
+        processlist.append(process)
+
+        process.start()
+
+        #queuelist.append(queue)
+
+    return queuelist
+
+
+
+#    path = []
+
+#    im0s = []
+
+#    vid_cap = None
+
+#    s = ''
+
+#    videotime = []
+
+#    while True:
+
+#        imlist = []
+
+#        pathlist = []
+
+#        im0slist = []
+
+#        channelslist = []
+
+#        vid_cap = None
+
+#        s = ''
+
+#        videotimelist = []
+
+#        for q in queuelist:
+
+#            if q.qsize()>0:
+
+#                setframe = q.get()
+
+#                path, im, im0s, vid_cap, s, videotime ,channels = setframe
+
+#                pathlist += path
+
+#                channelslist +=channels 
+
+#                im0slist += im0s
+
+#                videotimelist += videotime
+
+#                imlist.append(im)
+
+#        if len(imlist)>0:
+
+#            im = np.concatenate(imlist)
+
+#        if len(pathlist)>0:
+
+#            print(len(path),im.shape,len(im0s))
+
+#            streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
+
+            #print(f'streamlist = {len(streamlist)}')
+
+#        streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
+
+
+
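+# modelfun() is the per-model consumer entry point: it builds a Detect instance for one weight file
+# and blocks in infer(), reading frames from that model's queues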
+def modelfun(queue,weights,sourcedb,classes,device,classify,conf_thres,runmodel,v8=False):
+
+    print(weights)
+
+    detectdemo=Detect(weights=weights,source=sourcedb,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
+
+    detectdemo.infer(queue,runmodel)
+
+
+
+def parse_opt():
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+
+    opt = parser.parse_args()
+
+    return opt
+
+
+
+
+
+def main(opt):
+
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+
+    run(**vars(opt))
+
+
+
+
+
+if __name__ == '__main__':
+
+    #torch.multiprocessing.set_start_method('spawn')
+
+    #set_start_method('spawn')
+
+    opt = parse_opt()
+
+    dbpath = 'projectnew.db'
+
+    
+
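+    # bootstrap: task() syncs the platform task list into the local sqlite db, then every channel's
+    # RTSP address is refreshed through the previewURLs endpoint and written back to STREAM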
+    conn = sqlite3.connect(dbpath)
+
+#
+
+# print ("数据库打开成功")
+
+    c = conn.cursor()
+
+    task(c,conn,urlt,urla)
+
+    cursor = c.execute('select channel,algip  from stream ')
+
+    result = cursor.fetchall()
+
+    for channel ,algip in result:
+
+        data = {
+
+        "channel": channel,
+
+        "ip":algip
+
+        }
+
+        #personcountdir[channel] = num
+
+        address = requests.post(url=urlrtsp,data=data).json()['msg']
+
+        c.execute('UPDATE STREAM set address= (?) where channel =(?)',(address,channel))
+
+    conn.commit()
+
+    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname= 'arm' or modelname = 'persontre' or modelname = 'bag'")
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+
+    content = cursor.fetchall()
+
+    cursor = c.execute("SELECT address,channel from STREAM ")
+
+    #cursor = c.execute("SELECT address from STREAM where modelname = 'helmet'")
+
+    contenta = cursor.fetchall()
+
+    source = []
+
+    modellist = []
+
+    addcha = []
+
+    channellist = []
+
+    for i in contenta:
+
+        addcha.append((i[0],i[1]))
+
+        #modellist.append(i[1])
+
+    addcha = set(addcha)
+
+    addcha = sorted(addcha,key=lambda x:x[1])
+
+    for add,cha in addcha:
+
+        source.append(add)
+
+        channellist.append(cha)
+
+    #source = set(source)
+
+    print(addcha)
+
+    source = list(source)
+
+    cursor = c.execute("SELECT modelname from STREAM where (modelname ='helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty'  or modelname = 'sleep'  or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag')")
+
+    contentm = cursor.fetchall()
+
+    for m in contentm:
+
+        modellist.append(m[0])
+
+    modellist = set(modellist)
+
+    modellist = list(modellist)
+
+    contentlist = []
+
+    for i in content:
+
+        contentlist.append(i[0])
+
+    #source.sort()
+
+    n = len(content)
+
+    print(f'modelname={n}')
+
+    print(content)
+
+    #content.reverse()
+
+    print(content)
+
+    print(source)
+
+    # main(opt)
+
+    #processes = []
+
+    streamqueue = Queue(maxsize=4)
+
+    numworkv = Value('i', 0)
+
+    manager = Manager()
+
+    lock = multiprocessing.Lock()
+
+    streamlist = manager.list()
+
+    numworks = 7
+
+    modellen = []
+
+    for i in modellist:
+
+        if i in contentlist:
+
+            modellen.append(i)
+
+    queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks,modellen)
+
+    deid = 0
+
+    #pool = ThreadPoolExecutor(max_workers=n)
+
+    runmodel = manager.dict()
+
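+    # supervisor loop: spawn one consumer process per enabled model (YOLOv8-style loading for most
+    # models, YOLOv5 loading for fall/extinguisher), then every 600 s re-run task() and re-read the
+    # stream tables so newly enabled models are picked up on the next pass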
+    while True:
+
+        for i in modellist:
+
+            if i in contentlist:
+
+                if i not in runmodel:
+
+            #print(i)
+
+            #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+
+                    c.execute('select conf,cla from changestream where modelname = (?)',(i,))
+
+                    rea = c.fetchall()
+
+                    print(f'weights ={i[0]}.pt')
+
+                    if i in ['duty','danager','inspection','cross','personcount']:
+
+                        process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,[0],0,rea[0][1],rea[0][0],runmodel,True))
+
+                    else:
+
+                        if i in ['fall','extinguisher']:
+
+                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel))
+
+                        else:
+
+                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel,True))
+
+
+
+                #elif i in ['helmet','fire','smoke','fall']:
+
+                #process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,True))
+
+                #else:
+
+                #process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,False))
+
+
+
+                #processes.append(process)
+
+                #process.start()
+
+                #detectobj = Process(target=detectdemo.infer,args=(queue,))
+
+                # Detect(weights=f'{i[0]}.pt')
+
+
+
+                    time.sleep(3)
+
+                    process.start()
+
+                    deid = deid+1
+
+                    runmodel[i] = 1
+
+        time.sleep(600)
+
+        task(c,conn,urlt,urla)
+
+        #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
+
+        #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+
+        cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag'")
+
+        content = cursor.fetchall()
+
+        contentlist = []
+
+        for con in content:
+
+            contentlist.append(con[0])
+
+        #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+
+        cursor = c.execute("SELECT address,channel from STREAM ")
+
+        contenta = cursor.fetchall()
+
+        source = []
+
+        modellist = []
+
+        addcha = []
+
+        channellist = []
+
+        for i in contenta:
+
+            addcha.append((i[0],i[1]))
+
+            #modellist.append(i[1])
+
+        addcha = set(addcha)
+
+        addcha = sorted(addcha)
+
+        for a,cha in addcha:
+
+            source.append(a)
+
+            channellist.append(cha)
+
+        print(addcha)
+
+        #source = set(source)
+
+        source = list(source)
+
+        #source.sort()
+
+        cursor = c.execute("SELECT modelname from STREAM where (modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag')")
+
+        contentm = cursor.fetchall()
+
+        for m in contentm:
+
+            modellist.append(m[0])
+
+        modellist = set(modellist)
+
+        n = len(content)
+
+        print(f'modelname={n}')
+
+        print(content)
+
+        #content.reverse()
+
+        print(content)
+
+        #pool.submit(detectobj.infer)
+
+
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+
+    #content = cursor.fetchall()
+
+    #n = len(content)
+
+    #print(f'modelname={n}')
+
+    #print(content)
+
+    #content.reverse()
+
+    #print(content)
+
+    # main(opt)
+
+    #processes = []
+
+    #pool = ProcessPoolExecutor(max_workers=n)
+
+    #for i in content:
+
+        #print(i)
+
+        #detectdemo=Detect(weights=f'{i[0]}.pt')
+
+        #process = Process(target=detectdemo.infer)
+
+        #processes.append(process)
+
+        #process.start()
+
+        #detectobj = Detect(weights=f'{i[0]}.pt')
+
+    #    time.sleep(3)
+
+        #pool.submit(detectobj.infer)
+
+    #    print('111111111111111111111111111111111111111111111111111111111')
+
+        #pool.submit(TestA().func1, '张三', i)
+
+        #print('----------------------------------------------------------------')
+
+    #time.sleep(3000)
+
+    # wait for all processes to finish
+
+    #for process in processes:
+
+    #    process.join()
+
+
+
+        #pool.submit(Detect(weights=f'{i[0]}.pt').infer)
+
+    # if isinstance(opt.weights,list):
+
+    #     opt.weights = opt.weights[0]
+
+    #signal.signal(signal.SIGINT, my_handler)
+
+    #detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
+
+    #detectdemo1.infer()
+
+    #a = Test
+
+    #with ProcessPoolExecutor(3) as ppool:
+
+        #for i in range(3):
+
+    #        print('hello')
+
+                        #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+
+            #ppool.submit(TestA().func1, '张三', i)
+
+    #ta = TestA()
+
+    #with ProcessPoolExecutor(5) as ppool: # create a pool of 5 worker processes
+
+    #    for i in range(1, 4):
+
+    #        ppool.submit(func1, '张三', i)
+
+    #f1= pool.submit(detectdemo1.infer)
+
+    # print("线程1-----------------------------------------------------------------------------------")
+
+    #detectdemo2 = Detect(weights=r"helmet.pt")
+
+    #f2=pool.submit(detectdemo2.infer)
+
+    # print("线程2-------------------------------------------------------------------------------------")
+
+    #detectdemo3 = threading.Thread(target=detectdemo3.infer)
+
+    #detectdemo3 = Detect(weights=r"fall.pt")
+
+    #f3=pool.submit(detectdemo3.infer)
+
+

+ 2161 - 0
detectopencvmutbig.py

@@ -0,0 +1,2161 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+"""
+
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+
+
+Usage - sources:
+
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+
+                                                     img.jpg                         # image
+
+                                                     vid.mp4                         # video
+
+                                                     screen                          # screenshot
+
+                                                     path/                           # directory
+
+                                                     list.txt                        # list of images
+
+                                                     list.streams                    # list of streams
+
+                                                     'path/*.jpg'                    # glob
+
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+
+
+Usage - formats:
+
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+
+                                 yolov5s.torchscript        # TorchScript
+
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+
+                                 yolov5s_openvino_model     # OpenVINO
+
+                                 yolov5s.engine             # TensorRT
+
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+
+                                 yolov5s.pb                 # TensorFlow GraphDef
+
+                                 yolov5s.tflite             # TensorFlow Lite
+
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+
+                                 yolov5s_paddle_model       # PaddlePaddle
+
+"""
+
+import matplotlib.path as mat
+
+import requests
+
+import argparse
+
+import os
+
+import platform
+
+import sqlite3
+
+import sys
+
+import threading
+
+import time
+
+from pathlib import Path
+
+import signal
+
+import torch
+
+import torch.nn.functional as F  # used by clapre() below
+
+from concurrent.futures import ThreadPoolExecutor
+
+from concurrent.futures import ProcessPoolExecutor
+
+from multiprocessing import Process,Manager,Value
+
+from multiprocessing import Queue
+
+from multiprocessing import set_start_method
+
+import multiprocessing
+
+import multiprocessing as mp
+
+import numpy as np
+
+from torchvision import transforms
+
+FILE = Path(__file__).resolve()
+
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+
+if str(ROOT) not in sys.path:
+
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+
+
+from models.common import DetectMultiBackend
+
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLT,LoadStreamsSQLTN
+
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,apply_classifier1,apply_classifieruniform,compute_IOU,task,apply_classifierarm)
+
+from utils.plots import Annotator, colors, save_one_box
+
+from utils.torch_utils import select_device, smart_inference_mode
+
+from utils.renwu import newHelmet,newUniform,Fall,Personcount,Arm,Bag,Cross,Extinguisher,Persontre,Bag,Danager
+from transformers import AutoProcessor, AutoModelForVision2Seq
+#from testpool import func1,TestA
+
+
+
+# def my_handler(signum, frame):
+
+#     exit(0)
+
+#url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+
+url = "http://172.19.152.231/open/api/operate/upload"
+
+urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
+
+urlt = "http://172.19.152.231/open/api/operate/taskList"
+
+urla = "http://172.19.152.231/open/api/operate/algorithmList"
+
+urlele = "http://172.19.152.231/open/api/operate/fence"
+
+urltime = "http://172.19.152.231/open/api/operate/getTime"
+
+urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
+
+#modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
+
+#algmodel = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
+
+modelnamedir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'duty','98':'face','55':'oil','52':'jingdian','53':'rope','54':'personcar','39':'inspection','11':'reflective','12':'phone','66':'extinguisher','67':'tizi','68':'menjin','35':'arm','36':'persontre','33':'bag'}
+
+modellabeldir = {'0':'head,person','8':'person','10':'black_work_clothes,blue_work_clothes,person','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'person','98':'face','55':'oil','52':'person,hand,ball','53':'rope','54':'person','39':'person','11':'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other','12':'phone','66':'extinguisher','67':'person,tizi','68':'person','35':'barearm,arm','36':'person,foot,cart,bag,box','33':'handbox,handbag'}
+
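+# modelnamedir maps the platform's algorithmCode to a weight-file stem, modellabeldir maps the
+
+# same code to that model's label string, and modelalgdir (built below) inverts modelnamedir.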
+modelalgdir = {}
+
+personcountdir = {}
+
+for key,value in modelnamedir.items():
+
+    modelalgdir[value] = key
+
+taskmap = {'helmet':newHelmet,'uniform':newUniform,'fall':Fall,'personcount':Personcount,'arm':Arm,'bag':Bag,'cross':Cross,'extinguisher':Extinguisher,'persontre':Persontre,'danager':Danager}
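+
+# taskmap maps a weight-file stem (e.g. 'helmet') to its task handler class from utils.renwu;
+
+# Detect.__init__ instantiates the matching handler and postprocess() calls its getflag().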
+
+mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
+
+test = transforms.Compose([transforms.Resize((224,224)),
+
+        #transforms.CenterCrop(224),
+
+        transforms.ToTensor(),
+
+        transforms.Normalize(mean=mean, std=std)
+
+                           ])
+
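+# clapre: stack a batch of preprocessed crops, run the secondary classifier on GPU 0, and
+
+# return the boxes (clapoint) whose predicted class index is < 5; returns None otherwise.
+
+# Illustrative call only (claimg: list of tensors shaped like the `test` transform output,
+
+# clapoint: list of box tensors):  kept = clapre(classifier_model, claimg, clapoint)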
+def clapre(modelcla,claimg,clapoint):
+
+    imgten = torch.stack(claimg,dim=0)
+
+    clapoint = torch.stack(clapoint,dim=0)
+
+    imgten = imgten.to(0)
+
+    result = modelcla(imgten)
+
+    result = F.softmax(result, dim=1)
+
+    print(result)
+
+    index = result.argmax(1)
+
+    index = index.cpu().numpy()
+
+    index = np.argwhere(index<5)
+
+    index = index.reshape(-1)
+
+    print(index)
+
+    if len(index)>0:
+
+        print(clapoint[index])
+
+        return clapoint[index]
+
+    else:
+
+        return None
+
+
+
+class YoloOpt:
+
+    def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
+
+                 imgsz=(640,640),
+
+                 conf_thres=0.25,
+
+                 iou_thres=0.45,
+
+                 max_det=1000,
+
+                 device='',
+
+                 view_img=False,
+
+                 save_txt=False,
+
+                 save_conf=False,
+
+                 save_crop=False,
+
+                 nosave=True,
+
+                 classes=None,
+
+                 agnostic_nms=False,
+
+                 augment=False,
+
+                 visualize=False,
+
+                 update=False,
+
+                 project=ROOT / 'runs/detect',
+
+                 name='exp',
+
+                 exist_ok=False,
+
+                 line_thickness=1,
+
+                 hide_labels=False,
+
+                 hide_conf=False,
+
+                 half=False,
+
+                 dnn=False,
+
+                 vid_stride=10,
+
+                 classify=False,
+
+                 v8=False):
+
+
+
+        self.weights = weights  # path to the weights file
+
+        self.source = source  # images/streams to run detection on
+
+        self.data = data
+
+        if imgsz is None:
+
+            self.imgsz = (640, 640)
+
+        else:
+
+            self.imgsz = imgsz  # input image size, default (640, 640)
+
+        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
+
+        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
+
+        self.device = device  # device to run inference on
+
+        self.view_img = view_img  # whether to display predicted images/videos, default False
+
+        self.classes = classes  # keep only these classes; default keeps all
+
+        self.agnostic_nms = agnostic_nms  # class-agnostic NMS (suppress boxes across classes), default False
+
+        self.augment = augment  # augmented inference (TTA / multi-scale prediction)
+
+        self.update = update  # if True, run strip_optimizer on all models to remove optimizer state from the .pt file, default False
+
+        self.exist_ok = exist_ok  # if True, reuse an existing project/name directory instead of incrementing it, default False
+
+        self.project = project  # directory for saving run outputs; not used by this program
+
+        self.name = name  # experiment name; also not used by this program
+
+        self.max_det = max_det
+
+        self.save_txt = save_txt
+
+        self.save_conf= save_conf
+
+        self.save_crop= save_crop
+
+        self.nosave = nosave
+
+        self.visualize = visualize
+
+        self.line_thickness = line_thickness
+
+        self.hide_labels = hide_labels
+
+        self.hide_conf = hide_conf
+
+        self.half = half
+
+        self.dnn = dnn
+
+        self.vid_stride = vid_stride
+
+        self.classify = classify
+
+        self.v8 = v8
+
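+# Detect wires one model to the platform: the constructor loads the detector (plus an
+
+# auxiliary person model for some tasks), readpoint() pulls per-channel config from the
+
+# task API, a daemon thread (load) polls the CHANGESTREAM table, and infer() consumes
+
+# frame batches from the queues that getframe() fills further below.
+
+# Minimal usage sketch (weight path and device are illustrative assumptions):
+
+#   det = Detect(weights=ROOT / 'helmet.pt', source='projectnew.db', device='0', classify=True)
+
+#   det.infer(queuelist['helmet'], runmodel)  # queuelist from getmutpro(), runmodel a Manager().dict()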
+class Detect:
+
+    def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classes=None,device=None,classify=False,conf_thres=0.25,v8=False):
+
+        print(f'detectweights = {weights}')
+
+        if v8:
+
+            from ultralytics.nn.autobackend import AutoBackend
+
+            from ultralytics.utils.ops import non_max_suppression
+
+        else:
+
+            from utils.general import non_max_suppression
+
+        self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
+
+        self.source = str(self.opt.source)
+
+        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
+
+        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+
+        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+
+        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
+
+        screenshot = self.source.lower().startswith('screen')
+
+        if is_url and is_file:
+
+            self.source = check_file(self.source)  # download
+
+        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
+
+        #self.save_dir = self.save_dir / Path(self.opt.weights).stem
+
+        #self.save_dir.mkdir(parents=True, exist_ok=True)
+
+        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+        print(f'device = {self.opt.device}')
+
+        device = select_device(self.opt.device)
+
+        if v8:
+
+            self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+            if Path(weights).stem in ['arm', 'uniform']:
+
+                if Path(weights).stem == 'arm':
+
+                    self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+                elif Path(weights).stem == 'uniform':
+
+                    self.personmodel = AutoBackend('yolo11m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+        else:
+
+            self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+            if Path(weights).stem in ['helmet','arm']:
+
+                self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)        
+
+        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+
+        self.classify = classify
+
+        if self.classify:
+
+            #if Path(weights).stem =='uniform':
+
+            #    self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt",device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+            #else:
+            if Path(weights).stem != "arm":
+                classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
+
+                self.classifier_model = classifier_model.to(device)
+
+                self.classifier_model.eval()
+            else:
+                self.classifier_model = AutoBackend(f"{Path(weights).stem}cls.pt",device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+        self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
+
+        self.model.warmup(imgsz=(1 , 3, *self.imgsz))
+
+        self.readpoint()
+
+        print(self.imgsz)
+
+        self.updatetime = time.time()
+
+        self.updatemtime = time.time()
+
+        self.filetime = os.path.getmtime(self.opt.weights)
+
+        self.taskname = taskmap[Path(self.opt.weights).stem]()
+
+        bs = 1  # batch_size
+
+        if self.webcam:
+
+            #self.view_img = check_imshow(warn=True)
+
+            self.view_img = False
+
+            # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+
+            # bs = len(dataset)
+
+        elif screenshot:
+
+            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
+
+        else:
+
+            dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
+
+        t1 = threading.Thread(target=self.load,daemon=True)
+
+        t1.start()
+
+    @smart_inference_mode()
+
+    def infer(self,queue,runmodel):
+
+            pretime = time.time()
+
+            seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
+
+            #
+
+            # print("database opened successfully")
+
+            while True:
+
+                if time.localtime().tm_hour not in range(7,20):
+
+                    time.sleep(30)
+
+                    continue
+
+                #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+
+                if time.time()-pretime>300:
+
+                    ret = self.readpoint()
+
+                    pretime = time.time()
+
+                    if not ret:
+
+                        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        runmodel.pop(Path(self.opt.weights).stem)
+
+                        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        break
+
+                print(f'queuelen = {len(queue)}')
+
+                for que in queue:
+
+                    if que.qsize() == 0:
+
+                        print('queuezero')
+
+                        time.sleep(0.01)
+
+                    if que.qsize() > 0:
+
+                        #if time.time()-pretime>300:
+
+                        #    ret = self.readpoint()
+
+                        #    pretime = time.time()
+
+                        #    if not ret:
+
+                        #        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        #        runmodel.pop(Path(self.opt.weights).stem)
+
+                        #        print(f'{Path(self.opt.weights).stem} {runmodel}')
+
+                        #        break
+
+                        setframe = que.get()
+
+                    # print('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')
+
+                    #if setframe is not None
+
+                        path, im, im0s, vid_cap, s, videotime ,channels = setframe
+
+                        algchannel = list(self.dirmodel.keys())
+
+                        print(algchannel)
+
+                        print(path)
+
+                        algchannel = np.array(algchannel)
+
+                        channelsnp = np.array(channels)
+
+                        algindex = np.where(np.in1d(channelsnp, algchannel))[0]
+
+                        algindex = list(algindex)
+
+
+
+                        path = np.array(path)
+
+                        path = path[algindex]
+
+                        path = path.tolist()
+
+                        channels = np.array(channels)
+
+                        channels = channels[algindex]
+
+                        channels = channels.tolist()
+
+                   # print(algindex)
+
+                        if len(algindex)==0:
+
+                            continue
+
+                    #for ia in algindex:
+
+                    #    print(type(im0s[ia]))
+
+                    #    print(im0s[ia].shape)
+
+                        im = im[algindex]
+
+                    #for ia in algindex:
+
+                    #    print(type(ia))
+
+                        try: 
+
+                            im0s = np.asarray(im0s)
+
+                        except Exception:
+
+                            im0s = np.asarray(im0s,dtype=object)
+
+                        print(im0s.shape)
+
+                        im0s = im0s[algindex]
+
+                    # im0s = im0s.tolist()
+
+                        print(f'algindex = {algindex}')
+
+                        print(f'im0s ={im0s[0].shape}')
+
+                        videotime = np.array(videotime)
+
+                        videotime = videotime[algindex]
+
+                        videotime = tuple(map(tuple, videotime))
+
+                    # global tag
+
+                    # if self.contentid[0][3] == 1 :
+
+                    #    dataset.close()
+
+                    #    print('newstreaming=', self.contentid[0][3])
+
+                    #    conn = sqlite3.connect(self.source)
+
+                    #    c = conn.cursor()
+
+                    #    c.execute("UPDATE CHANGESTREAM set streamimg = 0 , addstream=0,delstream=0 where modelname= (?)",(Path(self.opt.weights).stem,))
+
+                    #    print(123)
+
+                    #    conn.commit()
+
+                    #    c.close()
+
+                    #    conn.close()
+
+                    #    print('opencv1')
+
+                    # cv2.destroyAllWindows()
+
+                    #    print('opencv')
+
+                    #    break
+
+                    # else:
+
+                    #    print('nonewstreaming=', self.contentid[0][3])
+
+                        with self.dt[0]:
+
+                            im = torch.from_numpy(im).to(self.model.device)
+
+                            im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
+
+                            im /= 255  # 0 - 255 to 0.0 - 1.0
+
+                            if len(im.shape) == 3:
+
+                                im = im[None]  # expand for batch dim
+
+
+
+                    # Inference
+
+                        with self.dt[1]:
+
+                            visualize = increment_path(self.save_dir / Path(path).stem,
+
+                                                   mkdir=True) if self.opt.visualize else False
+
+                        #print('error')
+
+                        # print(self.model)
+
+                            pred = self.model(im, augment=self.opt.augment, visualize=visualize)
+
+                        self.postprocess(pred, path, im0s, im, s, videotime,channels)
+
+               # print(f'predshape= {')
+
+
+
+            # NMS
+
+                #processlist = []
+
+            #for i in range(3):
+
+            #    process = Process(target=self.postprocess,args=(pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s))
+
+            #    process = Process(target=self.preprocess)
+
+            #    process.start()
+
+            #    processlist.append(process)
+
+            #for j in processlist:
+
+            #    j.join()
+
+            #with ProcessPoolExecutor(3) as ppool:
+
+                #for i in range(3):
+
+            #        print('hello')
+
+                    #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+
+                    #ppool.submit(func1, '张三', i)
+
+                    #ppool.submit(self.preprocess)
+
+            #self.postprocess(pred, path, im0s, dataset, im, s)
+
+            #process = Process(target=self.postprocess, args=(pred, path, im0s, dataset, im, s))
+
+                #self.postprocess(pred, path, im0s, im, s,videotime)
+
+                #process.start()
+
+    #def preprocess(self):
+
+    #    print('preprocess-----------------------------------------------')
+
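+    # postprocess: roughly every 300 s it reloads the weights if the .pt file changed and
+
+    # refreshes the confidence threshold from the algorithm API; it then applies NMS, the
+
+    # optional second-stage classifier, and each channel's task check (taskname.getflag),
+
+    # posting an alarm image to the platform once a channel stays flagged past its delay.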
+    def postprocess(self, pred, path, im0s,  im, s,videotime,channels):
+
+        
+
+        if time.time()-self.updatemtime>300:
+
+            if self.filetime !=os.path.getmtime(self.opt.weights):
+
+                device = select_device(self.opt.device)
+
+                print("load new load")
+
+                self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+
+                self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+
+                self.filetime = os.path.getmtime(self.opt.weights)
+
+            #try:
+
+            #if  modelalgdir[Path(self.opt.weights).stem]!='0':
+
+            print(modelalgdir[Path(self.opt.weights).stem])
+
+            try:
+
+                rea = requests.post(url=urla,data={'algorithmCode':modelalgdir[Path(self.opt.weights).stem]}).json()['data']
+
+                con = rea[0]['confidence']
+
+                self.opt.conf_thres = con
+
+            except Exception:
+
+                print('error')
+
+            #else:
+
+            #    self.opt.conf_thres = 0.25
+
+            #except Exception:
+
+                #print('posturlaerror')
+
+            self.updatemtime = time.time()
+
+
+
+        seen = 0
+
+        # dt = (Profile(), Profile(), Profile())
+
+        print(f'seen = {seen}')
+
+        windows = []
+
+        if Path(self.opt.weights).stem:
+
+            labelnamelist = []
+
+        with self.dt[2]:
+
+            #print(f'cropshape={pred.shape}')
+
+            if self.opt.v8:
+
+                from ultralytics.utils.ops import non_max_suppression
+
+            else:
+
+                from utils.general import non_max_suppression
+
+            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
+
+                                       self.opt.agnostic_nms, max_det=self.opt.max_det)
+
+
+
+            # Second-stage classifier (optional)
+
+            # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+            if self.classify and Path(self.opt.weights).stem!='persontre':
+
+                if Path(self.opt.weights).stem == 'arm':
+
+                    pred = apply_classifierarm(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
+
+                else:
+
+                    pred = apply_classifier1(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
+
+            # Process predictions
+
+            #print(f'predshape={pred.shape}')
+
+        for i, det in enumerate(pred):  # per image
+
+            if time.time()-self.updatetime>300:
+
+                dataele = {
+
+                    "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+
+                    "algorithmIp":self.dirmodel[channels[i]]['algip'],
+
+                    "channel":self.dirmodel[channels[i]]['channel']
+
+                }
+
+                try:
+
+                    resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+
+                    resultele = resultele.split(',||')
+
+                    resultele = tuple(resultele)
+
+                    point = '%s:'*len(resultele) %resultele
+
+                    if len(point[:-2])>1:
+
+                        self.dirmodel[channels[i]]['point'] = point[:-2]
+
+                except Exception:
+
+                    print('post error')
+
+                if Path(self.opt.weights).stem == 'personcount':
+
+                    try:
+
+                        resultper = requests.post(url=urlperson,data=dataele).json()['data']
+
+                        personcountdir[channels[i]] = int(resultper)
+
+                    except Exception:
+
+                        print('urlpersonerror')
+
+                if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
+
+                    datatime= {
+
+                                    "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+
+                                    "algorithmIp":self.dirmodel[channels[i]]['algip'],
+
+                                    "channel":self.dirmodel[channels[i]]['channel']
+
+                                }
+
+                    try:
+
+                        resulttime = requests.post(url=urltime,data=datatime).json()['data']
+
+                        self.dirmodel[channels[i]]['durtime'] = int(resulttime)
+
+                    except Exception:
+
+                        print('posttime error')
+
+                self.updatetime = time.time()
+
+            seen += 1
+
+            if self.webcam:  # batch_size >= 1
+
+                p, im0 = path[i], im0s[i].copy()
+
+                s += f'{i}: '
+
+            else:
+
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+
+
+            p = Path(p)  # to Path
+
+
+
+            save_path = str(self.save_dir / p.name)  # im.jpg
+
+            #txt_path = str(self.save_dir / 'labels' / p.stem) + (
+
+               # '' #if dataset.mode == 'image' else f'_{frame}')  # im.txt
+
+            s += '%gx%g ' % im.shape[2:]  # print string
+
+            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+
+            imc = im0.copy()  # for save_crop
+
+            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
+
+            flag = False
+
+            if len(det) and Path(self.opt.weights).stem != 'duty':
+
+                #flag = True
+
+                # Rescale boxes from img_size to im0 size
+
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+
+
+                # Print results
+
+                for c in det[:, 5].unique():
+
+                    n = (det[:, 5] == c).sum()  # detections per class
+
+                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+
+
+                # Write results
+
+                if Path(self.opt.weights).stem in ['arm', 'uniform']:
+
+                    personpred = self.personmodel(im[i][None], None, None)
+
+                    personpred = non_max_suppression(personpred, 0.7, self.opt.iou_thres, 0,
+
+                                                     self.opt.agnostic_nms, max_det=self.opt.max_det)
+
+                    if len(personpred[0])==0:
+
+                       flag = False
+
+                    elif Path(self.opt.weights).stem == 'other':
+
+                        persondet = []
+
+                        personpred =  personpred[0]
+
+                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+
+                        for *perxyxy,conf,cls in reversed(personpred):
+
+                            print(perxyxy)
+
+                            x1,y1,x3,y3 = perxyxy
+
+                            x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
+
+                            x2,y2 = x3,y1
+
+                            x4,y4 = x1,y3
+
+                        flag = self.taskname.getflag(det, persondet,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'])
+
+    
+
+                    else:
+
+                        persondet = []
+
+                        personpred =  personpred[0]
+
+                        personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+
+                        for *perxyxy,conf,cls in reversed(personpred):
+
+                            print(perxyxy)
+
+                            if conf<0.8:
+
+                                continue
+
+                            x1,y1,x3,y3 = perxyxy
+
+                            x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
+
+                            x2,y2 = x3,y1
+
+                            x4,y4 = x1,y3
+
+                            persondet.append([x1,y1,x2,y2,x3,y3,x4,y4])
+
+                        flag = self.taskname.getflag(det,persondet,annotator,self.dirmodel[channels[i]]['fence'],self.dirmodel[channels[i]]['point'],self.names,self.dirmodel[channels[i]]['label'])
+
+                else:
+
+                    if Path(self.opt.weights).stem in ['personcount']:
+
+                        flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'],personcountdir[channels[i]])
+
+                    elif Path(self.opt.weights).stem in ['persontre']:
+
+                        flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'],1,imc)
+
+                    else:
+
+                        flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+
+                                                 self.dirmodel[channels[i]]['point'], self.names,
+
+                                                 self.dirmodel[channels[i]]['label'])
+
+            if flag:
+
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+
+                self.dirmodel[channels[i]]['detframe'].append(1)
+
+                self.dirmodel[channels[i]]['preim'] = annotator.result()
+
+                self.dirmodel[channels[i]]['oripreim'] = imc
+
+                self.dirmodel[channels[i]]['posttime'] = videotime[i]
+
+                print(self.dirmodel[channels[i]]['detframe'])
+
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+
+            else:
+
+                #print(f'deti= {i}')
+
+                #print(detframe[i])
+
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+
+                self.dirmodel[channels[i]]['detframe'].append(0)
+
+                print(self.dirmodel[channels[i]]['detframe'])
+
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+
+                #print(detframe[i])
+
+            # Stream results
+
+            #im0 = annotator.result()
+
+            #print(f'i = {i}')
+
+            #print(channels[i])
+
+            #print(postpretime[i])
+
+            #print(detframe[i])
+
+            if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1)>=1:
+
+                self.dirmodel[channels[i]]['detflag'] = True
+
+                self.dirmodel[channels[i]]['detpretime'] = time.time()
+
+            elif self.dirmodel[channels[i]]['detframe'].count(1)==0 :
+
+                self.dirmodel[channels[i]]['detflag'] = False
+
+                self.dirmodel[channels[i]]['detpretime'] = float('inf')
+
+            # Stream results
+
+            #im0 = annotator.result()
+
+            if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and time.time() - self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime']  and self.dirmodel[channels[i]]['detflag']:
+
+            #print()
+
+            #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
+
+                #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
+
+                #print(self.dirmodel[channels[i]]['detflag'])
+
+                print('post-------------------------------------------------------------------------')
+
+                #time.sleep(30)
+
+             #print(time.time() - postpretime[i])
+
+                #print('111111111111111111111111111111111111111111111111')
+
+                #print(dirmodel[channels[i]]['preim'].shape)
+
+                success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
+
+                content = encoded_image.tobytes()
+
+                successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
+
+                contentori = encoded_imageori.tobytes()
+
+                filename = f'{p.stem}_{int(time.time())}.jpg'
+
+                filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
+
+                print(f'str(p) {p.name}')
+
+                print(channels[i])
+
+                payload = {'channel': self.dirmodel[channels[i]]['channel'],
+
+                                   'classIndex': self.dirmodel[channels[i]]['classindex'],
+
+                                   'ip': self.dirmodel[channels[i]]['algip'],
+
+                                   'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
+
+                                   'videoUrl': channels[i]}
+
+                files = [
+
+                            ('file', (filename, content, 'image/jpeg')),
+
+                            ('oldFile', (filenameori, contentori, 'image/jpeg')),
+
+                        ]
+
+                try:
+
+                    result = requests.post(url, data=payload, files=files)
+
+                    print(result)
+
+                except Exception:
+
+                    print('posterror')
+
+                #time.sleep(3000)
+
+                self.dirmodel[channels[i]]['postpretime'] = time.time()
+
+                self.dirmodel[channels[i]]['detflag'] = False
+
+                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
+
+                year = time.strftime('%Y',time.localtime(time.time()))
+
+                month = time.strftime('%m',time.localtime(time.time()))
+
+                day = time.strftime('%d',time.localtime(time.time()))
+
+                savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+
+                savefold = Path(savefold)
+
+                savefold.mkdir(parents=True,exist_ok=True)
+
+                detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+
+                detsavefold = Path(detsavefold)
+
+                detsavefold.mkdir(parents=True,exist_ok=True)
+
+                cv2.imwrite(f'{savefold}/{timesave}.jpg',self.dirmodel[channels[i]]['oripreim'])
+
+                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',self.dirmodel[channels[i]]['preim'])
+
+            #if self.dirmodel[channels[i]]['detframe'].count(1)==0:
+
+            #    self.dirmodel[channels[i]]['detflag'] = False
+
+                #time.sleep(1)
+
+
+
+            self.view_img = False
+
+            if self.view_img:
+
+                if platform.system() == 'Linux' and p not in windows:
+
+                    windows.append(p)
+
+                    cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
+
+                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+
+                    cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
+
+                im1 = cv2.resize(im0, (1280, 720))
+
+                cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
+
+                cv2.waitKey(1)  # 1 millisecond
+
+
+
+            # Save results (image with detections)
+
+
+
+            # Print time (inference-only)
+
+            print(f'channels[i]={channels[i]}')
+
+            LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
+
+
+
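+    # load: daemon-thread loop that re-reads this model's row from the CHANGESTREAM table
+
+    # every 3 seconds into self.contentid (currently only printed).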
+    def load(self):
+
+        conn = sqlite3.connect(self.source)
+
+        c = conn.cursor()
+
+        while True:
+
+            #
+
+            # print("database opened successfully")
+
+
+
+            cursor = c.execute(
+
+                "SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
+
+            # content = cursor.fetchall()
+
+            # if content[0][1] ==1 or content[0][2] ==1:
+
+            #     c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
+
+            #     print("updata changestream")
+
+            #     conn.commit()
+
+            # cursor = c.execute(
+
+            # "SELECT modelname, addstream,delstream,streamimg  from CHANGESTREAM WHERE modelname='yolov5s'")
+
+            self.contentid = cursor.fetchall()
+
+            #global tag
+
+            #tag = Value('i', self.contentid[0][3])
+
+            #print(tag.value==1)
+
+            print(f'loadcontent={self.contentid[0][3]}')
+
+            time.sleep(3)
+
+        c.close()
+
+        conn.close()
+
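+    # readpoint: fetch this model's channel list from the task API and rebuild self.dirmodel
+
+    # with per-channel fence flag, fence points, labels, duration and alarm-state fields;
+
+    # returns the sorted channel list (empty when the model has no active task).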
+    def readpoint(self):
+
+        data = {
+
+                    "algorithmCode": modelalgdir[Path(self.opt.weights).stem],
+
+                    "deviceIp":None,
+
+                    'fwqCode':None
+
+                }
+
+        self.dirmodel = {}
+
+        result = requests.post(url=urlt,data=data).json()['data']
+
+        channell=[]
+
+        for info in result: 
+
+        #content = cursor.fetchall()
+
+        #self.dirmodel = {}
+
+        #for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
+
+            #address = f'{address[:-1]}0'
+
+            channel = info["deviceChannel"]
+
+            if Path(self.opt.weights).stem == "danager" and channel =='45':
+
+                continue
+
+            channell.append(channel)
+
+            self.dirmodel[channel] = {}
+
+            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
+
+            if Path(self.opt.weights).stem == "uniform":
+
+                self.dirmodel[channel]['fence'] = 1
+
+            #self.dirmodel[channel]['point'] = point
+
+            self.dirmodel[channel]['channel'] = info['deviceChannel']
+
+            self.dirmodel[channel]['classindex'] = info['algorithmCode']
+
+            self.dirmodel[channel]['ip'] = info['deviceIp']
+
+            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
+
+            dataele = {
+
+                    "algorithmCode": self.dirmodel[channel]['classindex'],
+
+                    "algorithmIp":self.dirmodel[channel]['algip'],
+
+                    "channel":self.dirmodel[channel]['channel']
+
+                }
+
+            resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+
+            resultele = resultele.split(',||')
+
+            resultele = tuple(resultele)
+
+            point = '%s:'*len(resultele) %resultele
+
+            if Path(self.opt.weights).stem == 'personcount':
+
+                resultper = requests.post(url=urlperson,data=dataele).json()['data']
+
+                personcountdir[channel] = int(resultper)
+
+            if (Path(self.opt.weights).stem == "uniform" or Path(self.opt.weights).stem == "fall") and len(point[:-2])<=1:
+
+                self.dirmodel[channel]['point'] = "150#144,1100#144,1100#550,150#550"
+
+            else:
+
+                self.dirmodel[channel]['point'] = point[:-2]
+
+            self.dirmodel[channel]['preim'] = None
+
+            self.dirmodel[channel]['oripreim'] = None
+
+            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
+
+            self.dirmodel[channel]['postpretime'] = 0
+
+            self.dirmodel[channel]['detflag'] = False
+
+            self.dirmodel[channel]['detpretime'] = float('inf')
+
+            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
+
+            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
+
+                datatime= {
+
+                        "algorithmCode": self.dirmodel[channel]['classindex'],
+
+                        "algorithmIp":self.dirmodel[channel]['algip'],
+
+                        "channel":self.dirmodel[channel]['channel']
+
+                        }
+
+                resulttime = requests.post(url=urltime,data=datatime).json()['data']
+
+                self.dirmodel[channel]['durtime'] = int(resulttime)
+
+            else:
+
+                self.dirmodel[channel]['durtime'] = 0
+
+            self.dirmodel[channel]['posttime'] = 0
+
+        print(self.dirmodel)
+
+        return sorted(channell)
+
+    #    str = str.split(":")
+
+    #    lista = []
+
+    #    for liststr in str:
+
+    #        if len(liststr) > 0:
+
+    #            li = liststr.split(',')
+
+    #            listpoint = []
+
+    #            for i, j in zip(li[::2], li[1::2]):
+
+    #                listpoint.append((i, j))
+
+    #            lista.append(listpoint)
+
+    #    return listpoint
+
+
+
+
+
+#def preprocess():
+
+#        print('preprocess-----------------------------------------------')
+
+
+
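+# getframe: producer loop for one worker process; it opens the streams with LoadStreamsSQLTN,
+
+# pushes (path, im, im0s, ...) batches into each model's newest queue between 07:00 and 18:00,
+
+# and about every 300 s re-queries the task API, restarting the loader if the channel set changed.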
+def getframe(queuelist,channelsl,source,tt,numworks,lock,numworkv):
+
+    while True:
+
+        print("dataloader")
+
+        imgsz = [768, 768]
+
+        print(f'source = {source}')
+
+        dataset = LoadStreamsSQLTN(channelsl,source, img_size=832,
+
+                                      auto=True, vid_stride=20, tt=tt,numworks = numworks)
+
+        bs = len(dataset)
+
+        vid_path, vid_writer = [None] * bs, [None] * bs
+
+        # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
+
+        # self.postpretime = [0]*bs
+
+        # Run inference
+
+
+
+        #imgsz = (1 , 3, *self.imgsz)
+
+        print(imgsz)
+
+        #self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
+
+        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+
+
+
+        #
+
+        # print("database opened successfully")
+
+        pretime = time.time()
+
+        tag = 0
+
+        sourcebase = 'project0117.db'
+
+        for path, im, im0s, vid_cap, s, videotime,channels in dataset:
+
+            # print('*'*21)
+
+            # global tag
+
+            # print('datasetvideo')
+
+            # if time.time()-pretime > 300:
+
+            #     pretime = time.time()
+
+
+
+            #     conn = sqlite3.connect(sourcebase)
+
+            #     c = conn.cursor()
+
+            #     cursor = c.execute("SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= 'helmet'")
+
+            #     contentid = cursor.fetchall()
+
+            #     tag = contentid[0][3]
+
+            # if tag == 1:
+
+            #     lock.acquire()
+
+            #     numworkv.value += 1
+
+            #     dataset.close()
+
+            #     if numworkv.value==3:
+
+            #         print('newstreaming=', tag)
+
+            #         conn = sqlite3.connect(source)
+
+            #         c = conn.cursor()
+
+            #         c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname='helmet'")
+
+            #         print(123)
+
+            #         conn.commit()
+
+            #         c.close()
+
+            #         conn.close()
+
+            #     lock.release()
+
+            #     print('opencv1')
+
+            #     # cv2.destroyAllWindows()
+
+            #     print('opencv')
+
+            #     break
+
+            # else:
+
+            #     print('nonewstreaming=', tag)
+
+            if time.time()-pretime > 300:
+
+                channellist = []
+
+                pretime = time.time()
+
+                data = {
+
+                    "algorithmCode": None,
+
+                    "deviceIp":None,
+
+                    "fwqCode":None
+
+
+
+                }
+
+                try:
+
+                    result = requests.post(url=urlt,data=data).json()['data']
+
+                except Exception:
+
+                    result = []
+
+                for info in result:
+
+                    data = {
+
+                        "channel": info["deviceChannel"],
+
+                    "ip": info["deviceAlgorithmIp"]
+
+                    }
+
+                    chaflag = any(info["deviceChannel"] in t for t in channellist)
+
+        #personcountdir[channel] = num
+
+                    if not chaflag:
+
+                        address = requests.post(url=urlrtsp,data=data).json()['msg']
+
+                        channellist.append((info['deviceChannel'],address))
+
+                channelsa  = []
+
+                sourcea = []
+
+                channellist = set(channellist)
+
+                channellist = sorted(channellist,key=lambda x:x[0])
+
+                #channellist = set(channellist)
+
+                for cha,add in channellist:
+
+                    channelsa.append(cha)
+
+                    sourcea.append(add)
+
+                channelsl = sorted(channelsl)
+
+                #channelsa = sorted(channelsa)
+
+                if channelsa!=channelsl and len(channelsa)>0:
+
+                    print(f'channelsa = {channelsa}')
+
+                    print(f'channelsl = {channelsl}')
+
+                    dataset.close()
+
+                    channelsl = channelsa
+
+                    source = sourcea
+
+                    break
+
+            for key,value in queuelist.items():
+                hour = time.localtime(time.time()).tm_hour
+                if hour in range(7,18):
+                    value[-1].put((path, im, im0s, vid_cap, s, videotime,channels))
+
+                    value[-1].get() if value[-1].qsize() == 10 else time.sleep(0.001)
+
+
+
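+# getmutpro: start `numworks` getframe producer processes, creating one Queue(maxsize=10) per
+
+# model per worker, and return a dict mapping each model name to its list of queues.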
+def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1,modellen=None):
+
+    processlist = []
+
+    queuelist = {}
+
+    for i in range(numworks):
+
+        for model in modellen:    
+
+            queue = Queue(maxsize=10)
+
+            queuelist.setdefault(model,[])
+
+            queuelist[model].append(queue)
+
+        process = Process(target=getframe,
+
+                        args=(queuelist, channels,source, i,numworks,lock,numworkv))
+
+        processlist.append(process)
+
+        process.start()
+
+        #queuelist.append(queue)
+
+    return queuelist
+
+
+
+#    path = []
+
+#    im0s = []
+
+#    vid_cap = None
+
+#    s = ''
+
+#    videotime = []
+
+#    while True:
+
+#        imlist = []
+
+#        pathlist = []
+
+#        im0slist = []
+
+#        channelslist = []
+
+#        vid_cap = None
+
+#        s = ''
+
+#        videotimelist = []
+
+#        for q in queuelist:
+
+#            if q.qsize()>0:
+
+#                setframe = q.get()
+
+#                path, im, im0s, vid_cap, s, videotime ,channels = setframe
+
+#                pathlist += path
+
+#                channelslist +=channels 
+
+#                im0slist += im0s
+
+#                videotimelist += videotime
+
+#                imlist.append(im)
+
+#        if len(imlist)>0:
+
+#            im = np.concatenate(imlist)
+
+#        if len(pathlist)>0:
+
+#            print(len(path),im.shape,len(im0s))
+
+#            streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
+
+            #print(f'streamlist = {len(streamlist)}')
+
+#        streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
+
+
+
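+# modelfun: target of each per-model consumer process; it builds a Detect instance for the
+
+# given weights and then blocks inside infer() reading from that model's frame queues.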
+def modelfun(queue,weights,sourcedb,classes,device,classify,conf_thres,runmodel,v8=False):
+
+    print(weights)
+
+    detectdemo=Detect(weights=weights,source=sourcedb,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
+
+    detectdemo.infer(queue,runmodel)
+
+
+
+def parse_opt():
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+
+    opt = parser.parse_args()
+
+    return opt
+
+
+
+
+
+def main(opt):
+
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+
+    run(**vars(opt))
+
+
+
+
+
+if __name__ == '__main__':
+
+    #torch.multiprocessing.set_start_method('spawn')
+
+    #set_start_method('spawn')
+
+    opt = parse_opt()
+    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+    processor = AutoProcessor.from_pretrained(r"/mnt/liq_weight/smolVLM")
+    model = AutoModelForVision2Seq.from_pretrained(
+        r"/mnt/liq_weight/smolVLM",
+        torch_dtype=torch.bfloat16,
+        # _attn_implementation="flash_attention_2" if DEVICE == "cuda" else "eager",
+    ).to(DEVICE)
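+    # NOTE: the SmolVLM processor/model loaded above are not referenced again in this __main__ block.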
+
+    dbpath = 'projectnew.db'
+
+    
+
+    conn = sqlite3.connect(dbpath)
+
+#
+
+# print("database opened successfully")
+
+    c = conn.cursor()
+
+    task(c,conn,urlt,urla)
+
+    cursor = c.execute('select channel,algip  from stream ')
+
+    result = cursor.fetchall()
+
+    for channel ,algip in result:
+
+        data = {
+
+        "channel": channel,
+
+        "ip":algip
+
+        }
+
+        #personcountdir[channel] = num
+
+        address = requests.post(url=urlrtsp,data=data).json()['msg']
+
+        c.execute('UPDATE STREAM set address= (?) where channel =(?)',(address,channel))
+
+    conn.commit()
+
+    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname= 'arm' or modelname = 'persontre' or modelname = 'bag'")
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+
+    content = cursor.fetchall()
+
+    cursor = c.execute("SELECT address,channel from STREAM ")
+
+    #cursor = c.execute("SELECT address from STREAM where modelname = 'helmet'")
+
+    contenta = cursor.fetchall()
+
+    source = []
+
+    modellist = []
+
+    addcha = []
+
+    channellist = []
+
+    for i in contenta:
+
+        addcha.append((i[0],i[1]))
+
+        #modellist.append(i[1])
+
+    addcha = set(addcha)
+
+    addcha = sorted(addcha,key=lambda x:x[1])
+
+    for add,cha in addcha:
+
+        source.append(add)
+
+        channellist.append(cha)
+
+    #source = set(source)
+
+    print(addcha)
+
+    source = list(source)
+
+    cursor = c.execute("SELECT modelname from STREAM where (modelname ='helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty'  or modelname = 'sleep'  or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag')")
+
+    contentm = cursor.fetchall()
+
+    for m in contentm:
+
+        modellist.append(m[0])
+
+    modellist = set(modellist)
+
+    modellist = list(modellist)
+
+    contentlist = []
+
+    for i in content:
+
+        contentlist.append(i[0])
+
+    #source.sort()
+
+    n = len(content)
+
+    print(f'modelname={n}')
+
+    print(content)
+
+    #content.reverse()
+
+    print(content)
+
+    print(source)
+
+    # main(opt)
+
+    #processes = []
+
+    streamqueue = Queue(maxsize=4)
+
+    numworkv = Value('i', 0)
+
+    manager = Manager()
+
+    lock = multiprocessing.Lock()
+
+    streamlist = manager.list()
+
+    numworks = 7
+
+    modellen = []
+
+    for i in modellist:
+
+        if i in contentlist:
+
+            modellen.append(i)
+
+    queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks,modellen)
+
+    deid = 0
+
+    #pool = ThreadPoolExecutor(max_workers=n)
+
+    runmodel = manager.dict()
+
+    while True:
+
+        for i in modellist:
+
+            if i in contentlist:
+
+                if i not in runmodel:
+
+            #print(i)
+
+            #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+
+                    c.execute('select conf,cla from changestream where modelname = (?)',(i,))
+
+                    rea = c.fetchall()
+
+                    print(f'weights = {i}.pt')
+
+                    if i in ['duty','danager','inspection','cross','personcount']:
+
+                        process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,[0],0,rea[0][1],rea[0][0],runmodel,True))
+
+                    else:
+
+                        if i in ['fall','extinguisher']:
+
+                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel))
+
+                        else:
+
+                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel,True))
+
+
+
+                #elif i in ['helmet','fire','smoke','fall']:
+
+                #process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,True))
+
+                #else:
+
+                #process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,False))
+
+
+
+                #processes.append(process)
+
+                #process.start()
+
+                #detectobj = Process(target=detectdemo.infer,args=(queue,))
+
+                # Detect(weights=f'{i[0]}.pt')
+
+
+
+                    time.sleep(3)
+
+                    process.start()
+
+                    deid = deid+1
+
+                    runmodel[i] = 1
+
+        time.sleep(600)
+
+        task(c,conn,urlt,urla)
+
+        #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
+
+        #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+
+        cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname ='duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag'")
+
+        content = cursor.fetchall()
+
+        contentlist = []
+
+        for con in content:
+
+            contentlist.append(con[0])
+
+        #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+
+        cursor = c.execute("SELECT address,channel from STREAM ")
+
+        contenta = cursor.fetchall()
+
+        source = []
+
+        modellist = []
+
+        addcha = []
+
+        channellist = []
+
+        for i in contenta:
+
+            addcha.append((i[0],i[1]))
+
+            #modellist.append(i[1])
+
+        addcha = set(addcha)
+
+        addcha = sorted(addcha,key=lambda x:x[1])
+
+        for a,cha in addcha:
+
+            source.append(a)
+
+            channellist.append(cha)
+
+        print(addcha)
+
+        #source = set(source)
+
+        source = list(source)
+
+        #source.sort()
+
+        cursor = c.execute("SELECT modelname from STREAM where (modelname = 'helmet' or modelname = 'smoke' or modelname = 'uniform' or modelname = 'fire' or modelname = 'duty'  or modelname = 'sleep' or modelname='occupancy' or modelname = 'personcar' or modelname = 'phone' or modelname = 'reflective' or modelname = 'extinguisher' or modelname = 'danager' or modelname = 'inspection' or modelname = 'cross' or modelname = 'personcount' or modelname = 'arm' or modelname = 'persontre' or modelname = 'bag')")
+
+        contentm = cursor.fetchall()
+
+        for m in contentm:
+
+            modellist.append(m[0])
+
+        modellist = set(modellist)
+
+        n = len(content)
+
+        print(f'modelname={n}')
+
+        print(content)
+
+        #content.reverse()
+
+        print(content)
+
+        #pool.submit(detectobj.infer)
+
+
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+
+    #content = cursor.fetchall()
+
+    #n = len(content)
+
+    #print(f'modelname={n}')
+
+    #print(content)
+
+    #content.reverse()
+
+    #print(content)
+
+    # main(opt)
+
+    #processes = []
+
+    #pool = ProcessPoolExecutor(max_workers=n)
+
+    #for i in content:
+
+        #print(i)
+
+        #detectdemo=Detect(weights=f'{i[0]}.pt')
+
+        #process = Process(target=detectdemo.infer)
+
+        #processes.append(process)
+
+        #process.start()
+
+        #detectobj = Detect(weights=f'{i[0]}.pt')
+
+    #    time.sleep(3)
+
+        #pool.submit(detectobj.infer)
+
+    #    print('111111111111111111111111111111111111111111111111111111111')
+
+        #pool.submit(TestA().func1, '张三', i)
+
+        #print('----------------------------------------------------------------')
+
+    #time.sleep(3000)
+
+    # wait for all processes to finish
+
+    #for process in processes:
+
+    #    process.join()
+
+
+
+        #pool.submit(Detect(weights=f'{i[0]}.pt').infer)
+
+    # if isinstance(opt.weights,list):
+
+    #     opt.weights = opt.weights[0]
+
+    #signal.signal(signal.SIGINT, my_handler)
+
+    #detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
+
+    #detectdemo1.infer()
+
+    #a = Test
+
+    #with ProcessPoolExecutor(3) as ppool:
+
+        #for i in range(3):
+
+    #        print('hello')
+
+                        #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+
+            #ppool.submit(TestA().func1, '张三', i)
+
+    #ta = TestA()
+
+    #with ProcessPoolExecutor(5) as ppool: # create a pool of 5 worker processes
+
+    #    for i in range(1, 4):
+
+    #        ppool.submit(func1, '张三', i)
+
+    #f1= pool.submit(detectdemo1.infer)
+
+    # print("线程1-----------------------------------------------------------------------------------")
+
+    #detectdemo2 = Detect(weights=r"helmet.pt")
+
+    #f2=pool.submit(detectdemo2.infer)
+
+    # print("线程2-------------------------------------------------------------------------------------")
+
+    #detectdemo3 = threading.Thread(target=detectdemo3.infer)
+
+    #detectdemo3 = Detect(weights=r"fall.pt")
+
+    #f3=pool.submit(detectdemo3.infer)
+
+

+ 1193 - 0
detectopencvmutfall0207.py

@@ -0,0 +1,1193 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+import matplotlib.path as mat
+import requests
+import argparse
+import os
+import platform
+import sqlite3
+import sys
+import threading
+import time
+from pathlib import Path
+import signal
+import torch
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import Process,Manager,Value
+from multiprocessing import Queue
+from multiprocessing import set_start_method
+import multiprocessing
+import multiprocessing as mp
+import numpy as np
+from torchvision import transforms
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLT,LoadStreamsSQLTN
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,apply_classifier1,compute_IOU,task)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import select_device, smart_inference_mode
+from utils.renwu import newHelmet, newUniform, Fall, Personcount, Arm, Bag, Cross, Extinguisher, Persontre, Danager
+import torch.nn as nn
+
+import torch.nn.functional as F
+from ultralytics import YOLO
+
+
+from pydantic import BaseModel
+#from testpool import func1,TestA
+
+# def my_handler(signum, frame):
+#     exit(0)
+#url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+url = "http://172.19.152.231/open/api/operate/upload"
+urlrtsp = "http://172.19.152.231/open/api/operate/previewURLs"
+urlt = "http://172.19.152.231/open/api/operate/taskList"
+urla = "http://172.19.152.231/open/api/operate/algorithmList"
+urlele = "http://172.19.152.231/open/api/operate/fence"
+urltime = "http://172.19.152.231/open/api/operate/getTime"
+urlperson = "http://172.19.152.231/open/api/operate/getPersonLimitNum"
+#modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
+#algmodel = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
+modelnamedir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'duty','98':'face','55':'oil','52':'jingdian','53':'rope','54':'personcar','39':'inspection','11':'reflective','12':'phone','66':'extinguisher','67':'tizi','68':'menjin','35':'arm','36':'persontre','33':'bag'}
+modellabeldir = {'0':'head,person','8':'person','10':'black_work_clothes,blue_work_clothes,person','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','34':'personcount','37':'other','38':'person','98':'face','55':'oil','52':'person,hand,ball','53':'rope','54':'person','39':'person','11':'blue,greent,whitet,bluecoat,whitebarcoat,graycoat,baoan,chenyi,other','12':'phone','66':'extinguisher','67':'person,tizi','68':'person','35':'barearm','36':'person,foot,cart,bag,box','33':'handbox,handbag'}
+modelalgdir = {}
+personcountdir = {}
+for key,value in modelnamedir.items():
+    modelalgdir[value] = key
+taskmap = {'helmet':newHelmet,'uniform':newUniform,'fall':Fall,'personcount':Personcount,'arm':Arm,'bag':Bag,'cross':Cross,'extinguisher':Extinguisher,'persontre':Persontre,'danager':Danager}
+mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
+test = transforms.Compose([transforms.Resize((224,224)),
+        #transforms.CenterCrop(224),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=mean, std=std)
+                           ])
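+# clapre runs the secondary classifier on a batch of cropped detections and returns the points of
+# the crops whose predicted class index is below 5, or None if no crop qualifies.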
+def clapre(modelcla,claimg,clapoint):
+    imgten = torch.stack(claimg,dim=0)
+    clapoint = torch.stack(clapoint,dim=0)
+    imgten = imgten.to(0)
+    result = modelcla(imgten)
+    result = F.softmax(result, dim=1)
+    print(result)
+    index = result.argmax(1)
+    index = index.cpu().numpy()
+    index = np.argwhere(index<5)
+    index = index.reshape(-1)
+    print(index)
+    if len(index)>0:
+        print(clapoint[index])
+        return clapoint[index]
+    else:
+        return None
+
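+# Lightweight pose classifier: a two-layer MLP over the flattened (nnode x nfeature) keypoint
+# tensor, used below to decide whether a detected person is falling.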
+class Model(nn.Module):
+    def __init__(self, A, nnode, nfeature, nclass):
+        super().__init__()
+        self.fc1 = nn.Linear(nnode * nfeature, 512)
+        self.fc2 = nn.Linear(512, nclass)
+
+    def forward(self, x):
+        x = x.view(-1, int(x.size(1) * x.size(2)))
+        x = F.relu(self.fc1(x))
+        x = F.dropout(x, 0.7, training=self.training)
+        return self.fc2(x)
+
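+# extract_keypoint flattens the 13 keypoints used by the fall classifier (nose, shoulders, elbows,
+# wrists, hips, knees, ankles) into a single [x, y, x, y, ...] list; eyes and ears are skipped.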
+def extract_keypoint(get_keypoint, keypoint):
+    # nose
+    nose_x, nose_y = keypoint[get_keypoint.NOSE]
+    # eye
+    # left_eye_x, left_eye_y = keypoint[get_keypoint.LEFT_EYE]
+    # right_eye_x, right_eye_y = keypoint[get_keypoint.RIGHT_EYE]
+    # # ear
+    # left_ear_x, left_ear_y = keypoint[get_keypoint.LEFT_EAR]
+    # right_ear_x, right_ear_y = keypoint[get_keypoint.RIGHT_EAR]
+    # shoulder
+    left_shoulder_x, left_shoulder_y = keypoint[get_keypoint.LEFT_SHOULDER]
+    right_shoulder_x, right_shoulder_y = keypoint[get_keypoint.RIGHT_SHOULDER]
+    # elbow
+    left_elbow_x, left_elbow_y = keypoint[get_keypoint.LEFT_ELBOW]
+    right_elbow_x, right_elbow_y = keypoint[get_keypoint.RIGHT_ELBOW]
+    # wrist
+    left_wrist_x, left_wrist_y = keypoint[get_keypoint.LEFT_WRIST]
+    right_wrist_x, right_wrist_y = keypoint[get_keypoint.RIGHT_WRIST]
+    # hip
+    left_hip_x, left_hip_y = keypoint[get_keypoint.LEFT_HIP]
+    right_hip_x, right_hip_y = keypoint[get_keypoint.RIGHT_HIP]
+    # knee
+    left_knee_x, left_knee_y = keypoint[get_keypoint.LEFT_KNEE]
+    right_knee_x, right_knee_y = keypoint[get_keypoint.RIGHT_KNEE]
+    # ankle
+    left_ankle_x, left_ankle_y = keypoint[get_keypoint.LEFT_ANKLE]
+    right_ankle_x, right_ankle_y = keypoint[get_keypoint.RIGHT_ANKLE]
+    
+    return [
+        nose_x, nose_y ,
+        left_shoulder_x, left_shoulder_y ,
+        right_shoulder_x, right_shoulder_y,
+        left_elbow_x, left_elbow_y,  
+        right_elbow_x, right_elbow_y, 
+        left_wrist_x, left_wrist_y,
+        right_wrist_x, right_wrist_y,
+        left_hip_x, left_hip_y, 
+        right_hip_x, right_hip_y, 
+        left_knee_x, left_knee_y, 
+        right_knee_x, right_knee_y, 
+        left_ankle_x, left_ankle_y, 
+        right_ankle_x, right_ankle_y
+    ]
+
+
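+# COCO-style keypoint index mapping as produced by the YOLO pose model.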
+class GetKeypoint(BaseModel):
+    NOSE:           int = 0
+    LEFT_EYE:       int = 1
+    RIGHT_EYE:      int = 2
+    LEFT_EAR:       int = 3
+    RIGHT_EAR:      int = 4
+    LEFT_SHOULDER:  int = 5
+    RIGHT_SHOULDER: int = 6
+    LEFT_ELBOW:     int = 7
+    RIGHT_ELBOW:    int = 8
+    LEFT_WRIST:     int = 9
+    RIGHT_WRIST:    int = 10
+    LEFT_HIP:       int = 11
+    RIGHT_HIP:      int = 12
+    LEFT_KNEE:      int = 13
+    RIGHT_KNEE:     int = 14
+    LEFT_ANKLE:     int = 15
+    RIGHT_ANKLE:    int = 16
+
+class Box(BaseModel):
+    left : int 
+    top  : int
+    right : int
+    bottom : int
+    box_conf : float
+    pose_classifer_conf : float
+    label : str
+
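+# YoloOpt mirrors the command line options of the stock detect.py so the rest of the code can keep
+# reading self.opt.<name> even though this script is driven programmatically.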
+class YoloOpt:
+    def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
+                 imgsz=(640,640),
+                 conf_thres=0.25,
+                 iou_thres=0.45,
+                 max_det=1000,
+                 device='',
+                 view_img=False,
+                 save_txt=False,
+                 save_conf=False,
+                 save_crop=False,
+                 nosave=True,
+                 classes=None,
+                 agnostic_nms=False,
+                 augment=False,
+                 visualize=False,
+                 update=False,
+                 project=ROOT / 'runs/detect',
+                 name='exp',
+                 exist_ok=False,
+                 line_thickness=1,
+                 hide_labels=False,
+                 hide_conf=False,
+                 half=False,
+                 dnn=False,
+                 vid_stride=10,
+                 classify=False,
+                 v8=False):
+
+        self.weights = weights  # path to the weights file
+        self.source = source  # source to run inference on
+        self.data = data
+        self.imgsz = (640, 640) if imgsz is None else imgsz  # input image size, default (640, 640)
+        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
+        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
+        self.device = device  # device to run inference on
+        self.view_img = view_img  # whether to display predictions, default False
+        self.classes = classes  # keep only these classes; default keeps all
+        self.agnostic_nms = agnostic_nms  # class-agnostic NMS, default False
+        self.augment = augment  # augmented inference (TTA / multi-scale), can improve accuracy
+        self.update = update  # if True, strip_optimizer removes optimizer state from the .pt file, default False
+        self.exist_ok = exist_ok  # if True, reuse an existing project/name directory instead of incrementing, default False
+        self.project = project  # directory for run outputs; not used by this program
+        self.name = name  # name of each run; not used by this program either
+        self.max_det = max_det
+        self.save_txt = save_txt
+        self.save_conf= save_conf
+        self.save_crop= save_crop
+        self.nosave = nosave
+        self.visualize = visualize
+        self.line_thickness = line_thickness
+        self.hide_labels = hide_labels
+        self.hide_conf = hide_conf
+        self.half = half
+        self.dnn = dnn
+        self.vid_stride = vid_stride
+        self.classify = classify
+        self.v8 = v8
+class Detect:
+    def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classes=None,device=None,classify=False,conf_thres=0.25,v8=False):
+        print(f'detectweights = {weights}')
+        if v8:
+            from ultralytics.nn.autobackend import AutoBackend
+            from ultralytics.utils.ops import non_max_suppression
+        else:
+            from utils.general import non_max_suppression
+        self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
+        self.source = str(self.opt.source)
+        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
+        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+        self.webcam = self.source.isnumeric() or source.endswith('.db') or (is_url and not is_file)
+        screenshot = self.source.lower().startswith('screen')
+        if is_url and is_file:
+            self.source = check_file(self.source)  # download
+        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
+        #self.save_dir = self.save_dir / Path(self.opt.weights).stem
+        #self.save_dir.mkdir(parents=True, exist_ok=True)
+        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+        print(f'device = {self.opt.device}')
+        device = select_device(self.opt.device)
+        self.device = device
+        # if v8:
+        #     self.model = AutoBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+        #     if Path(weights).stem in ['arm']:
+        #         self.personmodel = AutoBackend('yolov8m.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+        # else:
+        #     self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+        #     if Path(weights).stem in ['helmet','arm','uniform']:
+        #         self.personmodel = DetectMultiBackend('personcount.pt', device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)        
+        # self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+        # self.classify = classify
+        # if self.classify:
+        #     classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
+        #     self.classifier_model = classifier_model.to(device)
+        #     self.classifier_model.eval()
+        # self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
+        # self.model.warmup(imgsz=(1 , 3, *self.imgsz))
+        self.get_keypoint = GetKeypoint()
+
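+        # Fall pipeline wired up below: a YOLO pose model proposes person boxes and keypoints, a
+        # separate YOLO person detector is used to confirm those boxes, and the small MLP above
+        # classifies the normalized keypoints as falling / not falling.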
+        # load model
+        self.pose_classfier_model = Model(None, 13, 2, 2)
+        self.pose_classfier_model.load_state_dict(torch.load("posefallcls.pt"))
+        self.pose_classfier_model.eval()
+        self.pose_classfier_model.to(device)
+        self.model = YOLO("posefall.pt")
+        self.model = self.model.to(device)
+        self.personmodel = YOLO('yolo11m.pt')
+        self.personmodel = self.personmodel.to(device)
+        self.readpoint()
+        #print(self.imgsz)
+        self.updatetime = time.time()
+        self.updatemtime = time.time()
+        self.filetime = os.path.getmtime(self.opt.weights)
+        self.taskname = taskmap[Path(self.opt.weights).stem]()
+        bs = 1  # batch_size
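+        # Note: only the webcam/.db branch below is exercised by this script; the screenshot and
+        # image branches still reference self.imgsz / self.stride / self.pt from the commented-out
+        # DetectMultiBackend load (and LoadScreenshots is not imported), so they are effectively dead.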
+        if self.webcam:
+            #self.view_img = check_imshow(warn=True)
+            self.view_img = False
+            # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+            # bs = len(dataset)
+        elif screenshot:
+            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
+        else:
+            dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
+        t1 = threading.Thread(target=self.load,daemon=True)
+        t1.start()
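+    # infer consumes preprocessed frame bundles from the per-model queues filled by getframe(),
+    # refreshes the channel configuration every 5 minutes via readpoint(), and hands each bundle to
+    # postprocess(); it removes itself from runmodel and exits when no channel is configured any more.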
+    @smart_inference_mode()
+    def infer(self,queue,runmodel):
+            pretime = time.time()
+            seen, windows, self.dt = 0, [], (Profile(), Profile(), Profile())
+            #
+            # print("database opened successfully")
+            while True:
+                #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+                if time.time()-pretime>300:
+                    ret = self.readpoint()
+                    pretime = time.time()
+                    if not ret:
+                        print(f'{Path(self.opt.weights).stem} {runmodel}')
+                        runmodel.pop(Path(self.opt.weights).stem)
+                        print(f'{Path(self.opt.weights).stem} {runmodel}')
+                        break
+                print(f'queuelen = {len(queue)}')
+                for que in queue:
+                    if que.qsize() == 0:
+                        print('queuezero')
+                        time.sleep(0.01)
+                    if que.qsize() > 0:
+                        #if time.time()-pretime>300:
+                        #    ret = self.readpoint()
+                        #    pretime = time.time()
+                        #    if not ret:
+                        #        print(f'{Path(self.opt.weights).stem} {runmodel}')
+                        #        runmodel.pop(Path(self.opt.weights).stem)
+                        #        print(f'{Path(self.opt.weights).stem} {runmodel}')
+                        #        break
+                        setframe = que.get()
+                    # print('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')
+                    #if setframe is not None
+                        path, im, im0s, vid_cap, s, videotime ,channels = setframe
+                        # algchannel = list(self.dirmodel.keys())
+                        # print(algchannel)
+                        # print(path)
+                        # algchannel = np.array(algchannel)
+                        # channelsnp = np.array(channels)
+                        # algindex = np.where(np.in1d(channelsnp, algchannel))[0]
+                        # algindex = list(algindex)
+
+                        # path = np.array(path)
+                        # path = path[algindex]
+                        # path = path.tolist()
+                        # channels = np.array(channels)
+                        # channels = channels[algindex]
+                        # channels = channels.tolist()
+                   # print(algindex)
+                        # if len(algindex)==0:
+                        #     continue
+                    #for ia in algindex:
+                    #    print(type(im0s[ia]))
+                    #    print(im0s[ia].shape)
+                        #im = im[algindex]
+                    #for ia in algindex:
+                    #    print(type(ia))
+                        # try: 
+                        #     im0s = np.asarray(im0s)
+                        # except Exception:
+                        #     im0s = np.asarray(im0s,dtype=object)
+                    #     print(im0s.shape)
+                    #     im0s = im0s[algindex]
+                    # # im0s = im0s.tolist()
+                    #     print(f'algindex = {algindex}')
+                    #     print(f'im0s ={im0s[0].shape}')
+                    #     videotime = np.array(videotime)
+                    #     videotime = videotime[algindex]
+                    #     videotime = tuple(map(tuple, videotime))
+                    # global tag
+                    # if self.contentid[0][3] == 1 :
+                    #    dataset.close()
+                    #    print('newstreaming=', self.contentid[0][3])
+                    #    conn = sqlite3.connect(self.source)
+                    #    c = conn.cursor()
+                    #    c.execute("UPDATE CHANGESTREAM set streamimg = 0 , addstream=0,delstream=0 where modelname= (?)",(Path(self.opt.weights).stem,))
+                    #    print(123)
+                    #    conn.commit()
+                    #    c.close()
+                    #    conn.close()
+                    #    print('opencv1')
+                    # cv2.destroyAllWindows()
+                    #    print('opencv')
+                    #    break
+                    # else:
+                    #    print('nonewstreaming=', self.contentid[0][3])
+                    #     with self.dt[0]:
+                    #         im = torch.from_numpy(im).to(self.model.device)
+                    #         im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
+                    #         im /= 255  # 0 - 255 to 0.0 - 1.0
+                    #         if len(im.shape) == 3:
+                    #             im = im[None]  # expand for batch dim
+
+                    # # Inference
+                    #     with self.dt[1]:
+                    #         visualize = increment_path(self.save_dir / Path(path).stem,
+                    #                                mkdir=True) if self.opt.visualize else False
+                    #     #print('error')
+                    #     # print(self.model)
+                    #         pred = self.model(im, augment=self.opt.augment, visualize=visualize)
+                        self.postprocess(path, im0s, im, s, videotime,channels)
+               # print(f'predshape= {')
+
+            # NMS
+                #processlist = []
+            #for i in range(3):
+            #    process = Process(target=self.postprocess,args=(pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s))
+            #    process = Process(target=self.preprocess)
+            #    process.start()
+            #    processlist.append(process)
+            #for j in processlist:
+            #    j.join()
+            #with ProcessPoolExecutor(3) as ppool:
+                #for i in range(3):
+            #        print('hello')
+                    #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+                    #ppool.submit(func1, '张三', i)
+                    #ppool.submit(self.preprocess)
+            #self.postprocess(pred, path, im0s, dataset, im, s)
+            #process = Process(target=self.postprocess, args=(pred, path, im0s, dataset, im, s))
+                #self.postprocess(pred, path, im0s, im, s,videotime)
+                #process.start()
+    #def preprocess(self):
+    #    print('preprocess-----------------------------------------------')
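+    # postprocess runs the pose and person models on the batch, keeps pose boxes that overlap a
+    # person detection (IoU > 0.5), normalizes their keypoints, classifies each candidate as falling,
+    # and, after the per-channel debounce and duration checks, uploads the annotated and original frames.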
+    def postprocess(self, path, im0s, im, s, videotime, channels):
+        
+        # if time.time()-self.updatemtime>300:
+        #     if self.filetime !=os.path.getmtime(self.opt.weights):
+        #         device = select_device(self.opt.device)
+        #         print("load new load")
+        #         self.model = DetectMultiBackend(self.opt.weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+        #         self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
+        #     #try:
+        #     #if  modelalgdir[Path(self.opt.weights).stem]!='0':
+        #     print(modelalgdir[Path(self.opt.weights).stem])
+        #     try:
+        #         rea = requests.post(url=urla,data={'algorithmCode':modelalgdir[Path(self.opt.weights).stem]}).json()['data']
+        #         con = rea[0]['confidence']
+        #         self.opt.conf_thres = con
+        #     except Exception:
+        #         print('error')
+        #     #else:
+        #     #    self.opt.conf_thres = 0.25
+        #     #except Exception:
+        #         #print('posturlaerror')
+        #     self.updatemtime = time.time()
+
+        # seen = 0
+        # # dt = (Profile(), Profile(), Profile())
+        # print(f'senn = {seen}')
+        # windows = []
+        # if Path(self.opt.weights).stem:
+        #     labelnamelist = []
+        # with self.dt[2]:
+        #     #print(f'cropshape={pred.shape}')
+        #     if self.opt.v8:
+        #         from ultralytics.utils.ops import non_max_suppression
+        #     else:
+        #         from utils.general import non_max_suppression
+        #     pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes,
+        #                                self.opt.agnostic_nms, max_det=self.opt.max_det)
+
+        #     # Second-stage classifier (optional)
+        #     # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+        #     if self.classify and Path(self.opt.weights).stem!='persontre': 
+        #         pred = apply_classifier1(pred,self.classifier_model,im,im0s,Path(self.opt.weights).stem)
+        #     # Process predictions
+        #     #print(f'predshape={pred.shape}')
+        # for i, det in enumerate(pred):  # per image
+        #     if time.time()-self.updatetime>300:
+        #         dataele = {
+        #             "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+        #             "algorithmIp":self.dirmodel[channels[i]]['algip'],
+        #             "channel":self.dirmodel[channels[i]]['channel']
+        #         }
+        #         try:
+        #             resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+        #             resultele = resultele.split(',||')
+        #             resultele = tuple(resultele)
+        #             point = '%s:'*len(resultele) %resultele
+        #             self.dirmodel[channels[i]]['point'] = point[:-2]
+        #         except Exception:
+        #             print('post error')
+        #         if Path(self.opt.weights).stem == 'personcount':
+        #             try:
+        #                 resultper = requests.post(url=urlperson,data=dataele).json()['data']
+        #                 personcountdir[channels[i]] = int(resultper)
+        #             except Exception:
+        #                 print('urlpersonerror')
+        #         if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
+        #             datatime= {
+        #                             "algorithmCode": self.dirmodel[channels[i]]['classindex'],
+        #                             "algorithmIp":self.dirmodel[channels[i]]['algip'],
+        #                             "channel":self.dirmodel[channels[i]]['channel']
+        #                         }
+        #             try:
+        #                 resulttime = requests.post(url=urltime,data=dataele).json()['data']
+        #                 self.dirmodel[channel]['durtime'] = int(resulttime)
+        #             except Exception:
+        #                 print('posttime error')
+        #         self.updatetime = time.time()
+        #     seen += 1
+        #     if self.webcam:  # batch_size >= 1
+        #         p, im0 = path[i], im0s[i].copy()
+        #         s += f'{i}: '
+        #     else:
+        #         p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+        #     p = Path(p)  # to Path
+
+        #     save_path = str(self.save_dir / p.name)  # im.jpg
+        #     #txt_path = str(self.save_dir / 'labels' / p.stem) + (
+        #        # '' #if dataset.mode == 'image' else f'_{frame}')  # im.txt
+        #     s += '%gx%g ' % im.shape[2:]  # print string
+        #     gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+        #     imc = im0.copy()  # for save_crop
+        #     annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
+        #     flag = False
+        #     if len(det) and Path(self.opt.weights).stem != 'duty':
+        #         #flag = True
+        #         # Rescale boxes from img_size to im0 size
+        #         det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+        #         # Print results
+        #         for c in det[:, 5].unique():
+        #             n = (det[:, 5] == c).sum()  # detections per class
+        #             s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+        #         # Write results
+        #         if Path(self.opt.weights).stem in ['arm']:
+        #             personpred = self.personmodel(im[i][None], None, None)
+        #             personpred = non_max_suppression(personpred, self.opt.conf_thres, self.opt.iou_thres, 0,
+        #                                              self.opt.agnostic_nms, max_det=self.opt.max_det)
+        #             if len(personpred[0])==0:
+        #                flag = False
+        #             else:
+        #                 persondet = []
+        #                 personpred =  personpred[0]
+        #                 personpred[:, :4] = scale_boxes(im.shape[2:], personpred[:, :4], im0.shape).round()
+        #                 for *perxyxy,conf,cls in reversed(personpred):
+        #                     print(perxyxy)
+        #                     x1,y1,x3,y3 = perxyxy
+        #                     x1,y1,x3,y3 = int(x1),int(y1),int(x3),int(y3)
+        #                     x2,y2 = x3,y1
+        #                     x4,y4 = x1,y3
+        #                     persondet.append([[x1,y1],[x2,y2],[x3,y3],[x4,y4]])
+        #                 flag = self.taskname.getflag(det,persondet,annotator,self.dirmodel[channels[i]]['fence'],self.dirmodel[channels[i]]['point'],self.names,self.dirmodel[channels[i]]['label'])
+        #         else:
+        #             if Path(self.opt.weights).stem in ['personcount']:
+        #                 flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+        #                                          self.dirmodel[channels[i]]['point'], self.names,
+        #                                          self.dirmodel[channels[i]]['label'],personcountdir[channels[i]])
+        #             elif Path(self.opt.weights).stem in ['persontre']:
+        #                 flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+        #                                          self.dirmodel[channels[i]]['point'], self.names,
+        #                                          self.dirmodel[channels[i]]['label'],1,imc)
+        #             else:
+        #                 flag = self.taskname.getflag(det, None,annotator, self.dirmodel[channels[i]]['fence'],
+        #                                          self.dirmodel[channels[i]]['point'], self.names,
+        #                                          self.dirmodel[channels[i]]['label'])
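+        # Active fall-detection path; the commented block above is the earlier YOLOv5-based flow.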
+        yolo_pose_results = self.model.predict(im0s,conf=0.8)
+        yolo_person_results = self.personmodel.predict(im0s,conf=0.8,classes=0)
+        for i,(result ,personresult) in enumerate(zip(yolo_pose_results,yolo_person_results)):
+            flag = False
+            boxes = result.boxes.xyxy.cpu().numpy().tolist()
+            personboxes = personresult.boxes.xyxy.cpu().numpy().tolist()
+            confs = result.boxes.conf.cpu().numpy().tolist()
+            all_keypoints = result.keypoints.data.cpu().numpy().tolist()
+            pose_classfier_results = []
+            for box, conf, keypoints in zip(boxes, confs, all_keypoints):
+                iouflag = False
+                for personbox in personboxes:
+                    iou,_ = compute_IOU(box,personbox)
+                    print(f'judgeiou = {iou}')
+                    if iou >0.5:
+                        iouflag = True
+                        break
+                if not iouflag:
+                    continue  # this pose box overlaps no confirmed person box; skip it instead of aborting the loop
+                x1, y1, x2, y2 = box
+                x, y, w, h = x1, y1, x2 - x1, y2 - y1
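+                # Shift each visible keypoint by the box origin, scale by the box size and centre on 0,
+                # so the classifier sees box-relative coordinates in roughly [-0.5, 0.5].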
+                n_keypoints = [[(kp[0] - x) / w - 0.5, (kp[1] - y) / h - 0.5] if kp[0] > 0 and kp[1] > 0 else kp[:2] for kp in keypoints]
+                n_keypoints = extract_keypoint(self.get_keypoint, n_keypoints)
+                if n_keypoints[-12:].count(0) >= 2 * 2:
+                    continue
+                if n_keypoints.count(0) >= 4 * 2:
+                    continue
+                if w < h:
+                    continue
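+                # 13 keypoints x (x, y) -> shape (1, 13, 2); output index 0 of the classifier is "falling".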
+                pose_data = torch.Tensor([n_keypoints]).to(self.device)
+                pose_data = pose_data.reshape(1, 13, 2)
+                with torch.no_grad():
+                    p = self.pose_classfier_model(pose_data)
+                    prob = F.softmax(p, dim=1)
+                    index = prob.argmax()
+                    if index == 0:
+                        score = float(prob[0][index].cpu().numpy())
+                        pose_classfier_results.append(
+                            Box(left=x1, top=y1, right=x2, bottom=y2, box_conf=conf, pose_classifer_conf=score, label="falling"))
+            image = im0s[i]
+            imc = im0s[i].copy()
+            for res in pose_classfier_results:
+                flag = True
+                cv2.rectangle(image, (int(res.left), int(res.top)), (int(res.right), int(res.bottom)), (0, 255, 0), 2)
+            if flag:
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+                self.dirmodel[channels[i]]['detframe'].append(1)
+                self.dirmodel[channels[i]]['preim'] = image
+                self.dirmodel[channels[i]]['oripreim'] = imc
+                self.dirmodel[channels[i]]['posttime'] = videotime[i]
+                print(self.dirmodel[channels[i]]['detframe'])
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+            else:
+                #print(f'deti= {i}')
+                #print(detframe[i])
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+                self.dirmodel[channels[i]]['detframe'].pop(0)
+                self.dirmodel[channels[i]]['detframe'].append(0)
+                print(self.dirmodel[channels[i]]['detframe'])
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+                #print(detframe[i])
+            # Stream results
+            #im0 = annotator.result()
+            #print(f'i = {i}')
+            #print(channels[i])
+            #print(postpretime[i])
+            #print(detframe[i])
+            if not self.dirmodel[channels[i]]['detflag'] and self.dirmodel[channels[i]]['detframe'].count(1)>=1:
+                self.dirmodel[channels[i]]['detflag'] = True
+                self.dirmodel[channels[i]]['detpretime'] = time.time()
+            elif self.dirmodel[channels[i]]['detframe'].count(1)==0 :
+                self.dirmodel[channels[i]]['detflag'] = False
+                self.dirmodel[channels[i]]['detpretime'] = float('inf')
+            # Stream results
+            #im0 = annotator.result()
+            if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and time.time() - self.dirmodel[channels[i]]['detpretime'] > self.dirmodel[channels[i]]['durtime']  and self.dirmodel[channels[i]]['detflag']:
+            #print()
+            #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
+                #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
+                #print(self.dirmodel[channels[i]]['detflag'])
+                print('post-------------------------------------------------------------------------')
+                #time.sleep(30)
+             #print(time.time() - postpretime[i])
+                #print('111111111111111111111111111111111111111111111111')
+                #print(dirmodel[channels[i]]['preim'].shape)
+                success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
+                content = encoded_image.tobytes()
+                successori, encoded_imageori = cv2.imencode('.jpg', self.dirmodel[channels[i]]['oripreim'])
+                contentori = encoded_imageori.tobytes()
+                filename = f'{int(time.time())}.jpg'
+                filenameori = f'ori_{int(time.time())}.jpg'
+                #print(f'str(p) {p.name}')
+                print(channels[i])
+                payload = {'channel': self.dirmodel[channels[i]]['channel'],
+                                   'classIndex': self.dirmodel[channels[i]]['classindex'],
+                                   'ip': self.dirmodel[channels[i]]['algip'],
+                                   'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', self.dirmodel[channels[i]]['posttime']),
+                                   'videoUrl': channels[i]}
+                files = [
+                            ('file', (filename, content, 'image/jpeg')),
+                            ('oldFile', (filenameori, contentori, 'image/jpeg')),
+                        ]
+                try:
+                    result = requests.post(url, data=payload, files=files)
+                    print(result)
+                except Exception:
+                    print('posterror')
+                #time.sleep(3000)
+                self.dirmodel[channels[i]]['postpretime'] = time.time()
+                self.dirmodel[channels[i]]['detflag'] = False
+                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
+                year = time.strftime('%Y',time.localtime(time.time()))
+                month = time.strftime('%m',time.localtime(time.time()))
+                day = time.strftime('%d',time.localtime(time.time()))
+                savefold = f'/mnt/project/images/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+                savefold = Path(savefold)
+                savefold.mkdir(parents=True,exist_ok=True)
+                detsavefold = f'/mnt/project/detimages/{Path(self.opt.weights).stem}/{year}/{month}/{day}'
+                detsavefold = Path(detsavefold)
+                detsavefold.mkdir(parents=True,exist_ok=True)
+                cv2.imwrite(f'{savefold}/{timesave}.png',self.dirmodel[channels[i]]['oripreim'])
+                cv2.imwrite(f'{detsavefold}/{timesave}det.png',self.dirmodel[channels[i]]['preim'])
+            #if self.dirmodel[channels[i]]['detframe'].count(1)==0:
+            #    self.dirmodel[channels[i]]['detflag'] = False
+                #time.sleep(1)
+
+            self.view_img = False
+            if self.view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(f'{str(p)}-{Path(self.opt.weights).stem}',
+                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(f'{str(p)}-{Path(self.opt.weights).stem}', im0.shape[1], im0.shape[0])
+                im1 = cv2.resize(im0, (1280, 720))
+                cv2.imshow(f'{str(p)}-{Path(self.opt.weights).stem}', im1)
+                cv2.waitKey(1)  # 1 millisecond
+
+            # Save results (image with detections)
+
+            # Print time (inference-only)
+            print(f'channels[i]={channels[i]}')
+            #LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms {str(p)}-{Path(self.opt.weights).stem}")
+
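+    # load polls the CHANGESTREAM row for this model every 3 seconds so other processes can signal
+    # stream additions/removals; the latest row is kept in self.contentid.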
+    def load(self):
+        conn = sqlite3.connect(self.source)
+        c = conn.cursor()
+        while True:
+            #
+            # print("database opened successfully")
+
+            cursor = c.execute(
+                "SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
+            # content = cursor.fetchall()
+            # if content[0][1] ==1 or content[0][2] ==1:
+            #     c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
+            #     print("updata changestream")
+            #     conn.commit()
+            # cursor = c.execute(
+            # "SELECT modelname, addstream,delstream,streamimg  from CHANGESTREAM WHERE modelname='yolov5s'")
+            self.contentid = cursor.fetchall()
+            #global tag
+            #tag = Value('i', self.contentid[0][3])
+            #print(tag.value==1)
+            print(f'loadcontent={self.contentid[0][3]}')
+            time.sleep(3)
+        c.close()
+        conn.close()
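+    # readpoint pulls the task list for algorithmCode 25 (fall) from the platform, then for every
+    # channel fetches its electronic-fence points and initialises the per-channel state in
+    # self.dirmodel (debounce frames, post timers, labels, duration thresholds). It returns the
+    # sorted channel list, which is falsy when no channel is configured.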
+    def readpoint(self):
+        data = {
+                    "algorithmCode": '25',
+                    "deviceIp":None,
+                    'fwqCode':None
+                }
+        self.dirmodel = {}
+        result = requests.post(url=urlt,data=data).json()['data']
+        channell=[]
+        for info in result: 
+        #content = cursor.fetchall()
+        #self.dirmodel = {}
+        #for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
+            #address = f'{address[:-1]}0'
+            channel = info["deviceChannel"]
+            channell.append(channel)
+            self.dirmodel[channel] = {}
+            self.dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
+            if Path(self.opt.weights).stem == "uniform":
+                self.dirmodel[channel]['fence'] = 1
+            #self.dirmodel[channel]['point'] = point
+            self.dirmodel[channel]['channel'] = info['deviceChannel']
+            self.dirmodel[channel]['classindex'] = info['algorithmCode']
+            self.dirmodel[channel]['ip'] = info['deviceIp']
+            self.dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
+            dataele = {
+                    "algorithmCode": self.dirmodel[channel]['classindex'],
+                    "algorithmIp":self.dirmodel[channel]['algip'],
+                    "channel":self.dirmodel[channel]['channel']
+                }
+            resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+            resultele = resultele.split(',||')
+            resultele = tuple(resultele)
+            point = '%s:'*len(resultele) %resultele
+            if Path(self.opt.weights).stem == 'personcount':
+                resultper = requests.post(url=urlperson,data=dataele).json()['data']
+                personcountdir[channel] = int(resultper)
+            if len(point[:-2])<=1 and Path(self.opt.weights).stem == "uniform":
+                self.dirmodel[channel]['point'] = "256#144,1024#144,1024#576,256#576"
+            else:
+                self.dirmodel[channel]['point'] = point[:-2]
+            self.dirmodel[channel]['preim'] = None
+            self.dirmodel[channel]['oripreim'] = None
+            self.dirmodel[channel]['detframe'] = [0 for _ in range(2)]
+            self.dirmodel[channel]['postpretime'] = 0
+            self.dirmodel[channel]['detflag'] = False
+            self.dirmodel[channel]['detpretime'] = float('inf')
+            self.dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
+            if Path(self.opt.weights).stem == 'sleep' or Path(self.opt.weights).stem == 'duty' :
+                datatime= {
+                        "algorithmCode": self.dirmodel[channel]['classindex'],
+                        "algorithmIp":self.dirmodel[channel]['algip'],
+                        "channel":self.dirmodel[channel]['channel']
+                        }
+                resulttime = requests.post(url=urltime,data=datatime).json()['data']
+                self.dirmodel[channel]['durtime'] = int(resulttime)
+            else:
+                self.dirmodel[channel]['durtime'] = 0
+            self.dirmodel[channel]['posttime'] = 0
+        print(self.dirmodel)
+        return sorted(channell)
+    #    str = str.split(":")
+    #    lista = []
+    #    for liststr in str:
+    #        if len(liststr) > 0:
+    #            li = liststr.split(',')
+    #            listpoint = []
+    #            for i, j in zip(li[::2], li[1::2]):
+    #                listpoint.append((i, j))
+    #            lista.append(listpoint)
+    #    return listpoint
+
+
+#def preprocess():
+#        print('preprocess-----------------------------------------------')
+
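+# getframe is the frame producer: it opens all channels through LoadStreamsSQLTN, re-reads the
+# channel list from the task API every 5 minutes (restarting the loader if it changed) and pushes
+# each frame bundle into every model's queue, but only between 07:00 and 18:00.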
+def getframe(queuelist,channelsl,source,tt,numworks,lock,numworkv):
+    while True:
+        print("dataloader")
+        imgsz = [640, 640]
+        print(f'source = {source}')
+        dataset = LoadStreamsSQLTN(channelsl,source, img_size=640,
+                                      auto=True, vid_stride=20, tt=tt,numworks = numworks)
+        bs = len(dataset)
+        vid_path, vid_writer = [None] * bs, [None] * bs
+        # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
+        # self.postpretime = [0]*bs
+        # Run inference
+
+        #imgsz = (1 , 3, *self.imgsz)
+        print(imgsz)
+        #self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
+        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+
+        #
+        # print("database opened successfully")
+        pretime = time.time()
+        tag = 0
+        sourcebase = 'project0117.db'
+        for path, im, im0s, vid_cap, s, videotime,channels in dataset:
+            # print('*'*21)
+            # global tag
+            # print('datasetvideo')
+            # if time.time()-pretime > 300:
+            #     pretime = time.time()
+
+            #     conn = sqlite3.connect(sourcebase)
+            #     c = conn.cursor()
+            #     cursor = c.execute("SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= 'helmet'")
+            #     contentid = cursor.fetchall()
+            #     tag = contentid[0][3]
+            # if tag == 1:
+            #     lock.acquire()
+            #     numworkv.value += 1
+            #     dataset.close()
+            #     if numworkv.value==3:
+            #         print('newstreaming=', tag)
+            #         conn = sqlite3.connect(source)
+            #         c = conn.cursor()
+            #         c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname='helmet'")
+            #         print(123)
+            #         conn.commit()
+            #         c.close()
+            #         conn.close()
+            #     lock.release()
+            #     print('opencv1')
+            #     # cv2.destroyAllWindows()
+            #     print('opencv')
+            #     break
+            # else:
+            #     print('nonewstreaming=', tag)
+            if time.time()-pretime > 300:
+                channellist = []
+                pretime = time.time()
+                data = {
+                    "algorithmCode": '25',
+                    "deviceIp":None,
+                    "fwqCode":None
+
+                }
+                try:
+                    result = requests.post(url=urlt,data=data).json()['data']
+                except Exception:
+                    result = []
+                for info in result:
+                    data = {
+                        "channel": info["deviceChannel"],
+                    "ip": info["deviceAlgorithmIp"]
+                    }
+                    chaflag = any(info["deviceChannel"] in t for t in channellist)
+        #personcountdir[channel] = num
+                    if not chaflag:
+                        address = requests.post(url=urlrtsp,data=data).json()['msg']
+                        channellist.append((info['deviceChannel'],address))
+                channelsa  = []
+                sourcea = []
+                channellist = set(channellist)
+                channellist = sorted(channellist,key=lambda x:x[0])
+                #channellist = set(channellist)
+                for cha,add in channellist:
+                    channelsa.append(cha)
+                    sourcea.append(add)
+                channelsl = sorted(channelsl)
+                #channelsa = sorted(channelsa)
+                if channelsa!=channelsl and len(channelsa)>0:
+                    print(f'channelsa = {channelsa}')
+                    print(f'channelsl = {channelsl}')
+                    dataset.close()
+                    channelsl = channelsa
+                    source = sourcea
+                    break
+            for key,value in queuelist.items():
+                hour = time.localtime(time.time()).tm_hour
+                if hour in range(7,18):
+                    value[-1].put((path, im, im0s, vid_cap, s, videotime,channels))
+                    value[-1].get() if value[-1].qsize() == 10 else time.sleep(0.001)
+
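+# getmutpro starts `numworks` getframe producer processes; each model gets one bounded queue
+# (maxsize 10) per producer, and the dict of queue lists is returned for the consumers.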
+def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1,modellen=None):
+    processlist = []
+    queuelist = {}
+    for i in range(numworks):
+        for model in modellen:    
+            queue = Queue(maxsize=10)
+            queuelist.setdefault(model,[])
+            queuelist[model].append(queue)
+        process = Process(target=getframe,
+                        args=(queuelist, channels,source, i,numworks,lock,numworkv))
+        processlist.append(process)
+        process.start()
+        #queuelist.append(queue)
+    return queuelist
+
+#    path = []
+#    im0s = []
+#    vid_cap = None
+#    s = ''
+#    videotime = []
+#    while True:
+#        imlist = []
+#        pathlist = []
+#        im0slist = []
+#        channelslist = []
+#        vid_cap = None
+#        s = ''
+#        videotimelist = []
+#        for q in queuelist:
+#            if q.qsize()>0:
+#                setframe = q.get()
+#                path, im, im0s, vid_cap, s, videotime ,channels = setframe
+#                pathlist += path
+#                channelslist +=channels 
+#                im0slist += im0s
+#                videotimelist += videotime
+#                imlist.append(im)
+#        if len(imlist)>0:
+#            im = np.concatenate(imlist)
+#        if len(pathlist)>0:
+#            print(len(path),im.shape,len(im0s))
+#            streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
+            #print(f'streamlist = {len(streamlist)}')
+#        streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
+
+def modelfun(queue,weights,sourcedb,classes,device,classify,conf_thres,runmodel,v8=False):
+    print(weights)
+    detectdemo=Detect(weights=weights,source=sourcedb,classes=classes,device=device,classify=classify,conf_thres=conf_thres,v8=v8)
+    detectdemo.infer(queue,runmodel)
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+    opt = parser.parse_args()
+    return opt
+
+
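+# main() mirrors the stock detect.py entry point but is unused here: run() is not defined in this
+# file and the __main__ block below drives everything.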
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
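+    # Main flow: sync the task tables, resolve the RTSP address of every 'fall' channel, start the
+    # frame-producer processes, then spawn one detection process per configured model and refresh
+    # the channel/model configuration from the database every 10 minutes.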
+    #torch.multiprocessing.set_start_method('spawn')
+    #set_start_method('spawn')
+    opt = parse_opt()
+    dbpath = 'projectnew.db'
+    
+    conn = sqlite3.connect(dbpath)
+#
+# print("database opened successfully")
+    c = conn.cursor()
+    task(c,conn,urlt,urla)
+    cursor = c.execute('select channel,algip  from stream where modelname = "fall"')
+    result = cursor.fetchall()
+    for channel ,algip in result:
+        data = {
+        "channel": channel,
+        "ip":algip
+        }
+        #personcountdir[channel] = num
+        address = requests.post(url=urlrtsp,data=data).json()['msg']
+        c.execute('UPDATE STREAM set address= (?) where channel =(?)',(address,channel))
+    conn.commit()
+    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+    content = cursor.fetchall()
+    cursor = c.execute("SELECT address,channel from STREAM where modelname = 'fall'")
+    #cursor = c.execute("SELECT address from STREAM where modelname = 'helmet'")
+    contenta = cursor.fetchall()
+    source = []
+    modellist = []
+    addcha = []
+    channellist = []
+    for i in contenta:
+        addcha.append((i[0],i[1]))
+        #modellist.append(i[1])
+    addcha = set(addcha)
+    addcha = sorted(addcha,key=lambda x:x[1])
+    for add,cha in addcha:
+        source.append(add)
+        channellist.append(cha)
+    #source = set(source)
+    print(addcha)
+    source = list(source)
+    cursor = c.execute("SELECT modelname from STREAM where (modelname ='fall')")
+    contentm = cursor.fetchall()
+    for m in contentm:
+        modellist.append(m[0])
+    modellist = set(modellist)
+    modellist = list(modellist)
+    contentlist = []
+    for i in content:
+        contentlist.append(i[0])
+    #source.sort()
+    n = len(content)
+    print(f'modelname={n}')
+    print(content)
+    #content.reverse()
+    print(content)
+    print(source)
+    # main(opt)
+    #processes = []
+    streamqueue = Queue(maxsize=4)
+    numworkv = Value('i', 0)
+    manager = Manager()
+    lock = multiprocessing.Lock()
+    streamlist = manager.list()
+    numworks = 3
+    modellen = []
+    for i in modellist:
+        if i in contentlist:
+            modellen.append(i)
+    queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks,modellen)
+    deid = 0
+    #pool = ThreadPoolExecutor(max_workers=n)
+    runmodel = manager.dict()
+    while True:
+        for i in modellist:
+            if i in contentlist:
+                if i not in runmodel:
+            #print(i)
+            #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+                    c.execute('select conf,cla from changestream where modelname = (?)',(i,))
+                    rea = c.fetchall()
+                    print(f'weights = {i}.pt')
+                    if i in ['duty','danager','inspection','cross','personcount']:
+                        process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,[0],0,rea[0][1],rea[0][0],runmodel))
+                    else:
+                        if i in ['uniform','arm','helmet']:
+                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel,True))
+                        else:
+                            process = Process(target=modelfun,args=(queuelist[i],f'{i}.pt',dbpath,None,0,rea[0][1],rea[0][0],runmodel))
+
+                #elif i in ['helmet','fire','smoke','fall']:
+                #process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,True))
+                #else:
+                #process = Process(target=modelfun,args=(queuelist,f'{i}.pt',dbpath,None,deid%2,False))
+
+                #processes.append(process)
+                #process.start()
+                #detectobj = Process(target=detectdemo.infer,args=(queue,))
+                # Detect(weights=f'{i[0]}.pt')
+
+                    time.sleep(3)
+                    process.start()
+                    deid = deid+1
+                    runmodel[i] = 1
+        time.sleep(600)
+        task(c,conn,urlt,urla)
+        #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
+        #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+        cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+        content = cursor.fetchall()
+        contentlist = []
+        for con in content:
+            contentlist.append(con[0])
+        #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+        cursor = c.execute("SELECT address,channel from STREAM where modelname = 'fall'")
+        contenta = cursor.fetchall()
+        source = []
+        modellist = []
+        addcha = []
+        channellist = []
+        for i in contenta:
+            addcha.append((i[0],i[1]))
+            #modellist.append(i[1])
+        addcha = set(addcha)
+        addcha = sorted(addcha)
+        for a,cha in addcha:
+            source.append(a)
+            channellist.append(cha)
+        print(addcha)
+        #source = set(source)
+        source = list(source)
+        #source.sort()
+        cursor = c.execute("SELECT modelname from STREAM where (modelname = 'fall')")
+        contentm = cursor.fetchall()
+        for m in contentm:
+            modellist.append(m[0])
+        modellist = set(modellist)
+        n = len(content)
+        print(f'modelname={n}')
+        print(content)
+        #content.reverse()
+        print(content)
+        #pool.submit(detectobj.infer)
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+    #content = cursor.fetchall()
+    #n = len(content)
+    #print(f'modelname={n}')
+    #print(content)
+    #content.reverse()
+    #print(content)
+    # main(opt)
+    #processes = []
+    #pool = ProcessPoolExecutor(max_workers=n)
+    #for i in content:
+        #print(i)
+        #detectdemo=Detect(weights=f'{i[0]}.pt')
+        #process = Process(target=detectdemo.infer)
+        #processes.append(process)
+        #process.start()
+        #detectobj = Detect(weights=f'{i[0]}.pt')
+    #    time.sleep(3)
+        #pool.submit(detectobj.infer)
+    #    print('111111111111111111111111111111111111111111111111111111111')
+        #pool.submit(TestA().func1, '张三', i)
+        #print('----------------------------------------------------------------')
+    #time.sleep(3000)
+    # wait for all processes to finish
+    #for process in processes:
+    #    process.join()
+
+        #pool.submit(Detect(weights=f'{i[0]}.pt').infer)
+    # if isinstance(opt.weights,list):
+    #     opt.weights = opt.weights[0]
+    #signal.signal(signal.SIGINT, my_handler)
+    #detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
+    #detectdemo1.infer()
+    #a = Test
+    #with ProcessPoolExecutor(3) as ppool:
+        #for i in range(3):
+    #        print('hello')
+                        #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+            #ppool.submit(TestA().func1, '张三', i)
+    #ta = TestA()
+    #with ProcessPoolExecutor(5) as ppool: # create a pool of 5 processes
+    #    for i in range(1, 4):
+    #        ppool.submit(func1, '张三', i)
+    #f1= pool.submit(detectdemo1.infer)
+    # print("thread 1 -----------------------------------------------------------------------------------")
+    #detectdemo2 = Detect(weights=r"helmet.pt")
+    #f2=pool.submit(detectdemo2.infer)
+    # print("thread 2 -------------------------------------------------------------------------------------")
+    #detectdemo3 = threading.Thread(target=detectdemo3.infer)
+    #detectdemo3 = Detect(weights=r"fall.pt")
+    #f3=pool.submit(detectdemo3.infer)

+ 857 - 0
detectopencvthrjump.py

@@ -0,0 +1,857 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+import matplotlib.path as mat
+import requests
+import argparse
+import os
+import platform
+import sqlite3
+import sys
+import threading
+import time
+from pathlib import Path
+import signal
+import torch
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import Process,Manager,Value
+from multiprocessing import Queue
+from multiprocessing import set_start_method
+import multiprocessing
+import multiprocessing as mp
+import numpy as np
+import platform
+import pathlib
+from collections import defaultdict, deque
+#import websockets
+import asyncio
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+import math
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLTN
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,strtolstl,apply_classifier1,task)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import select_device, smart_inference_mode
+#from testpool import func1,TestA
+from ultralytics import YOLO
+from person_jump_check import personJump
+# def my_handler(signum, frame):
+#     exit(0)
+#url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+plt = platform.system()
+if plt != 'Windows':
+    pathlib.WindowsPath = pathlib.PosixPath
+urlhead = "http://172.19.152.231"
+url = f"{urlhead}/open/api/operate/upload"
+urlele = f"{urlhead}/open/api/operate/fence"
+urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
+urlt = f"{urlhead}/open/api/operate/taskList"
+urla = f"{urlhead}/open/api/operate/algorithmList"
+weburl = f"ws://36.7.84.146:28801/websocket/device"
+personcountdir = {}
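+# algdir maps the platform's algorithm codes to model names, modellabeldir maps those
+# codes to the class label each model reports, and modelalgdir (and algmodel, rebuilt
+# below by inverting algdir) map model names back to their codes.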
+algdir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'duty','98':'face','51':'run','64':'jump'}
+modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person','64':'person'}
+modelalgdir = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51','jump':'64'}
+algmodel = {}
+for key,value in algdir.items():
+    algmodel[value] = key
+
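+# map_to_ellipse appears to remap a pixel position on an assumed 1280x720 frame onto an
+# ellipse centred at (640, 360): the direction from the frame centre is preserved while
+# the radius is rescaled by the normalised distance, giving a rough perspective
+# correction for the speed checks used elsewhere in this project.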
+def map_to_ellipse(position):
+    x, y = position
+    center_x = 640
+    center_y = 360
+    a = 640
+    b = 360
+
+    x_norm = x / 1280
+    y_norm = y / 720
+
+    d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
+    theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
+    f = d_norm
+    a_new = a * f
+    b_new = b * f
+
+    bias_x = center_x + a_new * math.cos(theta_norm)
+    bias_y = center_y + b_new * math.sin(theta_norm)
+
+    return np.array([bias_x, bias_y])
+
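+# YoloOpt bundles the command-line options of the stock YOLOv5 detect.py into a plain
+# object so that Detect below can be constructed programmatically instead of via argparse.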
+class YoloOpt:
+    def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
+                 imgsz=(640,640),
+                 conf_thres=0.80,
+                 iou_thres=0.45,
+                 max_det=1000,
+                 device='',
+                 view_img=False,
+                 save_txt=False,
+                 save_conf=False,
+                 save_crop=False,
+                 nosave=True,
+                 classes=None,
+                 agnostic_nms=False,
+                 augment=False,
+                 visualize=False,
+                 update=False,
+                 project=ROOT / 'runs/detect',
+                 name='exp',
+                 exist_ok=False,
+                 line_thickness=1,
+                 hide_labels=False,
+                 hide_conf=False,
+                 half=False,
+                 dnn=False,
+                 vid_stride=10,
+                 classify=False):
+
+        self.weights = weights  # path to the model weights file
+        self.source = source  # input source to run inference on
+        self.data = data
+        if imgsz is None:
+            imgsz = (640, 640)
+        self.imgsz = imgsz  # input image size, default (640, 640)
+        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
+        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
+        self.device = device  # device to run inference on ('' lets select_device choose)
+        self.view_img = view_img  # whether to display annotated images/videos, default False
+        self.classes = classes  # keep only the listed class indices; default keeps all classes
+        self.agnostic_nms = agnostic_nms  # class-agnostic NMS across categories, default False
+        self.augment = augment  # augmented inference (TTA / multi-scale), may improve accuracy
+        self.update = update  # if True, run strip_optimizer on the weights to drop optimizer state, default False
+        self.exist_ok = exist_ok  # if True, reuse the existing project/name directory instead of incrementing it, default False
+        self.project = project  # directory for saving run outputs; not used by this program
+        self.name = name  # run name; not used by this program
+        self.max_det = max_det
+        self.save_txt = save_txt
+        self.save_conf= save_conf
+        self.save_crop= save_crop
+        self.nosave = nosave
+        self.visualize = visualize
+        self.line_thickness = line_thickness
+        self.hide_labels = hide_labels
+        self.hide_conf = hide_conf
+        self.half = half
+        self.dnn = dnn
+        self.vid_stride = vid_stride
+        self.classify = classify
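+
+# Detect wires the SQL/HTTP-configured multi-stream loader (LoadStreamsSQLTN) to the
+# jump-counting model and posts alarm images to the platform upload API defined above.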
+class Detect:
+    def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classify=False,conf_thres=0.80,device='',channelsl=''):
+        print(f'detectweights = {weights}')
+        self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classify=classify,conf_thres=conf_thres,device=device)
+        self.source = str(self.opt.source)
+        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
+        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+        self.webcam = True
+        screenshot = self.source.lower().startswith('screen')
+        if is_url and is_file:
+            self.source = check_file(self.source)  # download
+        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
+        #self.save_dir = self.save_dir / Path(self.opt.weights).stem
+        #self.save_dir.mkdir(parents=True, exist_ok=True)
+        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+        #self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
+
+        # self.readchannel = self.readpoint()
+        #print(self.imgsz)
+        self.updatetime = time.time()
+        #self.filetime = os.path.getmtime(self.opt.weights)
+        bs = 1  # batch_size
+        if self.webcam:
+            #self.view_img = check_imshow(warn=True)
+            self.view_img = False
+            # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+            tt= 0
+            numworks = 1
+            self.dataset = LoadStreamsSQLTN(channelsl, source, img_size=640,
+                                        auto=True, vid_stride=1, tt=tt, numworks=numworks)
+            # bs = len(dataset)
+        elif screenshot:
+            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
+        else:
+            self.dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
+        self.speed_threshold = 40
+        self.high_velocity_count_threshold = 6
+        # t1 = threading.Thread(target=self.load,daemon=True)
+        # t1.start()
+    @smart_inference_mode()
+    def infer(self,weights,classify,conf_thres,device,runmodel):
+        device = select_device(device)
+        print("loadmodel device{device}")
+        #region_points = [(820, 200), (1600, 200), (1600, 530), (820, 530)]
+        #region_points = [[259, 200], [2258, 200], [2258, 943], [259, 943]]
+        #region_points = [[374,1224],[380,1237],[426,1237],[435,1237],[479,1230],[527,1201],[552,1187],[575,1166],[583,1155],[585,1136],[583,1126],[558,1124],[485,1178],[374,1210]]
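+        # region_points below is a hand-drawn polygon, in pixel coordinates on the stream
+        # frame, marking the zone in which jumps are counted; personJump is a
+        # project-specific counter whose interface resembles the ultralytics solution
+        # counters (region, model, classes, conf and an in_count attribute).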
+        region_points = [(214, 519),(235, 512),(251, 503),(269, 496),(283, 486),(296, 482),(302, 493),(296, 507),(283, 517),(275, 526),(247, 533),(227, 538),(209, 538),(207, 529),(203, 521)]
+        counter = personJump(
+            # show_in = False,
+            # show_out = False,
+            show=False,
+            region=region_points,
+            model=weights,
+            classes=[0],
+            conf=0.4,
+        )
+        # stride, names, pt = model.stride, model.names, model.pt
+        if classify:
+            classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
+            classifier_model = classifier_model.to(device)
+            classifier_model.eval()
+            print('classify--------------------------------------------------------------------')
+        #imgsz = check_img_size(self.opt.imgsz, s=stride)
+        #model.warmup(imgsz=(1, 3, *imgsz))
+        readchannel,dirmodel = self.readpoint(weights)
+        #print(imgsz)
+        updatetime = time.time()
+        filetime = os.path.getmtime(weights)
+        #selfreadpoint();
+        pretime = time.time()
+        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+        #
+        # print ("数据库打开成功")
+        #async with websockets.connect(uri) as websocket:
+        # for key in dirmodel.keys():
+        #     dirmodel[key]['websoc'] = await websockets.connect(dirmodel[key]['web'])
+        for path, im, im0s, vid_cap, s, videotime,channels in self.dataset:
+            hour = time.localtime(time.time()).tm_hour
+            if hour not in range(7,18):
+                time.sleep(30)
+                continue
+            i = 0
+            ima = im0s[i].copy()
+            imc = ima.copy()
+            flag = False
+            #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+            #if len(queue) == 0:
+                # print('queuezero')
+            #    time.sleep(0.01)
+            #print(123)
+            im0 = counter.count(ima)
+            #print(456)
+            if counter.in_count > 0:
+                flag = True
+                counter.in_count = 0
+            #print(f'timee = {time.time()-timee}')
+
+            #flag = True
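+            # detframe is a two-slot sliding window per channel recording whether the jump
+            # counter fired on each recent frame; detflag/detpretime below mark when a
+            # detection episode starts and are cleared once both slots are back to 0.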
+            if flag:
+                #if self.dirmodel[path[i]]['imgtime'] != videotime[i]:
+                dirmodel[channels[i]]['detframe'].pop(0)
+                dirmodel[channels[i]]['detframe'].append(1)
+                dirmodel[channels[i]]['preim'] = im0
+                dirmodel[channels[i]]['oripreim'] = imc
+                dirmodel[channels[i]]['posttime'] = videotime[i]
+                print(dirmodel[channels[i]]['detframe'])
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+            else:
+                #print(f'deti= {i}')
+                #print(detframe[i])
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+                dirmodel[channels[i]]['detframe'].pop(0)
+                dirmodel[channels[i]]['detframe'].append(0)
+                print(dirmodel[channels[i]]['detframe'])
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+                #print(detframe[i])
+            # Stream results
+            #im0 = annotator.result()
+            #print(f'i = {i}')
+            #print(channels[i])
+            #print(postpretime[i])
+            #print(detframe[i])
+            if not dirmodel[channels[i]]['detflag'] and dirmodel[channels[i]]['detframe'].count(1)>=1:
+                dirmodel[channels[i]]['detflag'] = True
+                dirmodel[channels[i]]['detpretime'] = time.time()
+            elif dirmodel[channels[i]]['detframe'].count(1)==0 :
+                dirmodel[channels[i]]['detflag'] = False
+                dirmodel[channels[i]]['detpretime'] = float('inf')
+            # Stream results
+            #im0 = annotator.result()
+
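+            # An alarm is posted at most once every 30 s per channel, and only after the
+            # detection has lasted longer than the channel's configured durtime; the
+            # annotated frame and the raw frame are uploaded together as multipart files.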
+            if time.time() - dirmodel[channels[i]]['postpretime'] >30 and time.time() - dirmodel[channels[i]]['detpretime'] > dirmodel[channels[i]]['durtime']  and dirmodel[channels[i]]['detflag']:
+            #print()
+            #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
+                #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
+                #print(self.dirmodel[channels[i]]['detflag'])
+                #print(f'{Path(self.opt.weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.time()}')
+                #time.sleep(30)
+             #print(time.time() - postpretime[i])
+                #print('111111111111111111111111111111111111111111111111')
+                #print(dirmodel[channels[i]]['preim'].shape)
+                success, encoded_image = cv2.imencode('.jpg', dirmodel[channels[i]]['preim'])
+                content = encoded_image.tobytes()
+                successori, encoded_imageori = cv2.imencode('.jpg', dirmodel[channels[i]]['oripreim'])
+                contentori = encoded_imageori.tobytes()
+                filename = f'0_{int(time.time())}.jpg'
+                filenameori = f'0_{int(time.time())}.jpg'
+                #print(f'str(p) {p.name}')
+                print(channels[i])
+                payload = {'channel': dirmodel[channels[i]]['channel'],
+                                   'classIndex': dirmodel[channels[i]]['classindex'],
+                                   'ip': dirmodel[channels[i]]['algip'],
+                                   'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', dirmodel[channels[i]]['posttime']),
+                                   'videoUrl': channels[i]}
+
+                fourcc = cv2.VideoWriter_fourcc(*'MP4V')
+                fps = 6
+                height,width,_ = dirmodel[channels[i]]['preim'].shape
+                year=time.strftime('%Y',time.localtime(time.time()))
+                month=time.strftime('%m',time.localtime(time.time()))
+                day=time.strftime('%d',time.localtime(time.time()))
+                vidsavefold = f'/mnt/yolo/videos/{Path(weights).stem}/{year}/{month}/{day}'
+                vidsaveflod = Path(vidsavefold)
+                vidsaveflod.mkdir(parents=True, exist_ok=True)
+                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', dirmodel[channels[i]]['posttime'])
+                #out = cv2.VideoWriter(f'{vidsaveflod}/{timesave}.mp4', fourcc, fps, (width, height))
+                #for imgframe in self.dirmodel[channels[i]]['framelist']:
+                    #success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
+                    #content = encoded_image.tobytes()
+                #    out.write(imgframe)
+                #out.release()
+                files = [
+                            ('file', (filename, content, 'image/jpeg')),
+                            ('oldFile',(filenameori, contentori, 'image/jpeg')),
+                            #('videoFile',open(f'{vidsaveflod}/{timesave}.mp4','rb'))
+                        ]
+
+                print(f'{Path(weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))}')
+                
+                try:
+                #    if self.dirmodel[channels[i]]['postvidpretime'] != self.dirmodel[channels[i]]['posttime'] and not self.dirmodel[channels[i]]['postvideo']:
+                    resulttext = requests.post(url, data=payload, files=files)
+                #        self.dirmodel[channels[i]]['postvidpretime'] = self.dirmodel[channels[i]]['posttime']
+                #        self.dirmodel[channels[i]]['postid'] = resulttext.json()['data']
+                #        self.dirmodel[channels[i]]['postframe'] = 0
+                #        self.dirmodel[channels[i]]['postvideo'] = True
+                #        self.dirmodel[channels[i]]['videoname'] = f'{vidsaveflod}/{timesave}.mp4'
+                        #videoqueuea.append(resulttext,f'{vidsaveflod}/{timesave}.mp4',)
+                    print(f'resulttext = {resulttext.json()["data"]}')
+                    
+                except Exception:
+                    print("posterror")
+                #if self.dirmodel[channels[i]]['postvideo']:
+
+                #print(f'resulttext = {resulttext}')
+                #time.sleep(3000)
+                dirmodel[channels[i]]['postpretime'] = time.time()
+                dirmodel[channels[i]]['detflag'] = False
+                #timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
+                #year=time.strftime('%Y',time.localtime(time.time()))
+                #month=time.strftime('%m',time.localtime(time.time()))
+                #day=time.strftime('%d',time.localtime(time.time()))
+                savefold = f'/mnt/yolo/images/{Path(weights).stem}/{year}/{month}/{day}'
+                saveflod = Path(savefold)
+                detsavefold = f'/mnt/yolo/detimages/{Path(weights).stem}/{year}/{month}/{day}'
+                detsavefold = Path(detsavefold)
+                saveflod.mkdir(parents=True, exist_ok=True)
+                detsavefold.mkdir(parents=True, exist_ok=True)
+                cv2.imwrite(f'{savefold}/{timesave}.jpg',dirmodel[channels[i]]['oripreim'])
+                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',dirmodel[channels[i]]['preim'])
+                
+            self.view_img = False
+            if self.view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(f'{str(p)}-{Path(weights).stem}',
+                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(f'{str(p)}-{Path(weights).stem}', im0.shape[1], im0.shape[0])
+                im1 = cv2.resize(im0, (1280, 720))
+                cv2.imshow(f'{str(p)}-{Path(weights).stem}', im1)
+                cv2.waitKey(1)  # 1 millisecond
+
+            # Save results (image with detections)
+
+            # Print time (inference-only)
+            print(f'channels[i]={channels[i]}')
+            #LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms {time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time()))} {Path(weights).stem}")
+
+    # def load(self):
+    #     conn = sqlite3.connect(self.source)
+    #     c = conn.cursor()
+    #     while True:
+    #         #
+    #         # print("Database opened successfully")
+    #
+    #         cursor = c.execute(
+    #             "SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
+    #         # content = cursor.fetchall()
+    #         # if content[0][1] ==1 or content[0][2] ==1:
+    #         #     c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
+    #         #     print("updata changestream")
+    #         #     conn.commit()
+    #         # cursor = c.execute(
+    #         # "SELECT modelname, addstream,delstream,streamimg  from CHANGESTREAM WHERE modelname='yolov5s'")
+    #         self.contentid = cursor.fetchall()
+    #         #global tag
+    #         #tag = Value('i', self.contentid[0][3])
+    #         #print(tag.value==1)
+    #         print(f'loadcontent={self.contentid[0][3]}')
+    #         time.sleep(3)
+    #     c.close()
+    #     conn.close()
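+    # readpoint pulls the task list for this algorithm from the platform API and builds a
+    # per-channel state dict (fence points, post timers, detection window, track history);
+    # it replaces the earlier SQLite lookup left commented out above.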
+    def readpoint(self,weights):
+        #conn = sqlite3.connect(self.source)
+        #c = conn.cursor()
+        #cursor = c.execute(
+        #    "SELECT address,fence,point,channel,classindex,ip ,algip,label, durtime from STREAM WHERE modelname= (?)",
+        #    (Path(self.opt.weights).stem,))
+        data = {
+                    "algorithmCode": algmodel[Path(weights).stem],
+                    "deviceIp":None
+                }
+        dirmodel = {}
+        result = requests.post(url=urlt,data=data).json()['data']
+        channell=[]
+        for info in result: 
+        #content = cursor.fetchall()
+        #self.dirmodel = {}
+        #for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
+            #address = f'{address[:-1]}0'
+            channel = info["deviceChannel"]
+            channell.append(channel)
+            dirmodel[channel] = {}
+            dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
+            #self.dirmodel[channel]['point'] = point
+            dirmodel[channel]['channel'] = info['deviceChannel']
+            dirmodel[channel]['classindex'] = info['algorithmCode']
+            dirmodel[channel]['ip'] = info['deviceIp']
+            dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
+            dataele = {
+                    "algorithmCode": dirmodel[channel]['classindex'],
+                    "algorithmIp":dirmodel[channel]['algip'],
+                    "channel":dirmodel[channel]['channel']
+                }
+            resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+            resultele = resultele.split(',||')
+            resultele = tuple(resultele)
+            point = '%s:'*len(resultele) %resultele
+            if Path(weights).stem == 'personcount':
+                resultper = requests.post(url=urlperson,data=dataele).json()['data']
+                personcountdir[channel] = resultper
+            dirmodel[channel]['point'] = point[:-2]
+            dirmodel[channel]['preim'] = None
+            dirmodel[channel]['oripreim'] = None
+            dirmodel[channel]['detframe'] = [0 for _ in range(2)]
+            dirmodel[channel]['postpretime'] = 0
+            dirmodel[channel]['detflag'] = False
+            dirmodel[channel]['detpretime'] = float('inf')
+            dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
+            dirmodel[channel]['durtime'] = 0
+            dirmodel[channel]['posttime'] = 0
+            dirmodel[channel]['track_history'] = defaultdict(lambda: [])
+            dirmodel[channel]['time_stamps'] = defaultdict(lambda: deque(maxlen=50))
+            dirmodel[channel]['instantaneous_velocities'] = defaultdict(lambda: deque(maxlen=10))
+            #tmp = f'{weburl}/{channel}/{info["deviceAlgorithmIp"]}'
+            #dirmodel[channel]['web'] = f'{weburl}/{info["deviceId"]}/{info["algorithmCode"]}'
+        print(dirmodel)
+        return sorted(channell),dirmodel
+    #def strtolst(self,str):
+    #    str = str.split(":")
+    #    lista = []
+    #    for liststr in str:
+    #        if len(liststr) > 0:
+    #            li = liststr.split(',')
+    #            listpoint = []
+    #            for i, j in zip(li[::2], li[1::2]):
+    #                listpoint.append((i, j))
+    #            lista.append(listpoint)
+    #    return listpoint
+
+
+#def preprocess():
+#        print('preprocess-----------------------------------------------')
+
+# def getframe(queue,channelsl,source,tt,numworks,lock,numworkv):
+#     while True:
+#         print("dataloader")
+#         imgsz = [640, 640]
+#         print(f'source = {source}')
+#         print(f'inchannel ={channelsl}')
+#
+#         bs = len(dataset)
+#         vid_path, vid_writer = [None] * bs, [None] * bs
+#         # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
+#         # self.postpretime = [0]*bs
+#         # Run inference
+#
+#         #imgsz = (1 , 3, *self.imgsz)
+#         print(imgsz)
+#         #self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
+#         seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+#         sourcebase = 'project.db'
+#         #
+#         # print("Database opened successfully")
+#         pretime = time.time()
+#         tag = 0
+#         for path, im, im0s, vid_cap, s, videotime,channels in dataset:
+#             # print('*'*21)
+#             # global tag
+#             if time.time()-pretime > 30:
+#                 channellist = []
+#                 pretime = time.time()
+#                 data = {
+#                     "algorithmCode": None,
+#                     "deviceIp":None
+#                 }
+#                 result = requests.post(url=urlt,data=data).json()['data']
+#                 for info in result:
+#                     channellist.append((info['deviceChannel'],info['playbackAddress']))
+#                 channelsa  = []
+#                 sourcea = []
+#                 channellist = set(channellist)
+#                 channellist = sorted(channellist,key=lambda x:x[0])
+#                 #channellist = set(channellist)
+#                 for cha,add in channellist:
+#                     channelsa.append(cha)
+#                     sourcea.append(add)
+#                 channelsl = sorted(channelsl)
+#                 #channelsa = sorted(channelsa)
+#                 if channelsa!=channelsl:
+#                     print(f'channelsa = {channelsa}')
+#                     print(f'channelsl = {channelsl}')
+#                     dataset.close()
+#                     channelsl = channelsa
+#                     source = sourcea
+#                     break;
+#
+#                 #conn = sqlite3.connect(sourcebase)
+#                 #c = conn.cursor()
+#                 #cursor = c.execute("SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname = 'stream'")
+#                 #contentid = cursor.fetchall()
+#                 #tag = contentid[0][3]
+#             #if tag == 1:
+#             #    lock.acquire()
+#             #    numworkv.value += 1
+#             #    dataset.close()
+#             #    if numworkv.value==3:
+#             #        print('newstreaming=', tag)
+#             #        conn = sqlite3.connect(source)
+#             #        c = conn.cursor()
+#             #        c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname= 'helmet'")
+#             #        print(123)
+#             #        conn.commit()
+#             #        c.close()
+#             #        conn.close()
+#             #    lock.release()
+#             #    print('opencv1')
+#                 # cv2.destroyAllWindows()
+#             #    print('opencv')
+#             #    break
+#             #else:
+#             #    print('nonewstreaming=', tag)
+#             queue.put((path, im, im0s, vid_cap, s, videotime,channels))
+#             queue.get() if queue.qsize() > 3 else time.sleep(0.001)
+#
+# def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1):
+#     processlist = []
+#     queuelist = []
+#     for i in range(numworks):
+#         queue = Queue(maxsize=4)
+#         process = Process(target=getframe,
+#                         args=(queue, channels,source, i,numworks,lock,numworkv))
+#         processlist.append(process)
+#         process.start()
+#         queuelist.append(queue)
+#     return queuelist
+#     #path = []
+#     #im0s = []
+#     #vid_cap = None
+#     #s = ''
+#     #videotime = []
+#     #while True:
+#     #    imlist = []
+#     #    pathlist = []
+#     #    im0slist = []
+#     #    channelslist = []
+#     #    vid_cap = None
+#     #    s = ''
+#     #    videotimelist = []
+#     #    for q in queuelist:
+#     #        if q.qsize()>0:
+#     #            setframe = q.get()
+#     #            path, im, im0s, vid_cap, s, videotime,channels = setframe
+#     #            channelslist += channels
+#     #            pathlist += path
+#     #            im0slist += im0s
+#     #            videotimelist += videotime
+#     #            imlist.append(im)
+#     #    if len(imlist)>0:
+#     #        im = np.concatenate(imlist)
+#     #    if len(pathlist)>0:
+#     #        print(len(path),im.shape,len(im0s))
+#     #        streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
+#             #print(f'streamlist = {len(streamlist)}')
+#     #    streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
+
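+# modelfun simply runs one model's infer loop on an existing Detect instance; detectmut
+# builds a Detect for a slice of channels and starts one thread per model name that
+# appears in both the STREAM and CHANGESTREAM tables.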
+def modelfun(detectdemo,weights,classify=False,conf_thres=0.80,device='',runmodel=None):
+    print(weights)
+    #detectdemo=Detect(weights=weights,source=sourcedb,classify=classify,conf_thres=conf_thres,device=device)
+    detectdemo.infer(weights,classify,conf_thres,device,runmodel)
+    #detectdemo.infer(weights, classify, conf_thres, device, runmodel)
+
+def detectmut(channellist,source,modellist,contentlist,modeladir,runmodel={},deviceid=''):
+    detectdemo = Detect(channelsl=channellist,source=source)
+        # while True:
+    for modelname in modellist:
+        if modelname in contentlist:
+            #if modelname not in runmodel:
+        #print(i)
+        #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+                #if modelname in ['fall','helmet','bag','arm']:
+            print(f'weights ={modelname}.pt deviceid {deviceid}')
+            #c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
+            #rea = c.fetchall()
+            process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',modeladir[modelname]['cla'],modeladir[modelname]['conf'],deviceid,runmodel))
+            #elif modelname in ['personcount','persontre']:
+            #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,False,0.50,'',runmodel))
+            #elif modelname in ['uniform']:
+            #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,True,0.50,'',runmodel))
+            #else:
+            #    process = Process(target=modelfun,args=(streamlist,f'{modelname}.pt',dbpath))
+            #runmodel[modelname] = 1
+    #processes.append(process)
+    #process.start()
+    #detectobj = Process(target=detectdemo.infer,args=(queue,))
+    # Detect(weights=f'{i[0]}.pt')
+
+            time.sleep(3)
+            process.start()
+
+
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+    opt = parser.parse_args()
+    return opt
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    torch.multiprocessing.set_start_method('spawn')
+    #set_start_method('spawn')
+    #multiprocessing.set_start_method('spawn')
+    torch.cuda.set_per_process_memory_fraction(0.6)
+    opt = parse_opt()
+    dbpath = 'projectnew.db'
+    conn = sqlite3.connect(dbpath)
+#
+# print ("数据库打开成功")
+    c = conn.cursor()
+    task(c,conn,urlt,urla)
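+    # task() is assumed to refresh the STREAM/CHANGESTREAM tables from the platform API;
+    # the queries below then pick out only the channels configured for the 'jump' model.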
+    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'jump'")
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+    content = cursor.fetchall()
+    contentlist = []
+    for con in content:
+        contentlist.append(con[0])
+    #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+    cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'jump'")
+    contenta = cursor.fetchall()
+    source = []
+    modellist = []
+    addcha = []
+    channellist = []
+    for i in contenta:
+        addcha.append((i[0],i[2]))
+        modellist.append(i[1])
+    addcha = set(addcha)
+    addcha = sorted(addcha,key=lambda x:x[1])
+    for a,cha in addcha:
+        source.append(a)
+        channellist.append(cha)
+    print(addcha)
+    #source = set(source)
+    source = list(source)
+    #source.sort()
+    modellist = set(modellist)
+    n = len(content)
+    print(f'modelname={n}')
+    print(content)
+    #content.reverse()
+    print(content)
+    # main(opt)
+    #processes = []
+    streamqueue = Queue(maxsize=4)
+    numworkv = Value('i', 0)
+    manager = Manager()
+    lock = multiprocessing.Lock()
+    streamlist = manager.list()
+    numworks = 1
+
+    #queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks)
+    #thread.start()
+    #videoqueue = Queue(maxsize=20)
+    #thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
+    #thread1.start()
+    #pool = ThreadPoolExecutor(max_workers=n)
+    #runmodel = manager.dict()
+    modeladir = {}
+    for modelname in modellist:
+        if modelname in contentlist:
+            #if modelname not in runmodel:
+        #print(i)
+        #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+                #if modelname in ['fall','helmet','bag','arm']:
+            print(f'weights ={modelname}.pt')
+            c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
+            rea = c.fetchall()
+            #print(f'rea')
+            modeladir.setdefault(modelname,{})
+            modeladir[modelname]['conf'] = rea[0][0]
+            modeladir[modelname]['cla'] = rea[0][1]
+    runmodel = {}
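+    # Channels are sharded into groups of 13, each group running in its own process;
+    # deviceid % 2 alternates the groups between two devices (assumed to be two GPUs).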
+    for deviceid,num in enumerate(range(0,len(channellist),13)):
+        process = Process(target=detectmut,args=(channellist[num:num+13],source[num:num+13],modellist,contentlist,modeladir,{},deviceid%2))
+        time.sleep(3)
+        process.start()
+        #deviceid = deviceid+1
+        # detectdemo = Detect(channelsl=channellist[0:num],source=source[0:num])
+        # # while True:
+        # for modelname in modellist:
+        #     if modelname in contentlist:
+        #         if modelname not in runmodel:
+        #     #print(i)
+        #     #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+        #             #if modelname in ['fall','helmet','bag','arm']:
+        #             print(f'weights ={modelname}.pt')
+        #             c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
+        #             rea = c.fetchall()
+        #             process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',rea[0][1],rea[0][0],'',runmodel))
+        #             #elif modelname in ['personcount','persontre']:
+        #             #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,False,0.50,'',runmodel))
+        #             #elif modelname in ['uniform']:
+        #             #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,True,0.50,'',runmodel))
+        #             #else:
+        #             #    process = Process(target=modelfun,args=(streamlist,f'{modelname}.pt',dbpath))
+        #             runmodel[modelname] = 1
+        #     #processes.append(process)
+        #     #process.start()
+        #     #detectobj = Process(target=detectdemo.infer,args=(queue,))
+        #     # Detect(weights=f'{i[0]}.pt')
+
+        #             time.sleep(3)
+        #             process.start()
+        # time.sleep(900)
+        # task(c,conn,urlt,urla)
+        # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
+        # #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+        # content = cursor.fetchall()
+        # contentlist = []
+        # for con in content:
+        #     contentlist.append(con[0])
+        # #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+        # cursor = c.execute("SELECT address,modelname,channel from STREAM ")
+        # contenta = cursor.fetchall()
+        # source = []
+        # modellist = []
+        # addcha = []
+        # channellist = []
+        # for i in contenta:
+        #     addcha.append((i[0],i[2]))
+        #     modellist.append(i[1])
+        # addcha = set(addcha)
+        # addcha = sorted(addcha)
+        # for a,cha in addcha:
+        #     source.append(a)
+        #     channellist.append(cha)
+        # print(addcha)
+        # #source = set(source)
+        # source = list(source)
+        # #source.sort()
+        # modellist = set(modellist)
+        # n = len(content)
+        # print(f'modelname={n}')
+        # print(content)
+        # #content.reverse()
+        # print(content)
+        #pool.submit(detectobj.infer)
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+    #content = cursor.fetchall()
+    #n = len(content)
+    #print(f'modelname={n}')
+    #print(content)
+    #content.reverse()
+    #print(content)
+    # main(opt)
+    #processes = []
+    #pool = ProcessPoolExecutor(max_workers=n)
+    #for i in content:
+        #print(i)
+        #detectdemo=Detect(weights=f'{i[0]}.pt')
+        #process = Process(target=detectdemo.infer)
+        #processes.append(process)
+        #process.start()
+        #detectobj = Detect(weights=f'{i[0]}.pt')
+    #    time.sleep(3)
+        #pool.submit(detectobj.infer)
+    #    print('111111111111111111111111111111111111111111111111111111111')
+        #pool.submit(TestA().func1, '张三', i)
+        #print('----------------------------------------------------------------')
+    #time.sleep(3000)
+    # Wait for all processes to finish
+    #for process in processes:
+    #    process.join()
+
+        #pool.submit(Detect(weights=f'{i[0]}.pt').infer)
+    # if isinstance(opt.weights,list):
+    #     opt.weights = opt.weights[0]
+    #signal.signal(signal.SIGINT, my_handler)
+    #detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
+    #detectdemo1.infer()
+    #a = Test
+    #with ProcessPoolExecutor(3) as ppool:
+        #for i in range(3):
+    #        print('hello')
+                        #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+            #ppool.submit(TestA().func1, '张三', i)
+    #ta = TestA()
+    #with ProcessPoolExecutor(5) as ppool: # create a pool of 5 worker processes
+    #    for i in range(1, 4):
+    #        ppool.submit(func1, '张三', i)
+    #f1= pool.submit(detectdemo1.infer)
+    # print("线程1-----------------------------------------------------------------------------------")
+    #detectdemo2 = Detect(weights=r"helmet.pt")
+    #f2=pool.submit(detectdemo2.infer)
+    # print("线程2-------------------------------------------------------------------------------------")
+    #detectdemo3 = threading.Thread(target=detectdemo3.infer)
+    #detectdemo3 = Detect(weights=r"fall.pt")
+    #f3=pool.submit(detectdemo3.infer)

+ 1247 - 0
detectopencvthrrun1230.py

@@ -0,0 +1,1247 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+import matplotlib.path as mat
+import requests
+import argparse
+import os
+import platform
+import sqlite3
+import sys
+import threading
+import time
+from pathlib import Path
+import signal
+import torch
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import Process,Manager,Value
+from multiprocessing import Queue
+from multiprocessing import set_start_method
+import multiprocessing
+import multiprocessing as mp
+import numpy as np
+import platform
+import pathlib
+from collections import defaultdict, deque
+#import websockets
+import asyncio
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+import math
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages,LoadStreams, LoadStreamsSQLNEWN,LoadStreamsSQL,LoadStreamsSQLNRERT,LoadStreamsVEight,LoadStreamsSQLTN
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh,strtolst,strtolstl,apply_classifier1,task)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import select_device, smart_inference_mode
+#from testpool import func1,TestA
+from ultralytics import YOLO
+from ultralytics.trackers.bot_sort import BOTSORT
+from ultralytics.utils.checks import check_yaml
+from ultralytics.utils import IterableSimpleNamespace, yaml_load, ops
+from ultralytics.nn.autobackend import AutoBackend
+from ultralytics.utils.ops import non_max_suppression
+from ultralytics.engine.results import Results
+# def my_handler(signum, frame):
+#     exit(0)
+#url = "http://36.7.84.146:18802/ai-service/open/api/operate/upload"
+plt = platform.system()
+if plt != 'Windows':
+    pathlib.WindowsPath = pathlib.PosixPath
+urlhead = "http://172.19.152.231"
+url = f"{urlhead}/open/api/operate/upload"
+urlele = f"{urlhead}/open/api/operate/fence"
+urlperson = f"{urlhead}/open/api/operate/getPersonLimitNum"
+urlt = f"{urlhead}/open/api/operate/taskList"
+urla = f"{urlhead}/open/api/operate/algorithmList"
+weburl = f"ws://36.7.84.146:28801/websocket/device"
+urlrtsp = f"{urlhead}/open/api/operate/previewURLs"
+personcountdir = {}
+algdir = {'0':'helmet','8':'danager','10':'uniform','14':'smoke','16':'fire','21':'cross','25':'fall','29':'occupancy','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'duty','98':'face','51':'run'}
+modellabeldir = {'0':'head','8':'person','10':'other','14':'smoke','16':'fire','21':'cross','25':'fall','29':'car','30':'liquid','31':'pressure','32':'sleep','33':'conveyor','34':'personcount','35':'gloves','36':'sit','37':'other','38':'person','98':'face','51':'person'}
+modelalgdir = {'helmet': '0','danager': '8','uniform': '10','smoke': '14','fire': '16','cross': '21','fall': '25','occupancy': '29','liquid': '30','pressure': '31','sleep': '32','conveyor': '33','personcount': '34','gloves': '35','sit': '36','other': '37','duty': '38','face': '98','run': '51'}
+algmodel = {}
+for key,value in algdir.items():
+    algmodel[value] = key
+
+def on_predict_postprocess_end(predictor: object, persist: bool = False,im0s=None) -> None:
+    """
+    Postprocess detected boxes and update with object tracking.
+
+    Args:
+        predictor (object): The predictor object containing the predictions.
+        persist (bool): Whether to persist the trackers if they already exist.
+
+    Examples:
+        Postprocess predictions and update with tracking
+        >>> predictor = YourPredictorClass()
+        >>> on_predict_postprocess_end(predictor, persist=True)
+    """
+
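+    # Unlike the stock ultralytics tracking callback, this variant receives im0s
+    # explicitly and drives one tracker per stream (predictor.trackers[i]); it assumes
+    # predictor.results and im0s are index-aligned.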
+    for i in range(len(im0s)):
+        tracker = predictor.trackers[i]
+
+        det = predictor.results[i].boxes.cpu().numpy()
+        #print(f'det = {det}')
+        if len(det) == 0:
+            continue
+        #print(f'predet = {type(det)}')
+        tracks = tracker.update(det, im0s[i])
+        if len(tracks) == 0:
+            continue
+        idx = tracks[:, -1].astype(int)
+        predictor.results[i] = predictor.results[i][idx]
+
+        update_args = {"boxes": torch.as_tensor(tracks[:, :-1])}
+        predictor.results[i].update(**update_args)
+
+def map_to_ellipse(position):
+    x, y = position
+    center_x = 640
+    center_y = 360
+    a = 580
+    b = 280
+
+    x_norm = x / 1280
+    y_norm = y / 720
+
+    d_norm = math.sqrt((x_norm - 0.5) ** 2 + (y_norm - 0.5) ** 2)
+    theta_norm = math.atan2(y_norm - 0.5, x_norm - 0.5)
+    f = d_norm
+    a_new = a * f
+    b_new = b * f
+
+    bias_x = center_x + a_new * math.cos(theta_norm)
+    bias_y = center_y + b_new * math.sin(theta_norm)
+
+    return np.array([bias_x, bias_y])
+
+class YoloOpt:
+    def __init__(self, weights=ROOT / 'yolov5s.pt',source=ROOT / 'data/images',data=ROOT / 'data/coco128.yaml',
+                 imgsz=(640,640),
+                 conf_thres=0.80,
+                 iou_thres=0.45,
+                 max_det=1000,
+                 device='',
+                 view_img=False,
+                 save_txt=False,
+                 save_conf=False,
+                 save_crop=False,
+                 nosave=True,
+                 classes=None,
+                 agnostic_nms=False,
+                 augment=False,
+                 visualize=False,
+                 update=False,
+                 project=ROOT / 'runs/detect',
+                 name='exp',
+                 exist_ok=False,
+                 line_thickness=1,
+                 hide_labels=False,
+                 hide_conf=False,
+                 half=False,
+                 dnn=False,
+                 vid_stride=10,
+                 classify=False):
+
+        self.weights = weights  # path to the model weights file
+        self.source = source  # input source to run inference on
+        self.data = data
+        if imgsz is None:
+            imgsz = (640, 640)
+        self.imgsz = imgsz  # input image size, default (640, 640)
+        self.conf_thres = conf_thres  # object confidence threshold used in NMS, default 0.25
+        self.iou_thres = iou_thres  # IoU threshold used in NMS, default 0.45
+        self.device = device  # device to run inference on ('' lets select_device choose)
+        self.view_img = view_img  # whether to display annotated images/videos, default False
+        self.classes = classes  # keep only the listed class indices; default keeps all classes
+        self.agnostic_nms = agnostic_nms  # class-agnostic NMS across categories, default False
+        self.augment = augment  # augmented inference (TTA / multi-scale), may improve accuracy
+        self.update = update  # if True, run strip_optimizer on the weights to drop optimizer state, default False
+        self.exist_ok = exist_ok  # if True, reuse the existing project/name directory instead of incrementing it, default False
+        self.project = project  # directory for saving run outputs; not used by this program
+        self.name = name  # run name; not used by this program
+        self.max_det = max_det
+        self.save_txt = save_txt
+        self.save_conf= save_conf
+        self.save_crop= save_crop
+        self.nosave = nosave
+        self.visualize = visualize
+        self.line_thickness = line_thickness
+        self.hide_labels = hide_labels
+        self.hide_conf = hide_conf
+        self.half = half
+        self.dnn = dnn
+        self.vid_stride = vid_stride
+        self.classify = classify
+class Detect:
+    def __init__(self, weights = ROOT / 'yolov5s.pt' , imgsz=(640,640),source="changshusql1103.db",classify=False,conf_thres=0.80,device='',channelsl=''):
+        print(f'detectweights = {weights}')
+        self.opt = YoloOpt(weights=weights, imgsz=imgsz,source=source,classify=classify,conf_thres=conf_thres,device=device)
+        self.source = str(self.opt.source)
+        self.save_img = not self.opt.nosave and not source.endswith('.txt')  # save inference images
+        is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+        is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+        self.webcam = True
+        screenshot = self.source.lower().startswith('screen')
+        if is_url and is_file:
+            self.source = check_file(self.source)  # download
+        self.save_dir = increment_path(Path(self.opt.project) / self.opt.name, exist_ok=self.opt.exist_ok)  # increment run
+        #self.save_dir = self.save_dir / Path(self.opt.weights).stem
+        #self.save_dir.mkdir(parents=True, exist_ok=True)
+        (self.save_dir / 'labels' if self.opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+        #self.imgsz = check_img_size(self.opt.imgsz, s=self.stride)
+
+        # self.readchannel = self.readpoint()
+        #print(self.imgsz)
+        self.updatetime = time.time()
+        #self.filetime = os.path.getmtime(self.opt.weights)
+        bs = 1  # batch_size
+        if self.webcam:
+            #self.view_img = check_imshow(warn=True)
+            self.view_img = False
+            # dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+            tt= 0
+            numworks = 1
+            self.dataset = LoadStreamsSQLTN(channelsl, source, img_size=640,
+                                        auto=True, vid_stride=10, tt=tt, numworks=numworks)
+            # bs = len(dataset)
+        elif screenshot:
+            dataset = LoadScreenshots(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
+        else:
+            self.dataset = LoadImages(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt, vid_stride=self.opt.vid_stride)
+        self.speed_threshold = 60
+        self.high_velocity_count_threshold = 20
+        # t1 = threading.Thread(target=self.load,daemon=True)
+        # t1.start()
+    @smart_inference_mode()
+    def infer(self,weights,classify,conf_thres,device,runmodel):
+        tracker = check_yaml(r'/home/h3c/.local/lib/python3.8/site-packages/ultralytics/cfg/trackers/botsort.yaml')
+        cfg = IterableSimpleNamespace(**yaml_load(tracker))
+        device = select_device(device)
+        print("loadmodel device{device}")
+        model = AutoBackend(weights, device=device, dnn=self.opt.dnn, data=self.opt.data,
+                                        fp16=self.opt.half)
+        stride, names, pt = model.stride, model.names, model.pt
+        imgsz = check_img_size(self.opt.imgsz, s=stride)
+        model.warmup(imgsz=(1, 3, *imgsz))
+        trackers = []
+        for _ in range(len(self.dataset)):
+            tracker = BOTSORT(args=cfg, frame_rate=30)
+            trackers.append(tracker)
+        model.trackers = trackers
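+        # One BOTSORT instance per stream keeps track IDs independent across channels;
+        # frame_rate=30 is an assumption about the source cameras.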
+        # stride, names, pt = model.stride, model.names, model.pt
+        if classify:
+            classifier_model = torch.load(f"{Path(weights).stem}cls.pt")
+            classifier_model = classifier_model.to(device)
+            classifier_model.eval()
+            print('classify--------------------------------------------------------------------')
+        #imgsz = check_img_size(self.opt.imgsz, s=stride)
+        #model.warmup(imgsz=(1, 3, *imgsz))
+        readchannel,dirmodel = self.readpoint(weights)
+        #print(imgsz)
+        updatetime = time.time()
+        filetime = os.path.getmtime(weights)
+        #selfreadpoint();
+        pretime = time.time()
+        seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+        #
+        # print ("数据库打开成功")
+        #async with websockets.connect(uri) as websocket:
+        # for key in dirmodel.keys():
+        #     dirmodel[key]['websoc'] = await websockets.connect(dirmodel[key]['web'])
+        for path, im, im0s, vid_cap, s, videotime,channels in self.dataset:
+            #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+            #if len(queue) == 0:
+                # print('queuezero')
+            #    time.sleep(0.01)
+
+            #print(f'timee = {time.time()-timee}')
+            hour = time.localtime(time.time()).tm_hour
+            if hour not in range(7,18):
+                time.sleep(30)
+                continue
+            if time.time()-pretime>300:
+                ret,_ = self.readpoint(weights)
+                # for key in dirmodel.keys():
+                #     dirmodel[key]['websoc'] = await websockets.connect(dirmodel[key]['web'])
+                pretime = time.time()
+                if not ret:
+                    print(f'{Path(weights).stem} {runmodel}')
+                    runmodel.pop(Path(weights).stem)
+                    print(f'{Path(weights).stem} {runmodel}')
+                    break
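+            # Keep only the frames whose channel currently has an active task: np.in1d
+            # selects the matching indices and path/im/im0s/videotime are sliced
+            # consistently so the downstream arrays stay aligned.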
+            algchannel = list(dirmodel.keys())
+            #print(algaddress)
+            print(path)
+            algchannel = np.array(algchannel)
+            #pathnp = np.array(path)
+            channelsnp = np.array(channels)
+            algindex = np.where(np.in1d(channelsnp, algchannel))[0]
+            algindex = list(algindex)
+            if len(algindex)==0:
+                continue
+            path = np.array(path)
+            path = path[algindex]
+            path = path.tolist()
+            channels = np.array(channels)
+            channels = channels[algindex]
+            channels = channels.tolist()
+            #im = im[algindex]
+            #print(im0s.shape)
+            im = im[algindex]
+            #if len(im0s)==1:
+            #    im0tmp = np.asarray(im0s)
+            if len(im0s)==1:
+                im0tmp = np.asarray(im0s)
+            else:
+                im0tmp = np.asarray(im0s,dtype = object)
+            #print(f'im0tmplen = {len(im0tmp)}')
+            #if len(im0tmp)>1:
+            #except Exception:
+                #im0tmp = np.asarray(im0s)
+                #print('no object')
+            im0s = im0tmp
+            im0s = im0s[algindex]
+            # im0s = im0s.tolist()
+            print(f'algindex = {algindex}')
+            print(f'im0s ={im0s[0].shape}')
+            videotime = np.array(videotime)
+            videotime = videotime[algindex]
+            videotime = tuple(map(tuple, videotime))
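+            # The block above keeps only the frames whose channel is configured for this model.
+            # Illustrative sketch (names hypothetical): with channels = ['ch1', 'ch2', 'ch3'] and
+            # algchannel = ['ch2'], np.where(np.in1d(channels, algchannel))[0] -> [1], so path, im,
+            # im0s, channels and videotime are all reduced to that single index.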
+                # if self.contentid[0][3] == 1 :
+                #    dataset.close()
+                #    print('newstreaming=', self.contentid[0][3])
+                #    conn = sqlite3.connect(self.source)
+                #    c = conn.cursor()
+                #    c.execute("UPDATE CHANGESTREAM set streamimg = 0 , addstream=0,delstream=0 where modelname= (?)",(Path(self.opt.weights).stem,))
+                #    print(123)
+                #    conn.commit()
+                #    c.close()
+                #    conn.close()
+                #    print('opencv1')
+                # cv2.destroyAllWindows()
+                #    print('opencv')
+                #    break
+                # else:
+                #    print('nonewstreaming=', self.contentid[0][3])
+            with dt[0]:
+                im = torch.from_numpy(im).to(model.device)
+                im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+                im /= 255  # 0 - 255 to 0.0 - 1.0
+                if len(im.shape) == 3:
+                    im = im[None]  # expand for batch dim
+
+            # Inference
+            with dt[1]:
+                visualize = False
+                #print('error')
+                print(im.shape)
+                pred = model(im, augment=self.opt.augment, visualize=visualize)
+            if classify:
+                self.postprocess(pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime,
+                                 dirmodel, weights, filetime, classifier_model, model)
+                #pass
+            else:
+                self.postprocess(pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime,
+                                 dirmodel, weights, filetime, None, model)
+                #await self.postprocess(pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime,
+            #dirmodel, weights, filetime, None)
+               # print(f'predshape= {')
+
+            # NMS
+                #processlist = []
+            #for i in range(3):
+            #    process = Process(target=self.postprocess,args=(pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s))
+            #    process = Process(target=self.preprocess)
+            #    process.start()
+            #    processlist.append(process)
+            #for j in processlist:
+            #    j.join()
+            #with ProcessPoolExecutor(3) as ppool:
+                #for i in range(3):
+            #        print('hello')
+                    #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+                    #ppool.submit(func1, '张三', i)
+                    #ppool.submit(self.preprocess)
+            #self.postprocess(pred, path, im0s, dataset, im, s)
+            #process = Process(target=self.postprocess, args=(pred, path, im0s, dataset, im, s))
+                #self.postprocess(pred, path, im0s, im, s,videotime)
+                #process.start()
+    #def preprocess(self):
+    #    print('preprocess-----------------------------------------------')
+    def postprocess(self, pred, path, im0s, im, s, videotime, channels, names, dt, classify, updatetime, dirmodel, weights, filetime, classifier_model, model):
+        seen = 0
+        # dt = (Profile(), Profile(), Profile())
+        print(f'seen = {seen}')
+        windows = []
+        with dt[2]:
+            #print(f'cropshape={pred.shape}')
+            pred = non_max_suppression(pred, 0.6, self.opt.iou_thres, 0,
+                                       self.opt.agnostic_nms, max_det=self.opt.max_det)
+            if classify:
+                pred = apply_classifier1(pred, classifier_model, im, im0s,Path(weights).stem)
+
+            # Second-stage classifier (optional)
+            # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+            # Process predictions
+            #print(f'predshape={pred.shape}')
+        model.results = []
+        for i, det in enumerate(pred):  # per image
+            current_time = time.time()
+            if time.time()-updatetime>300:
+                if filetime !=os.path.getmtime(weights):
+                    device = select_device(self.opt.device)
+                    print("load new load")
+                    model = YOLO(weights, device=device, dnn=self.opt.dnn, data=self.opt.data, fp16=self.opt.half)
+                    stride, names, pt = model.stride, model.names, model.pt
+                dataele = {
+                    "algorithmCode": dirmodel[channels[i]]['classindex'],
+                    "algorithmIp":dirmodel[channels[i]]['algip'],
+                    "channel":dirmodel[channels[i]]['channel']
+                }
+                try:
+                    resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+                    resultele = resultele.split(',||')
+                    resultele = tuple(resultele)
+                    point = '%s:'*len(resultele) %resultele
+                    dirmodel[channels[i]]['point'] = point[:-2]
+                except Exception:
+                    print('post error')
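+                # A plausible reading of the fence-point format (values hypothetical): if
+                # pointCollections looks like 'x1,y1,x2,y2,||x3,y3,x4,y4,||', split(',||') yields
+                # ('x1,y1,x2,y2', 'x3,y3,x4,y4', ''), the '%s:' template gives
+                # 'x1,y1,x2,y2:x3,y3,x4,y4::', and the final [:-2] trims the two dangling colons;
+                # strtolstl() later parses this string into polygon vertex lists.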
+                if Path(weights).stem == 'personcount':
+                    try:
+                        resultper = requests.post(url=urlperson,data=dataele).json()['data']
+                        personcountdir[channels[i]] = resultper
+                    except Exception:
+                        print('urlpersonerror')
+                #try:
+                if  modelalgdir[Path(weights).stem]!='0':
+                    print(modelalgdir[Path(weights).stem])
+                    rea = requests.post(url=urla,data={'algorithmCode':modelalgdir[Path(weights).stem]}).json()['data']
+                    con = rea[0]['confidence']
+                    conf_thres = con
+                else:
+                    conf_thres = 0.25
+                #except Exception:
+                    #print('posturlaerror')
+                updatetime = time.time()
+            seen += 1
+            if self.webcam:  # batch_size >= 1
+                p, im0 = path[i], im0s[i].copy()
+                im0 = np.uint8(im0)
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(self.dataset, 'frame', 0)  # dataset is not local here; use the instance attribute
+
+            p = Path(p)  # to Path
+
+            #save_path = str(self.save_dir / p.name)  # im.jpg
+            #txt_path = str(self.save_dir / 'labels' / p.stem) + (
+               # '' #if dataset.mode == 'image' else f'_{frame}')  # im.txt
+            s += '%gx%g ' % im.shape[2:]  # print string
+            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+            imc = im0.copy()  # for save_crop
+            annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(names))
+            flag = False
+            for c in det[:, 5].unique():
+                n = (det[:, 5] == c).sum()  # detections per class
+                s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+            det[:, :4] = ops.scale_boxes(im.shape[2:], det[:, :4], im0s[i].shape)
+            # print(pred[:,:4])
+            model.results.append(Results(im0, path=path[i], names={0: 'person'}, boxes=det))
+        on_predict_postprocess_end(model, True, im0s)
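+        # on_predict_postprocess_end runs the per-stream BOTSORT trackers over model.results,
+        # so det.boxes in the loop below should also carry persistent track ids (det.boxes.id).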
+        for i,det in enumerate(model.results):
+
+            flag = False
+            print(f'++++++++++++++++++++++{det.boxes}')
+            if det.boxes and det.boxes.id is not None:
+                print('-----------------------------------')
+                if Path(weights).stem == 'personcount':
+                    personnum = 0
+                if Path(weights).stem == 'persontre':
+                    tredir = {}
+                #flag = True
+                # Rescale boxes from img_size to im0 size
+                boxes = det.boxes.xywh.cpu()
+                track_ids = det.boxes.id.int().cpu().tolist()
+                clss = det.boxes.cls.int().cpu().tolist()  # class ids, needed by the fence branch below
+
+                # Print results
+                # for c in det[:, 5].unique():
+                #     n = (det[:, 5] == c).sum()  # detections per class
+                #     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                # Write results
+                #message = '['
+                for box, track_id, cls in zip(boxes, track_ids, clss):
+                    x, y, w, h = box
+                    # corner form of the tracked box, used by the electric-fence tests below
+                    xyxy = (x - w / 2, y - h / 2, x + w / 2, y + h / 2)
+                    if dirmodel[channels[i]]['fence'] == 1:
+                        c = int(cls)
+                        labelname = names[c]
+                        if labelname in dirmodel[channels[i]]['label']:
+
+                            point = strtolstl(dirmodel[channels[i]]['point'])
+                            for poi in point:
+                                p1 = (int(xyxy[0].cpu().item()+(xyxy[2].cpu().item()-xyxy[0].cpu().item())/3), xyxy[3].cpu().item())
+                                p2 = (int(xyxy[0].cpu().item()+(xyxy[2].cpu().item()-xyxy[0].cpu().item())/3*2), xyxy[3].cpu().item())
+                                p3 = (int(xyxy[0])+(int(xyxy[2])-int(xyxy[0]))/2,int(xyxy[1])+(int(xyxy[3])-int(xyxy[1]))/2)
+                                pt = [p1,p2]
+                                if Path(weights).stem == 'danager':
+                                    pt = [p3]
+                                inflag = mat.Path(poi).contains_points(pt)
+                                if inflag.any():
+                                    x, y, x2, y2 = xyxy
+                                    w = x2 - x
+                                    h = y2 - y
+                                    flag = True
+                                    #message = message + '{' + 'x:' + str(int(x)) + ',y:' + str(int(y)) + ',w:' + str(
+                                        #int(w)) + ',h:' + str(int(h)) + ',c:' + "'" + str(round(float(conf), 2)) + "'},"
+                                #c = int(cls)  # integer class
+                                #label = f'{self.names[c]} {conf:.2f}'
+                                    label = None
+                                    annotator.box_label(xyxy, label, color=colors(c, True))
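+                        # Electric-fence branch: two foot points (p1, p2 on the bottom edge of the box)
+                        # or the box centre (p3, used for the 'danager' model) are tested against each
+                        # fence polygon with Path.contains_points (mat is presumably matplotlib.path);
+                        # any hit draws the box and marks the frame as a detection.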
+                    else:
+                        im0 = model.results[i].orig_img.copy()
+                        imc = im0.copy()
+                        cv2.rectangle(im0, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)),
+                                      (0, 255, 0), 2)
+                        bottom_left_x = int(x - w / 2)
+                        bottom_left_y = int(y + h / 2)
+
+                        # compute the box centre point
+                        center_x = int(x)
+                        center_y = int(y)
+
+                        # draw the centre point
+                        cv2.circle(im0, (center_x, center_y), 5, (255, 0, 0), -1)  # centre marker, radius 5
+                        dirmodel[channels[i]]['track_history'][track_id].append((bottom_left_x, bottom_left_y))
+                        if len(dirmodel[channels[i]]['track_history'][track_id]) > 100:
+                            del dirmodel[channels[i]]['track_history'][track_id][:-50]  # cap the history length
+
+                        # record the timestamp of every frame for this track
+                        dirmodel[channels[i]]['time_stamps'][track_id].append(current_time)
+
+                        # time interval between the two most recent frames
+                        if len(dirmodel[channels[i]]['time_stamps'][track_id]) > 1:
+                            delta_time = dirmodel[channels[i]]['time_stamps'][track_id][-1] - dirmodel[channels[i]]['time_stamps'][track_id][-2]
+                        else:
+                            delta_time = 0
+
+                        instantaneous_velocity = 0
+                        # estimate the 2-D instantaneous speed from the last two positions
+                        if len(dirmodel[channels[i]]['track_history'][track_id]) >= 2:
+                            pos1 = np.array(dirmodel[channels[i]]['track_history'][track_id][-1])  # latest position
+                            pos2 = np.array(dirmodel[channels[i]]['track_history'][track_id][-2])  # previous position
+
+                            pos1 = map_to_ellipse(pos1)
+                            pos2 = map_to_ellipse(pos2)
+                            distance = np.linalg.norm(pos1 - pos2)
+
+                            # speed = displacement / elapsed time; fall back to 0 when the frames share a timestamp
+                            instantaneous_velocity = distance / delta_time if delta_time > 0 else 0.0
+
+                            instantaneous_velocity_magnitude = round(np.linalg.norm(instantaneous_velocity), 1)
+                            dirmodel[channels[i]]['instantaneous_velocities'][track_id].append(instantaneous_velocity_magnitude)
+                        else:
+                            instantaneous_velocity_magnitude = 0
+
+                        # raise the alarm flag only when enough recent samples exceed the speed threshold
+                        high_velocity_count = sum(
+                            1 for velocity in dirmodel[channels[i]]['instantaneous_velocities'][track_id] if velocity > self.speed_threshold)
+                        if high_velocity_count >= self.high_velocity_count_threshold:
+                            cv2.putText(im0, str(instantaneous_velocity_magnitude), (int(x), int(y)),
+                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+                            flag = True
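+                        # Rough numeric sketch (threshold values hypothetical): with speed_threshold = 40 px/s
+                        # and high_velocity_count_threshold = 6, a track must exceed 40 px/s in at least 6 of
+                        # its last 30 samples (the deque maxlen set in readpoint) before 'run' is flagged.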
+                    #if self.opt.save_txt:  # Write to file
+                    #    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(
+                    #        -1).tolist()  # normalized xywh
+                    #    line = (cls, *xywh, conf) if self.opt.save_conf else (cls, *xywh)  # label format
+                    #    with open(f'{txt_path}.txt', 'a') as f:
+                    #        f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                    #if self.save_img or self.opt.save_crop or self.view_img:  # Add bbox to image
+                #         c = int(cls)  # integer class
+                #         labelname = names[c]
+                #         if labelname in dirmodel[channels[i]]['label']:
+                #             #if Path(self.opt.weights).stem == 'arm':
+                #             #label = f'{self.names[c]} {conf:.2f}'
+                #             if labelname != 'arm':
+                #                 label = None
+                #                 annotator.box_label(xyxy, label, color=colors(c, True))
+                #                 flag = True
+                #                 x, y, x2, y2 = xyxy
+                #                 w = x2 - x
+                #                 h = y2 - y
+                #                 #message = message + '{' + 'x:' + str(int(x)) + ',y:' + str(int(y)) + ',w:' + str(
+                #                 #    int(w)) + ',h:' + str(int(h)) + ',c:' + "'" + str(round(float(conf), 2)) + "'},"
+                #             if Path(weights).stem == 'personcount':
+                #                 personnum = personnum+1
+                #             if Path(weights).stem == 'arm':
+                #                 if labelname == 'arm':
+                #                     flag = False
+                #             if Path(weights).stem == 'persontre':
+                #                 if c ==4:
+                #                     tredir.setdefault(0,[])
+                #                     tredir[0].append(xyxy)
+                #                 elif c in [0,1,2]:
+                #                     tredir.setdefault(1,[])
+                #                     tredir[1].append(xyxy)
+                #         print('not dence-----------------------------------------------------')
+                #     #if self.opt.save_crop:
+                #     #    save_one_box1(xyxy, imc, file=self.save_dir / 'crops' / self.names[c] / f'{p.stem}.jpg',
+                # #                  BGR=Truei)
+                # #message = message[:-1] + ']'
+                # #if len(message)>2:
+                #     #print(message)
+                #     #print(dirmodel[channels[i]]['web'])
+                #     #print(dirmodel[channels[i]]['websoc'])
+                #     # await dirmodel[channels[i]]['websoc'].send(message)
+                #     # await asyncio.sleep(0.005)
+                # if Path(weights).stem == 'personcount':
+                #     if personnum < int(personcountdir[channels[i]]):
+                #         flag = False
+                # if Path(weights).stem == 'persontre':
+                #     if len(list(tredir.keys()))==2:
+                #             result = judgeiou(tredir)
+                #             if result == True:
+                #                 flag = True
+                #             else:
+                #                 flag = False
+                #     else:
+                #         flag = False
+
+            #if len(self.dirmodel[channels[i]]['framelist'])>=50:
+            #    self.dirmodel[channels[i]]['framelist'].pop(0)
+            #    self.dirmodel[channels[i]]['framelist'].append(annotator.result())
+            #else:
+            #    self.dirmodel[channels[i]]['framelist'].append(annotator.result())
+            if flag:
+                #if self.dirmodel[path[i]]['imgtime'] != videotime[i]:
+                dirmodel[channels[i]]['detframe'].pop(0)
+                dirmodel[channels[i]]['detframe'].append(1)
+                dirmodel[channels[i]]['preim'] = im0
+                dirmodel[channels[i]]['oripreim'] = imc
+                dirmodel[channels[i]]['posttime'] = videotime[i]
+                print(dirmodel[channels[i]]['detframe'])
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+            else:
+                #print(f'deti= {i}')
+                #print(detframe[i])
+                #if self.dirmodel[channels[i]]['imgtime'] != videotime[i]:
+                dirmodel[channels[i]]['detframe'].pop(0)
+                dirmodel[channels[i]]['detframe'].append(0)
+                print(dirmodel[channels[i]]['detframe'])
+                #self.dirmodel[channels[i]]['imgtime'] = videotime[i]
+                #print(detframe[i])
+            # Stream results
+            #im0 = annotator.result()
+            #print(f'i = {i}')
+            #print(channels[i])
+            #print(postpretime[i])
+            #print(detframe[i])
+            if not dirmodel[channels[i]]['detflag'] and dirmodel[channels[i]]['detframe'].count(1)>=1:
+                dirmodel[channels[i]]['detflag'] = True
+                dirmodel[channels[i]]['detpretime'] = time.time()
+            elif dirmodel[channels[i]]['detframe'].count(1)==0 :
+                dirmodel[channels[i]]['detflag'] = False
+                dirmodel[channels[i]]['detpretime'] = float('inf')
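+            # detframe is a 2-slot sliding window: one positive frame arms detflag and records
+            # detpretime; two consecutive negative frames disarm it. The block below only posts an
+            # alarm once the detection has persisted for 'durtime' seconds and at least 30 s have
+            # passed since the previous post for this channel.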
+            # Stream results
+            #im0 = annotator.result()
+
+            if time.time() - dirmodel[channels[i]]['postpretime'] >30 and time.time() - dirmodel[channels[i]]['detpretime'] > dirmodel[channels[i]]['durtime']  and dirmodel[channels[i]]['detflag']:
+            #print()
+            #if time.time() - self.dirmodel[channels[i]]['postpretime'] >30 and self.dirmodel[channels[i]]['detflag']:
+                #print(time.time() - self.dirmodel[channels[i]]['detpretime'])
+                #print(self.dirmodel[channels[i]]['detflag'])
+                #print(f'{Path(self.opt.weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.time()}')
+                #time.sleep(30)
+             #print(time.time() - postpretime[i])
+                #print('111111111111111111111111111111111111111111111111')
+                #print(dirmodel[channels[i]]['preim'].shape)
+                success, encoded_image = cv2.imencode('.jpg', dirmodel[channels[i]]['preim'])
+                content = encoded_image.tobytes()
+                successori, encoded_imageori = cv2.imencode('.jpg', dirmodel[channels[i]]['oripreim'])
+                contentori = encoded_imageori.tobytes()
+                filename = f'{p.stem}_{int(time.time())}.jpg'
+                filenameori = f'ori{p.stem}_{int(time.time())}.jpg'
+                print(f'str(p) {p.name}')
+                print(channels[i])
+                payload = {'channel': dirmodel[channels[i]]['channel'],
+                                   'classIndex': dirmodel[channels[i]]['classindex'],
+                                   'ip': dirmodel[channels[i]]['algip'],
+                                   'videoTime': time.strftime('%Y-%m-%d %H:%M:%S', dirmodel[channels[i]]['posttime']),
+                                   'videoUrl': channels[i]}
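+                # payload carries the channel / algorithm metadata for the alarm record; the
+                # annotated frame and the original frame are attached below as JPEG files.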
+
+                fourcc = cv2.VideoWriter_fourcc(*'MP4V')
+                fps = 6
+                height,width,_ = dirmodel[channels[i]]['preim'].shape
+                year=time.strftime('%Y',time.localtime(time.time()))
+                month=time.strftime('%m',time.localtime(time.time()))
+                day=time.strftime('%d',time.localtime(time.time()))
+                vidsavefold = f'/mnt/yolo/videos/{Path(weights).stem}/{year}/{month}/{day}'
+                vidsaveflod = Path(vidsavefold)
+                vidsaveflod.mkdir(parents=True, exist_ok=True)
+                timesave = time.strftime('%Y-%m-%d-%H:%M:%S', dirmodel[channels[i]]['posttime'])
+                #out = cv2.VideoWriter(f'{vidsaveflod}/{timesave}.mp4', fourcc, fps, (width, height))
+                #for imgframe in self.dirmodel[channels[i]]['framelist']:
+                    #success, encoded_image = cv2.imencode('.jpg', self.dirmodel[channels[i]]['preim'])
+                    #content = encoded_image.tobytes()
+                #    out.write(imgframe)
+                #out.release()
+                files = [
+                            ('file', (filename, content, 'image/jpeg')),
+                            ('oldFile',(filenameori, contentori, 'image/jpeg')),
+                            #('videoFile',open(f'{vidsaveflod}/{timesave}.mp4','rb'))
+                        ]
+
+                print(f'{Path(weights).stem}post----{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))}')
+                
+                try:
+                #    if self.dirmodel[channels[i]]['postvidpretime'] != self.dirmodel[channels[i]]['posttime'] and not self.dirmodel[channels[i]]['postvideo']:
+                    resulttext = requests.post(url, data=payload, files=files)
+                #        self.dirmodel[channels[i]]['postvidpretime'] = self.dirmodel[channels[i]]['posttime']
+                #        self.dirmodel[channels[i]]['postid'] = resulttext.json()['data']
+                #        self.dirmodel[channels[i]]['postframe'] = 0
+                #        self.dirmodel[channels[i]]['postvideo'] = True
+                #        self.dirmodel[channels[i]]['videoname'] = f'{vidsaveflod}/{timesave}.mp4'
+                        #videoqueuea.append(resulttext,f'{vidsaveflod}/{timesave}.mp4',)
+                    print(f'resulttext = {resulttext.json()["data"]}')
+                    
+                except Exception:
+                    print("posterror")
+                #if self.dirmodel[channels[i]]['postvideo']:
+
+                #print(f'resulttext = {resulttext}')
+                #time.sleep(3000)
+                dirmodel[channels[i]]['postpretime'] = time.time()
+                dirmodel[channels[i]]['detflag'] = False
+                #timesave = time.strftime('%Y-%m-%d-%H:%M:%S', self.dirmodel[channels[i]]['posttime'])
+                #year=time.strftime('%Y',time.localtime(time.time()))
+                #month=time.strftime('%m',time.localtime(time.time()))
+                #day=time.strftime('%d',time.localtime(time.time()))
+                savefold = f'/mnt/yolo/images/{Path(weights).stem}/{year}/{month}/{day}'
+                saveflod = Path(savefold)
+                detsavefold = f'/mnt/yolo/detimages/{Path(weights).stem}/{year}/{month}/{day}'
+                detsavefold = Path(detsavefold)
+                saveflod.mkdir(parents=True, exist_ok=True)
+                detsavefold.mkdir(parents=True, exist_ok=True)
+                cv2.imwrite(f'{savefold}/{timesave}.jpg',dirmodel[channels[i]]['oripreim'])
+                cv2.imwrite(f'{detsavefold}/{timesave}det.jpg',dirmodel[channels[i]]['preim'])
+                
+            self.view_img = False  # local display is force-disabled; set True to debug with cv2.imshow
+            if self.view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(f'{str(p)}-{Path(weights).stem}',
+                                    cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(f'{str(p)}-{Path(weights).stem}', im0.shape[1], im0.shape[0])
+                im1 = cv2.resize(im0, (1280, 720))
+                cv2.imshow(f'{str(p)}-{Path(weights).stem}', im1)
+                cv2.waitKey(1)  # 1 millisecond
+
+            # Save results (image with detections)
+
+            # Print time (inference-only)
+            print(f'channels[i]={channels[i]}')
+            LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms {time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time()))} {Path(weights).stem}")
+
+    # def load(self):
+    #     conn = sqlite3.connect(self.source)
+    #     c = conn.cursor()
+    #     while True:
+    #         #
+    #         # print("database opened successfully")
+    #
+    #         cursor = c.execute(
+    #             "SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname= (?)",(Path(self.opt.weights).stem,))
+    #         # content = cursor.fetchall()
+    #         # if content[0][1] ==1 or content[0][2] ==1:
+    #         #     c.execute("UPDATE CHANGESTREAM set streamimg = 1 where modelname='yolov5s'")
+    #         #     print("updata changestream")
+    #         #     conn.commit()
+    #         # cursor = c.execute(
+    #         # "SELECT modelname, addstream,delstream,streamimg  from CHANGESTREAM WHERE modelname='yolov5s'")
+    #         self.contentid = cursor.fetchall()
+    #         #global tag
+    #         #tag = Value('i', self.contentid[0][3])
+    #         #print(tag.value==1)
+    #         print(f'loadcontent={self.contentid[0][3]}')
+    #         time.sleep(3)
+    #     c.close()
+    #     conn.close()
+    def readpoint(self,weights):
+        #conn = sqlite3.connect(self.source)
+        #c = conn.cursor()
+        #cursor = c.execute(
+        #    "SELECT address,fence,point,channel,classindex,ip ,algip,label, durtime from STREAM WHERE modelname= (?)",
+        #    (Path(self.opt.weights).stem,))
+        data = {
+                    "algorithmCode": algmodel[Path(weights).stem],
+                    "deviceIp":None
+                }
+        dirmodel = {}
+        result = requests.post(url=urlt,data=data).json()['data']
+        channell=[]
+        for info in result: 
+        #content = cursor.fetchall()
+        #self.dirmodel = {}
+        #for address,fence,point,channel,classindex,ip ,algip,label,durtime in content:
+            #address = f'{address[:-1]}0'
+            channel = info["deviceChannel"]
+            channell.append(channel)
+            dirmodel[channel] = {}
+            dirmodel[channel]['fence'] = 1 if len(info["electricFence"])>0 else 0
+            #self.dirmodel[channel]['point'] = point
+            dirmodel[channel]['channel'] = info['deviceChannel']
+            dirmodel[channel]['classindex'] = info['algorithmCode']
+            dirmodel[channel]['ip'] = info['deviceIp']
+            dirmodel[channel]['algip'] = info['deviceAlgorithmIp']
+            dataele = {
+                    "algorithmCode": dirmodel[channel]['classindex'],
+                    "algorithmIp":dirmodel[channel]['algip'],
+                    "channel":dirmodel[channel]['channel']
+                }
+            resultele = requests.post(url=urlele,data=dataele).json()['data']['pointCollections']
+            resultele = resultele.split(',||')
+            resultele = tuple(resultele)
+            point = '%s:'*len(resultele) %resultele
+            if Path(weights).stem == 'personcount':
+                resultper = requests.post(url=urlperson,data=dataele).json()['data']
+                personcountdir[channel] = resultper
+            dirmodel[channel]['point'] = point[:-2]
+            dirmodel[channel]['preim'] = None
+            dirmodel[channel]['oripreim'] = None
+            dirmodel[channel]['detframe'] = [0 for _ in range(2)]
+            dirmodel[channel]['postpretime'] = 0
+            dirmodel[channel]['detflag'] = False
+            dirmodel[channel]['detpretime'] = float('inf')
+            dirmodel[channel]['label'] = modellabeldir[data['algorithmCode']]
+            dirmodel[channel]['durtime'] = 0
+            dirmodel[channel]['posttime'] = 0
+            dirmodel[channel]['track_history'] = defaultdict(lambda: [])
+            dirmodel[channel]['time_stamps'] = defaultdict(lambda: deque(maxlen=50))
+            dirmodel[channel]['instantaneous_velocities'] = defaultdict(lambda: deque(maxlen=30))
+            #tmp = f'{weburl}/{channel}/{info["deviceAlgorithmIp"]}'
+            #dirmodel[channel]['web'] = f'{weburl}/{info["deviceId"]}/{info["algorithmCode"]}'
+        print(dirmodel)
+        return sorted(channell),dirmodel
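+    # Sketch of what readpoint() returns (field values hypothetical):
+    #   channell           -> sorted list of device channel ids handled by this model
+    #   dirmodel[channel]  -> per-channel dict with the platform config ('fence', 'point', 'label',
+    #                         'algip', ...) plus runtime state ('detframe', 'detflag',
+    #                         'track_history', 'time_stamps', 'instantaneous_velocities', ...)
+    #                         consumed by postprocess().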
+    #def strtolst(self,str):
+    #    str = str.split(":")
+    #    lista = []
+    #    for liststr in str:
+    #        if len(liststr) > 0:
+    #            li = liststr.split(',')
+    #            listpoint = []
+    #            for i, j in zip(li[::2], li[1::2]):
+    #                listpoint.append((i, j))
+    #            lista.append(listpoint)
+    #    return listpoint
+
+
+#def preprocess():
+#        print('preprocess-----------------------------------------------')
+
+# def getframe(queue,channelsl,source,tt,numworks,lock,numworkv):
+#     while True:
+#         print("dataloader")
+#         imgsz = [640, 640]
+#         print(f'source = {source}')
+#         print(f'inchannel ={channelsl}')
+#
+#         bs = len(dataset)
+#         vid_path, vid_writer = [None] * bs, [None] * bs
+#         # self.detframe = [[0 for _ in range(8)] for i in range(bs)]
+#         # self.postpretime = [0]*bs
+#         # Run inference
+#
+#         #imgsz = (1 , 3, *self.imgsz)
+#         print(imgsz)
+#         #self.model.warmup(imgsz=(1 , 3, *imgsz))  # warmup
+#         seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+#         sourcebase = 'project.db'
+#         #
+#         # print("database opened successfully")
+#         pretime = time.time()
+#         tag = 0
+#         for path, im, im0s, vid_cap, s, videotime,channels in dataset:
+#             # print('*'*21)
+#             # global tag
+#             if time.time()-pretime > 30:
+#                 channellist = []
+#                 pretime = time.time()
+#                 data = {
+#                     "algorithmCode": None,
+#                     "deviceIp":None
+#                 }
+#                 result = requests.post(url=urlt,data=data).json()['data']
+#                 for info in result:
+#                     channellist.append((info['deviceChannel'],info['playbackAddress']))
+#                 channelsa  = []
+#                 sourcea = []
+#                 channellist = set(channellist)
+#                 channellist = sorted(channellist,key=lambda x:x[0])
+#                 #channellist = set(channellist)
+#                 for cha,add in channellist:
+#                     channelsa.append(cha)
+#                     sourcea.append(add)
+#                 channelsl = sorted(channelsl)
+#                 #channelsa = sorted(channelsa)
+#                 if channelsa!=channelsl:
+#                     print(f'channelsa = {channelsa}')
+#                     print(f'channelsl = {channelsl}')
+#                     dataset.close()
+#                     channelsl = channelsa
+#                     source = sourcea
+#                     break;
+#
+#                 #conn = sqlite3.connect(sourcebase)
+#                 #c = conn.cursor()
+#                 #cursor = c.execute("SELECT modelname, addstream,delstream,streaming  from CHANGESTREAM WHERE modelname = 'stream'")
+#                 #contentid = cursor.fetchall()
+#                 #tag = contentid[0][3]
+#             #if tag == 1:
+#             #    lock.acquire()
+#             #    numworkv.value += 1
+#             #    dataset.close()
+#             #    if numworkv.value==3:
+#             #        print('newstreaming=', tag)
+#             #        conn = sqlite3.connect(source)
+#             #        c = conn.cursor()
+#             #        c.execute("UPDATE CHANGESTREAM set streaming = 0 , addstream=0,delstream=0 where modelname= 'helmet'")
+#             #        print(123)
+#             #        conn.commit()
+#             #        c.close()
+#             #        conn.close()
+#             #    lock.release()
+#             #    print('opencv1')
+#                 # cv2.destroyAllWindows()
+#             #    print('opencv')
+#             #    break
+#             #else:
+#             #    print('nonewstreaming=', tag)
+#             queue.put((path, im, im0s, vid_cap, s, videotime,channels))
+#             queue.get() if queue.qsize() > 3 else time.sleep(0.001)
+#
+# def getmutpro(channels,source,streamlist,numworkv,lock,numworks=1):
+#     processlist = []
+#     queuelist = []
+#     for i in range(numworks):
+#         queue = Queue(maxsize=4)
+#         process = Process(target=getframe,
+#                         args=(queue, channels,source, i,numworks,lock,numworkv))
+#         processlist.append(process)
+#         process.start()
+#         queuelist.append(queue)
+#     return queuelist
+#     #path = []
+#     #im0s = []
+#     #vid_cap = None
+#     #s = ''
+#     #videotime = []
+#     #while True:
+#     #    imlist = []
+#     #    pathlist = []
+#     #    im0slist = []
+#     #    channelslist = []
+#     #    vid_cap = None
+#     #    s = ''
+#     #    videotimelist = []
+#     #    for q in queuelist:
+#     #        if q.qsize()>0:
+#     #            setframe = q.get()
+#     #            path, im, im0s, vid_cap, s, videotime,channels = setframe
+#     #            channelslist += channels
+#     #            pathlist += path
+#     #            im0slist += im0s
+#     #            videotimelist += videotime
+#     #            imlist.append(im)
+#     #    if len(imlist)>0:
+#     #        im = np.concatenate(imlist)
+#     #    if len(pathlist)>0:
+#     #        print(len(path),im.shape,len(im0s))
+#     #        streamlist.append((pathlist, im, im0slist, vid_cap, s, videotimelist,channelslist))
+#             #print(f'streamlist = {len(streamlist)}')
+#     #    streamlist.pop(0) if len(streamlist) > 3 else time.sleep(0.001)
+
+def modelfun(detectdemo,weights,classify=False,conf_thres=0.80,device='',runmodel=None):
+    print(weights)
+    #detectdemo=Detect(weights=weights,source=sourcedb,classify=classify,conf_thres=conf_thres,device=device)
+    detectdemo.infer(weights,classify,conf_thres,device,runmodel)
+    #detectdemo.infer(weights, classify, conf_thres, device, runmodel)
+
+def detectmut(channellist,source,modellist,contentlist,modeladir,runmodel={},deviceid=''):
+    detectdemo = Detect(channelsl=channellist,source=source)
+        # while True:
+    for modelname in modellist:
+        if modelname in contentlist:
+            #if modelname not in runmodel:
+        #print(i)
+        #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+                #if modelname in ['fall','helmet','bag','arm']:
+            print(f'weights ={modelname}.pt deviceid {deviceid}')
+            #c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
+            #rea = c.fetchall()
+            process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',modeladir[modelname]['cla'],modeladir[modelname]['conf'],deviceid,runmodel))
+            #elif modelname in ['personcount','persontre']:
+            #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,False,0.50,'',runmodel))
+            #elif modelname in ['uniform']:
+            #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,True,0.50,'',runmodel))
+            #else:
+            #    process = Process(target=modelfun,args=(streamlist,f'{modelname}.pt',dbpath))
+            #runmodel[modelname] = 1
+    #processes.append(process)
+    #process.start()
+    #detectobj = Process(target=detectdemo.infer,args=(queue,))
+    # Detect(weights=f'{i[0]}.pt')
+
+            time.sleep(3)
+            process.start()
+
+
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+    opt = parser.parse_args()
+    return opt
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    torch.multiprocessing.set_start_method('spawn')
+    #set_start_method('spawn')
+    #multiprocessing.set_start_method('spawn')
+    torch.cuda.set_per_process_memory_fraction(0.6)
+    opt = parse_opt()
+    dbpath = 'project.db'
+    conn = sqlite3.connect(dbpath)
+#
+# print ("数据库打开成功")
+    c = conn.cursor()
+    task(c,conn,urlt,urla)
+    cursor = c.execute('select channel,algip  from stream where modelname = "run"')
+    result = cursor.fetchall()
+    for channel ,algip in result:
+        data = {
+        "channel": channel,
+        "ip":algip
+        }
+        #personcountdir[channel] = num
+        address = requests.post(url=urlrtsp,data=data).json()['msg']
+        c.execute('UPDATE STREAM set address= (?) where channel =(?)',(address,channel))
+    conn.commit()
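+    # Refresh the playback address of every 'run' channel from the platform (urlrtsp) and write
+    # it back to the STREAM table before the detection processes are started.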
+    cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'run'")
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+    content = cursor.fetchall()
+    contentlist = []
+    for con in content:
+        contentlist.append(con[0])
+    #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+    cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname = 'run'")
+    contenta = cursor.fetchall()
+    source = []
+    modellist = []
+    addcha = []
+    channellist = []
+    for i in contenta:
+        addcha.append((i[0],i[2]))
+        modellist.append(i[1])
+    addcha = set(addcha)
+    addcha = sorted(addcha,key=lambda x:x[1])
+    for a,cha in addcha:
+        source.append(a)
+        channellist.append(cha)
+    print(addcha)
+    #source = set(source)
+    source = list(source)
+    #source.sort()
+    modellist = set(modellist)
+    n = len(content)
+    print(f'modelname={n}')
+    print(content)
+    #content.reverse()
+    print(content)
+    # main(opt)
+    #processes = []
+    streamqueue = Queue(maxsize=4)
+    numworkv = Value('i', 0)
+    manager = Manager()
+    lock = multiprocessing.Lock()
+    streamlist = manager.list()
+    numworks = 2
+
+    #queuelist = getmutpro(channellist,source, streamlist, numworkv, lock, numworks)
+    #thread.start()
+    #videoqueue = Queue(maxsize=20)
+    #thread1 = threading.Thread(target=postvideo, args=(videoqueue,))
+    #thread1.start()
+    #pool = ThreadPoolExecutor(max_workers=n)
+    #runmodel = manager.dict()
+    modeladir = {}
+    for modelname in modellist:
+        if modelname in contentlist:
+            #if modelname not in runmodel:
+        #print(i)
+        #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+                #if modelname in ['fall','helmet','bag','arm']:
+            print(f'weights ={modelname}.pt')
+            c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
+            rea = c.fetchall()
+            #print(f'rea')
+            modeladir.setdefault(modelname,{})
+            modeladir[modelname]['conf'] = rea[0][0]
+            modeladir[modelname]['cla'] = rea[0][1]
+    runmodel = {}
+    for deviceid,num in enumerate(range(0,len(channellist),13)):
+        process = Process(target=detectmut,args=(channellist[num:num+13],source[num:num+13],modellist,contentlist,modeladir,{},0))
+        time.sleep(3)
+        process.start()
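+        # Channels are sharded into groups of 13, one Detect process per group; each process then
+        # starts one thread per model via detectmut(). The trailing 0 appears to pin all groups to
+        # the same device id.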
+        #deviceid = deviceid+1
+        # detectdemo = Detect(channelsl=channellist[0:num],source=source[0:num])
+        # # while True:
+        # for modelname in modellist:
+        #     if modelname in contentlist:
+        #         if modelname not in runmodel:
+        #     #print(i)
+        #     #detectdemo=Detect(weights=f'/mnt/project/yolodemo/yolov5-master/{i[0]}.pt')
+        #             #if modelname in ['fall','helmet','bag','arm']:
+        #             print(f'weights ={modelname}.pt')
+        #             c.execute('select conf,cla from changestream where modelname = (?)',(modelname,))
+        #             rea = c.fetchall()
+        #             process = threading.Thread(target=modelfun,args=(detectdemo,f'{modelname}.pt',rea[0][1],rea[0][0],'',runmodel))
+        #             #elif modelname in ['personcount','persontre']:
+        #             #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,False,0.50,'',runmodel))
+        #             #elif modelname in ['uniform']:
+        #             #    process = Process(target=modelfun,args=(streamlist,videoqueue,f'{modelname}.pt',dbpath,True,0.50,'',runmodel))
+        #             #else:
+        #             #    process = Process(target=modelfun,args=(streamlist,f'{modelname}.pt',dbpath))
+        #             runmodel[modelname] = 1
+        #     #processes.append(process)
+        #     #process.start()
+        #     #detectobj = Process(target=detectdemo.infer,args=(queue,))
+        #     # Detect(weights=f'{i[0]}.pt')
+
+        #             time.sleep(3)
+        #             process.start()
+        # time.sleep(900)
+        # task(c,conn,urlt,urla)
+        # cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet' or modelname = 'fall' or modelname = 'uniform' or modelname = 'personcount' or modelname = 'arm' or modelname = 'bag'")
+        # #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'helmet'")
+        # content = cursor.fetchall()
+        # contentlist = []
+        # for con in content:
+        #     contentlist.append(con[0])
+        # #cursor = c.execute("SELECT address,modelname,channel from STREAM where modelname='helmet' or modelname = 'sleep' or modelname = 'smoke' or modelname = 'danager'or modelname = 'gloves' or modelname = 'other'")
+        # cursor = c.execute("SELECT address,modelname,channel from STREAM ")
+        # contenta = cursor.fetchall()
+        # source = []
+        # modellist = []
+        # addcha = []
+        # channellist = []
+        # for i in contenta:
+        #     addcha.append((i[0],i[2]))
+        #     modellist.append(i[1])
+        # addcha = set(addcha)
+        # addcha = sorted(addcha)
+        # for a,cha in addcha:
+        #     source.append(a)
+        #     channellist.append(cha)
+        # print(addcha)
+        # #source = set(source)
+        # source = list(source)
+        # #source.sort()
+        # modellist = set(modellist)
+        # n = len(content)
+        # print(f'modelname={n}')
+        # print(content)
+        # #content.reverse()
+        # print(content)
+        #pool.submit(detectobj.infer)
+
+    #cursor = c.execute("SELECT modelname from CHANGESTREAM where modelname = 'fall'")
+    #content = cursor.fetchall()
+    #n = len(content)
+    #print(f'modelname={n}')
+    #print(content)
+    #content.reverse()
+    #print(content)
+    # main(opt)
+    #processes = []
+    #pool = ProcessPoolExecutor(max_workers=n)
+    #for i in content:
+        #print(i)
+        #detectdemo=Detect(weights=f'{i[0]}.pt')
+        #process = Process(target=detectdemo.infer)
+        #processes.append(process)
+        #process.start()
+        #detectobj = Detect(weights=f'{i[0]}.pt')
+    #    time.sleep(3)
+        #pool.submit(detectobj.infer)
+    #    print('111111111111111111111111111111111111111111111111111111111')
+        #pool.submit(TestA().func1, '张三', i)
+        #print('----------------------------------------------------------------')
+    #time.sleep(3000)
+    # wait for all processes to finish
+    #for process in processes:
+    #    process.join()
+
+        #pool.submit(Detect(weights=f'{i[0]}.pt').infer)
+    # if isinstance(opt.weights,list):
+    #     opt.weights = opt.weights[0]
+    #signal.signal(signal.SIGINT, my_handler)
+    #detectdemo1 = Detect(weights=f'{content[0][0]}.pt')
+    #detectdemo1.infer()
+    #a = Test
+    #with ProcessPoolExecutor(3) as ppool:
+        #for i in range(3):
+    #        print('hello')
+                        #ppool.submit(self.postprocess,pred[i::3],path[i::3],im0s[i::3],dataset,im[i::3],s)
+            #ppool.submit(TestA().func1, '张三', i)
+    #ta = TestA()
+    #with ProcessPoolExecutor(5) as ppool: # create a pool of 5 worker processes
+    #    for i in range(1, 4):
+    #        ppool.submit(func1, '张三', i)
+    #f1= pool.submit(detectdemo1.infer)
+    # print("线程1-----------------------------------------------------------------------------------")
+    #detectdemo2 = Detect(weights=r"helmet.pt")
+    #f2=pool.submit(detectdemo2.infer)
+    # print("线程2-------------------------------------------------------------------------------------")
+    #detectdemo3 = threading.Thread(target=detectdemo3.infer)
+    #detectdemo3 = Detect(weights=r"fall.pt")
+    #f3=pool.submit(detectdemo3.infer)

+ 0 - 0
models/__init__.py


BIN
models/__pycache__/__init__.cpython-311.pyc


BIN
models/__pycache__/__init__.cpython-38.pyc


BIN
models/__pycache__/common.cpython-311.pyc


BIN
models/__pycache__/common.cpython-38.pyc


BIN
models/__pycache__/experimental.cpython-38.pyc


BIN
models/__pycache__/yolo.cpython-38.pyc


+ 871 - 0
models/common.py

@@ -0,0 +1,871 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Common modules
+"""
+
+import ast
+import contextlib
+import json
+import math
+import platform
+import warnings
+import zipfile
+from collections import OrderedDict, namedtuple
+from copy import copy
+from pathlib import Path
+from urllib.parse import urlparse
+
+import cv2
+import numpy as np
+import pandas as pd
+import requests
+import torch
+import torch.nn as nn
+from PIL import Image
+from torch.cuda import amp
+
+from utils import TryExcept
+from utils.dataloaders import exif_transpose, letterbox
+from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
+                           increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
+                           xyxy2xywh, yaml_load)
+from utils.plots import Annotator, colors, save_one_box
+from utils.torch_utils import copy_attr, smart_inference_mode
+
+
+def autopad(k, p=None, d=1):  # kernel, padding, dilation
+    # Pad to 'same' shape outputs
+    if d > 1:
+        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]  # actual kernel-size
+    if p is None:
+        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
+    return p
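+# Illustrative examples: autopad(3) -> 1 and autopad(5) -> 2 give 'same' padding for odd kernels;
+# with dilation, autopad(3, d=2) first expands the kernel to an effective size of 5 and returns 2.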
+
+
+class Conv(nn.Module):
+    # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
+    default_act = nn.SiLU()  # default activation
+
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
+        super().__init__()
+        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
+        self.bn = nn.BatchNorm2d(c2)
+        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+
+    def forward_fuse(self, x):
+        return self.act(self.conv(x))
+
+
+class DWConv(Conv):
+    # Depth-wise convolution
+    def __init__(self, c1, c2, k=1, s=1, d=1, act=True):  # ch_in, ch_out, kernel, stride, dilation, activation
+        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
+
+
+class DWConvTranspose2d(nn.ConvTranspose2d):
+    # Depth-wise transpose convolution
+    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, kernel, stride, padding, padding_out
+        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
+
+
+class TransformerLayer(nn.Module):
+    # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
+    def __init__(self, c, num_heads):
+        super().__init__()
+        self.q = nn.Linear(c, c, bias=False)
+        self.k = nn.Linear(c, c, bias=False)
+        self.v = nn.Linear(c, c, bias=False)
+        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
+        self.fc1 = nn.Linear(c, c, bias=False)
+        self.fc2 = nn.Linear(c, c, bias=False)
+
+    def forward(self, x):
+        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
+        x = self.fc2(self.fc1(x)) + x
+        return x
+
+
+class TransformerBlock(nn.Module):
+    # Vision Transformer https://arxiv.org/abs/2010.11929
+    def __init__(self, c1, c2, num_heads, num_layers):
+        super().__init__()
+        self.conv = None
+        if c1 != c2:
+            self.conv = Conv(c1, c2)
+        self.linear = nn.Linear(c2, c2)  # learnable position embedding
+        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
+        self.c2 = c2
+
+    def forward(self, x):
+        if self.conv is not None:
+            x = self.conv(x)
+        b, _, w, h = x.shape
+        p = x.flatten(2).permute(2, 0, 1)
+        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
+
+
+class Bottleneck(nn.Module):
+    # Standard bottleneck
+    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = Conv(c_, c2, 3, 1, g=g)
+        self.add = shortcut and c1 == c2
+
+    def forward(self, x):
+        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class BottleneckCSP(nn.Module):
+    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+        self.cv4 = Conv(2 * c_, c2, 1, 1)
+        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
+        self.act = nn.SiLU()
+        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
+
+    def forward(self, x):
+        y1 = self.cv3(self.m(self.cv1(x)))
+        y2 = self.cv2(x)
+        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
+
+
+class CrossConv(nn.Module):
+    # Cross Convolution Downsample
+    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
+        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, (1, k), (1, s))
+        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
+        self.add = shortcut and c1 == c2
+
+    def forward(self, x):
+        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class C3(nn.Module):
+    # CSP Bottleneck with 3 convolutions
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = Conv(c1, c_, 1, 1)
+        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)
+        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
+
+    def forward(self, x):
+        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
+
+
+class C3x(C3):
+    # C3 module with cross-convolutions
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+        super().__init__(c1, c2, n, shortcut, g, e)
+        c_ = int(c2 * e)
+        self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
+
+
+class C3TR(C3):
+    # C3 module with TransformerBlock()
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+        super().__init__(c1, c2, n, shortcut, g, e)
+        c_ = int(c2 * e)
+        self.m = TransformerBlock(c_, c_, 4, n)
+
+
+class C3SPP(C3):
+    # C3 module with SPP()
+    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
+        super().__init__(c1, c2, n, shortcut, g, e)
+        c_ = int(c2 * e)
+        self.m = SPP(c_, c_, k)
+
+
+class C3Ghost(C3):
+    # C3 module with GhostBottleneck()
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+        super().__init__(c1, c2, n, shortcut, g, e)
+        c_ = int(c2 * e)  # hidden channels
+        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
+
+
+class SPP(nn.Module):
+    # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
+    def __init__(self, c1, c2, k=(5, 9, 13)):
+        super().__init__()
+        c_ = c1 // 2  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
+        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
+
+    def forward(self, x):
+        x = self.cv1(x)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
+            return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
+
+
+class SPPF(nn.Module):
+    # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
+    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
+        super().__init__()
+        c_ = c1 // 2  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = Conv(c_ * 4, c2, 1, 1)
+        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
+
+    def forward(self, x):
+        x = self.cv1(x)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
+            y1 = self.m(x)
+            y2 = self.m(y1)
+            return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
+
+
+class Focus(nn.Module):
+    # Focus wh information into c-space
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
+        super().__init__()
+        self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
+        # self.contract = Contract(gain=2)
+
+    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
+        return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
+        # return self.conv(self.contract(x))
+
+
+class GhostConv(nn.Module):
+    # Ghost Convolution https://github.com/huawei-noah/ghostnet
+    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
+        super().__init__()
+        c_ = c2 // 2  # hidden channels
+        self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
+        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
+
+    def forward(self, x):
+        y = self.cv1(x)
+        return torch.cat((y, self.cv2(y)), 1)
+
+
+class GhostBottleneck(nn.Module):
+    # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
+    def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
+        super().__init__()
+        c_ = c2 // 2
+        self.conv = nn.Sequential(
+            GhostConv(c1, c_, 1, 1),  # pw
+            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
+            GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
+        self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
+                                                                            act=False)) if s == 2 else nn.Identity()
+
+    def forward(self, x):
+        return self.conv(x) + self.shortcut(x)
+
+
+class Contract(nn.Module):
+    # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
+    def __init__(self, gain=2):
+        super().__init__()
+        self.gain = gain
+
+    def forward(self, x):
+        b, c, h, w = x.size()  # assert (h % s == 0) and (w % s == 0), 'Indivisible gain'
+        s = self.gain
+        x = x.view(b, c, h // s, s, w // s, s)  # x(1,64,40,2,40,2)
+        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)
+        return x.view(b, c * s * s, h // s, w // s)  # x(1,256,40,40)
+
+
+class Expand(nn.Module):
+    # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
+    def __init__(self, gain=2):
+        super().__init__()
+        self.gain = gain
+
+    def forward(self, x):
+        b, c, h, w = x.size()  # assert c % s ** 2 == 0, 'Indivisible gain'
+        s = self.gain
+        x = x.view(b, s, s, c // s ** 2, h, w)  # x(1,2,2,16,80,80)
+        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)
+        return x.view(b, c // s ** 2, h * s, w * s)  # x(1,16,160,160)
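Contract and Expand are exact inverses for the same gain, which the following sketch checks (assuming models.common is importable):

import torch
from models.common import Contract, Expand

x = torch.randn(1, 64, 80, 80)
y = Contract(gain=2)(x)  # (1, 256, 40, 40): width/height folded into channels
z = Expand(gain=2)(y)    # (1, 64, 80, 80): channels unfolded back into width/height
print(y.shape, z.shape, torch.equal(x, z))  # expected: ... True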
+
+
+class Concat(nn.Module):
+    # Concatenate a list of tensors along dimension
+    def __init__(self, dimension=1):
+        super().__init__()
+        self.d = dimension
+
+    def forward(self, x):
+        return torch.cat(x, self.d)
+
+
+class DetectMultiBackend(nn.Module):
+    # YOLOv5 MultiBackend class for python inference on various backends
+    def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
+        # Usage:
+        #   PyTorch:              weights = *.pt
+        #   TorchScript:                    *.torchscript
+        #   ONNX Runtime:                   *.onnx
+        #   ONNX OpenCV DNN:                *.onnx --dnn
+        #   OpenVINO:                       *_openvino_model
+        #   CoreML:                         *.mlmodel
+        #   TensorRT:                       *.engine
+        #   TensorFlow SavedModel:          *_saved_model
+        #   TensorFlow GraphDef:            *.pb
+        #   TensorFlow Lite:                *.tflite
+        #   TensorFlow Edge TPU:            *_edgetpu.tflite
+        #   PaddlePaddle:                   *_paddle_model
+        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import
+
+        super().__init__()
+        w = str(weights[0] if isinstance(weights, list) else weights)
+        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
+        fp16 &= pt or jit or onnx or engine or triton  # FP16
+        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCHW)
+        stride = 32  # default stride
+        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
+        if not (pt or triton):
+            w = attempt_download(w)  # download if not local
+
+        if pt:  # PyTorch
+            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
+            stride = max(int(model.stride.max()), 32)  # model stride
+            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
+            model.half() if fp16 else model.float()
+            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
+        elif jit:  # TorchScript
+            LOGGER.info(f'Loading {w} for TorchScript inference...')
+            extra_files = {'config.txt': ''}  # model metadata
+            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
+            model.half() if fp16 else model.float()
+            if extra_files['config.txt']:  # load metadata dict
+                d = json.loads(extra_files['config.txt'],
+                               object_hook=lambda d: {
+                                   int(k) if k.isdigit() else k: v
+                                   for k, v in d.items()})
+                stride, names = int(d['stride']), d['names']
+        elif dnn:  # ONNX OpenCV DNN
+            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
+            check_requirements('opencv-python>=4.5.4')
+            net = cv2.dnn.readNetFromONNX(w)
+        elif onnx:  # ONNX Runtime
+            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
+            import onnxruntime
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
+            output_names = [x.name for x in session.get_outputs()]
+            meta = session.get_modelmeta().custom_metadata_map  # metadata
+            if 'stride' in meta:
+                stride, names = int(meta['stride']), eval(meta['names'])
+        elif xml:  # OpenVINO
+            LOGGER.info(f'Loading {w} for OpenVINO inference...')
+            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            from openvino.runtime import Core, Layout, get_batch
+            ie = Core()
+            if not Path(w).is_file():  # if not *.xml
+                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
+            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            if network.get_parameters()[0].get_layout().empty:
+                network.get_parameters()[0].set_layout(Layout('NCHW'))
+            batch_dim = get_batch(network)
+            if batch_dim.is_static:
+                batch_size = batch_dim.get_length()
+            executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for Intel NCS2
+            stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
+        elif engine:  # TensorRT
+            LOGGER.info(f'Loading {w} for TensorRT inference...')
+            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
+            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
+            if device.type == 'cpu':
+                device = torch.device('cuda:0')
+            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
+            logger = trt.Logger(trt.Logger.INFO)
+            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
+                model = runtime.deserialize_cuda_engine(f.read())
+            context = model.create_execution_context()
+            bindings = OrderedDict()
+            output_names = []
+            fp16 = False  # default updated below
+            dynamic = False
+            for i in range(model.num_bindings):
+                name = model.get_binding_name(i)
+                dtype = trt.nptype(model.get_binding_dtype(i))
+                if model.binding_is_input(i):
+                    if -1 in tuple(model.get_binding_shape(i)):  # dynamic
+                        dynamic = True
+                        context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
+                    if dtype == np.float16:
+                        fp16 = True
+                else:  # output
+                    output_names.append(name)
+                shape = tuple(context.get_binding_shape(i))
+                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
+                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
+            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
+            batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
+        elif coreml:  # CoreML
+            LOGGER.info(f'Loading {w} for CoreML inference...')
+            import coremltools as ct
+            model = ct.models.MLModel(w)
+        elif saved_model:  # TF SavedModel
+            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
+            import tensorflow as tf
+            keras = False  # assume TF1 saved_model
+            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
+        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
+            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
+            import tensorflow as tf
+
+            def wrap_frozen_graph(gd, inputs, outputs):
+                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), [])  # wrapped
+                ge = x.graph.as_graph_element
+                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
+
+            def gd_outputs(gd):
+                name_list, input_list = [], []
+                for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
+                    name_list.append(node.name)
+                    input_list.extend(node.input)
+                return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))
+
+            gd = tf.Graph().as_graph_def()  # TF GraphDef
+            with open(w, 'rb') as f:
+                gd.ParseFromString(f.read())
+            frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
+        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
+            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
+                from tflite_runtime.interpreter import Interpreter, load_delegate
+            except ImportError:
+                import tensorflow as tf
+                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
+            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
+                LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
+                delegate = {
+                    'Linux': 'libedgetpu.so.1',
+                    'Darwin': 'libedgetpu.1.dylib',
+                    'Windows': 'edgetpu.dll'}[platform.system()]
+                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
+            else:  # TFLite
+                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
+                interpreter = Interpreter(model_path=w)  # load TFLite model
+            interpreter.allocate_tensors()  # allocate
+            input_details = interpreter.get_input_details()  # inputs
+            output_details = interpreter.get_output_details()  # outputs
+            # load metadata
+            with contextlib.suppress(zipfile.BadZipFile):
+                with zipfile.ZipFile(w, 'r') as model:
+                    meta_file = model.namelist()[0]
+                    meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
+                    stride, names = int(meta['stride']), meta['names']
+        elif tfjs:  # TF.js
+            raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
+        elif paddle:  # PaddlePaddle
+            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
+            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
+            import paddle.inference as pdi
+            if not Path(w).is_file():  # if not *.pdmodel
+                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
+            weights = Path(w).with_suffix('.pdiparams')
+            config = pdi.Config(str(w), str(weights))
+            if cuda:
+                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
+            predictor = pdi.create_predictor(config)
+            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
+            output_names = predictor.get_output_names()
+        elif triton:  # NVIDIA Triton Inference Server
+            LOGGER.info(f'Using {w} as Triton Inference Server...')
+            check_requirements('tritonclient[all]')
+            from utils.triton import TritonRemoteModel
+            model = TritonRemoteModel(url=w)
+            nhwc = model.runtime.startswith('tensorflow')
+        else:
+            raise NotImplementedError(f'ERROR: {w} is not a supported format')
+
+        # class names
+        if 'names' not in locals():
+            names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
+        if names[0] == 'n01440764' and len(names) == 1000:  # ImageNet
+            names = yaml_load(ROOT / 'data/ImageNet.yaml')['names']  # human-readable names
+
+        self.__dict__.update(locals())  # assign all variables to self
+
+    def forward(self, im, augment=False, visualize=False):
+        # YOLOv5 MultiBackend inference
+        b, ch, h, w = im.shape  # batch, channel, height, width
+        if self.fp16 and im.dtype != torch.float16:
+            im = im.half()  # to FP16
+        if self.nhwc:
+            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)
+
+        if self.pt:  # PyTorch
+            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
+        elif self.jit:  # TorchScript
+            y = self.model(im)
+        elif self.dnn:  # ONNX OpenCV DNN
+            im = im.cpu().numpy()  # torch to numpy
+            self.net.setInput(im)
+            y = self.net.forward()
+        elif self.onnx:  # ONNX Runtime
+            im = im.cpu().numpy()  # torch to numpy
+            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
+        elif self.xml:  # OpenVINO
+            im = im.cpu().numpy()  # FP32
+            y = list(self.executable_network([im]).values())
+        elif self.engine:  # TensorRT
+            if self.dynamic and im.shape != self.bindings['images'].shape:
+                i = self.model.get_binding_index('images')
+                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic
+                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
+                for name in self.output_names:
+                    i = self.model.get_binding_index(name)
+                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
+            s = self.bindings['images'].shape
+            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
+            self.binding_addrs['images'] = int(im.data_ptr())
+            self.context.execute_v2(list(self.binding_addrs.values()))
+            y = [self.bindings[x].data for x in sorted(self.output_names)]
+        elif self.coreml:  # CoreML
+            im = im.cpu().numpy()
+            im = Image.fromarray((im[0] * 255).astype('uint8'))
+            # im = im.resize((192, 320), Image.BILINEAR)
+            y = self.model.predict({'image': im})  # coordinates are xywh normalized
+            if 'confidence' in y:
+                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
+                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)
+                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
+            else:
+                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
+        elif self.paddle:  # PaddlePaddle
+            im = im.cpu().numpy().astype(np.float32)
+            self.input_handle.copy_from_cpu(im)
+            self.predictor.run()
+            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
+        elif self.triton:  # NVIDIA Triton Inference Server
+            y = self.model(im)
+        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
+            im = im.cpu().numpy()
+            if self.saved_model:  # SavedModel
+                y = self.model(im, training=False) if self.keras else self.model(im)
+            elif self.pb:  # GraphDef
+                y = self.frozen_func(x=self.tf.constant(im))
+            else:  # Lite or Edge TPU
+                input = self.input_details[0]
+                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
+                if int8:
+                    scale, zero_point = input['quantization']
+                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
+                self.interpreter.set_tensor(input['index'], im)
+                self.interpreter.invoke()
+                y = []
+                for output in self.output_details:
+                    x = self.interpreter.get_tensor(output['index'])
+                    if int8:
+                        scale, zero_point = output['quantization']
+                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
+                    y.append(x)
+            y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
+            y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels
+
+        if isinstance(y, (list, tuple)):
+            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
+        else:
+            return self.from_numpy(y)
+
+    def from_numpy(self, x):
+        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
+
+    def warmup(self, imgsz=(1, 3, 640, 640)):
+        # Warmup model by running inference once
+        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
+        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
+            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
+            for _ in range(2 if self.jit else 1):  # TorchScript models benefit from an extra warmup pass
+                self.forward(im)  # warmup
+
+    @staticmethod
+    def _model_type(p='path/to/model.pt'):
+        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
+        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
+        from export import export_formats
+        from utils.downloads import is_url
+        sf = list(export_formats().Suffix)  # export suffixes
+        if not is_url(p, check=False):
+            check_suffix(p, sf)  # checks
+        url = urlparse(p)  # if url may be Triton inference server
+        types = [s in Path(p).name for s in sf]
+        types[8] &= not types[9]  # tflite &= not edgetpu
+        triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
+        return types + [triton]
+
+    @staticmethod
+    def _load_metadata(f=Path('path/to/meta.yaml')):
+        # Load metadata from meta.yaml if it exists
+        if f.exists():
+            d = yaml_load(f)
+            return d['stride'], d['names']  # assign stride, names
+        return None, None
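A hedged usage sketch for the class above: load a checkpoint, warm it up and run one forward pass. The weight filename and the 640x640 input size are placeholders, and the repo root is assumed to be on PYTHONPATH.

import torch
from models.common import DetectMultiBackend

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = DetectMultiBackend('yolov5s.pt', device=device, fp16=False)  # any supported weight format
model.warmup(imgsz=(1, 3, 640, 640))             # dummy pass (no-op on CPU unless Triton)
im = torch.zeros(1, 3, 640, 640, device=device)  # 0-1 float input expected
pred = model(im)                                 # raw output, typically passed to non_max_suppression next
print(model.stride, len(model.names))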
+
+
+class AutoShape(nn.Module):
+    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
+    conf = 0.25  # NMS confidence threshold
+    iou = 0.45  # NMS IoU threshold
+    agnostic = False  # NMS class-agnostic
+    multi_label = False  # NMS multiple labels per box
+    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
+    max_det = 1000  # maximum number of detections per image
+    amp = False  # Automatic Mixed Precision (AMP) inference
+
+    def __init__(self, model, verbose=True):
+        super().__init__()
+        if verbose:
+            LOGGER.info('Adding AutoShape... ')
+        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes
+        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance
+        self.pt = not self.dmb or model.pt  # PyTorch model
+        self.model = model.eval()
+        if self.pt:
+            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
+            m.inplace = False  # Detect.inplace=False for safe multithread inference
+            m.export = True  # do not output loss values
+
+    def _apply(self, fn):
+        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+        self = super()._apply(fn)
+        if self.pt:
+            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
+            m.stride = fn(m.stride)
+            m.grid = list(map(fn, m.grid))
+            if isinstance(m.anchor_grid, list):
+                m.anchor_grid = list(map(fn, m.anchor_grid))
+        return self
+
+    @smart_inference_mode()
+    def forward(self, ims, size=640, augment=False, profile=False):
+        # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
+        #   file:        ims = 'data/images/zidane.jpg'  # str or PosixPath
+        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'
+        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
+        #   PIL:             = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
+        #   numpy:           = np.zeros((640,1280,3))  # HWC
+        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
+        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
+
+        dt = (Profile(), Profile(), Profile())
+        with dt[0]:
+            if isinstance(size, int):  # expand
+                size = (size, size)
+            p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
+            autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
+            if isinstance(ims, torch.Tensor):  # torch
+                with amp.autocast(autocast):
+                    return self.model(ims.to(p.device).type_as(p), augment=augment)  # inference
+
+            # Pre-process
+            n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims])  # number, list of images
+            shape0, shape1, files = [], [], []  # image and inference shapes, filenames
+            for i, im in enumerate(ims):
+                f = f'image{i}'  # filename
+                if isinstance(im, (str, Path)):  # filename or uri
+                    im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
+                    im = np.asarray(exif_transpose(im))
+                elif isinstance(im, Image.Image):  # PIL Image
+                    im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
+                files.append(Path(f).with_suffix('.jpg').name)
+                if im.shape[0] < 5:  # image in CHW
+                    im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
+                im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # enforce 3ch input
+                s = im.shape[:2]  # HW (height, width)
+                shape0.append(s)  # image shape
+                g = max(size) / max(s)  # gain
+                shape1.append([int(y * g) for y in s])
+                ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
+            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)]  # inf shape
+            x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad
+            x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
+            x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
+
+        with amp.autocast(autocast):
+            # Inference
+            with dt[1]:
+                y = self.model(x, augment=augment)  # forward
+
+            # Post-process
+            with dt[2]:
+                y = non_max_suppression(y if self.dmb else y[0],
+                                        self.conf,
+                                        self.iou,
+                                        self.classes,
+                                        self.agnostic,
+                                        self.multi_label,
+                                        max_det=self.max_det)  # NMS
+                for i in range(n):
+                    scale_boxes(shape1, y[i][:, :4], shape0[i])
+
+            return Detections(ims, y, files, dt, self.names, x.shape)
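A minimal sketch of wrapping a backend in AutoShape so files, URLs, numpy arrays or PIL images can be passed directly, using the example URL from the docstring above; the weight filename is a placeholder.

import torch
from models.common import AutoShape, DetectMultiBackend

backend = DetectMultiBackend('yolov5s.pt', device=torch.device('cpu'))
model = AutoShape(backend)  # adds letterboxing, inference and NMS around the backend
results = model('https://ultralytics.com/images/zidane.jpg', size=640)
results.print()             # per-image detection summary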
+
+
+class Detections:
+    # YOLOv5 detections class for inference results
+    def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
+        super().__init__()
+        d = pred[0].device  # device
+        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # normalizations
+        self.ims = ims  # list of images as numpy arrays
+        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
+        self.names = names  # class names
+        self.files = files  # image filenames
+        self.times = times  # profiling times
+        self.xyxy = pred  # xyxy pixels
+        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
+        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
+        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
+        self.n = len(self.pred)  # number of images (batch size)
+        self.t = tuple(x.t / self.n * 1E3 for x in times)  # per-image times (ms)
+        self.s = tuple(shape)  # inference BCHW shape
+
+    def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
+        s, crops = '', []
+        for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
+            s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
+            if pred.shape[0]:
+                for c in pred[:, -1].unique():
+                    n = (pred[:, -1] == c).sum()  # detections per class
+                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+                s = s.rstrip(', ')
+                if show or save or render or crop:
+                    annotator = Annotator(im, example=str(self.names))
+                    for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
+                        label = f'{self.names[int(cls)]} {conf:.2f}'
+                        if crop:
+                            file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
+                            crops.append({
+                                'box': box,
+                                'conf': conf,
+                                'cls': cls,
+                                'label': label,
+                                'im': save_one_box(box, im, file=file, save=save)})
+                        else:  # all others
+                            annotator.box_label(box, label if labels else '', color=colors(cls))
+                    im = annotator.im
+            else:
+                s += '(no detections)'
+
+            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
+            if show:
+                if is_jupyter():
+                    from IPython.display import display
+                    display(im)
+                else:
+                    im.show(self.files[i])
+            if save:
+                f = self.files[i]
+                im.save(save_dir / f)  # save
+                if i == self.n - 1:
+                    LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
+            if render:
+                self.ims[i] = np.asarray(im)
+        if pprint:
+            s = s.lstrip('\n')
+            return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
+        if crop:
+            if save:
+                LOGGER.info(f'Saved results to {save_dir}\n')
+            return crops
+
+    @TryExcept('Showing images is not supported in this environment')
+    def show(self, labels=True):
+        self._run(show=True, labels=labels)  # show results
+
+    def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
+        save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir
+        self._run(save=True, labels=labels, save_dir=save_dir)  # save results
+
+    def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
+        save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
+        return self._run(crop=True, save=save, save_dir=save_dir)  # crop results
+
+    def render(self, labels=True):
+        self._run(render=True, labels=labels)  # render results
+        return self.ims
+
+    def pandas(self):
+        # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
+        new = copy(self)  # return copy
+        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
+        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
+        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
+            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
+            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
+        return new
+
+    def tolist(self):
+        # return a list of Detections objects, i.e. 'for result in results.tolist():'
+        r = range(self.n)  # iterable
+        x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
+        # for d in x:
+        #    for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+        #        setattr(d, k, getattr(d, k)[0])  # pop out of list
+        return x
+
+    def print(self):
+        LOGGER.info(self.__str__())
+
+    def __len__(self):  # override len(results)
+        return self.n
+
+    def __str__(self):  # override print(results)
+        return self._run(pprint=True)  # print results
+
+    def __repr__(self):
+        return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
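Continuing the AutoShape sketch above, the Detections object it returns exposes results in several forms; column names follow the pandas() method below:

df = results.pandas().xyxy[0]             # DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
boxes = results.xywhn[0]                  # normalized xywh tensor for the first image
crops = results.crop(save=False)          # list of dicts with per-detection crops
results.save(save_dir='runs/detect/exp')  # annotated images written to an incremented run dir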
+
+
+class Proto(nn.Module):
+    # YOLOv5 mask Proto module for segmentation models
+    def __init__(self, c1, c_=256, c2=32):  # ch_in, number of protos, number of masks
+        super().__init__()
+        self.cv1 = Conv(c1, c_, k=3)
+        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+        self.cv2 = Conv(c_, c_, k=3)
+        self.cv3 = Conv(c_, c2)
+
+    def forward(self, x):
+        return self.cv3(self.cv2(self.upsample(self.cv1(x))))
+
+
+class Classify(nn.Module):
+    # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
+    def __init__(self,
+                 c1,
+                 c2,
+                 k=1,
+                 s=1,
+                 p=None,
+                 g=1,
+                 dropout_p=0.0):  # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
+        super().__init__()
+        c_ = 1280  # efficientnet_b0 size
+        self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
+        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
+        self.drop = nn.Dropout(p=dropout_p, inplace=True)
+        self.linear = nn.Linear(c_, c2)  # to x(b,c2)
+
+    def forward(self, x):
+        if isinstance(x, list):
+            x = torch.cat(x, 1)
+        return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
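A shape check for the classification head above, assuming models.common is importable; 512 input channels and 1000 classes are illustrative values:

import torch
from models.common import Classify

head = Classify(512, 1000)       # backbone feature channels -> class logits
x = torch.randn(2, 512, 20, 20)
print(head(x).shape)             # expected: torch.Size([2, 1000])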

+ 111 - 0
models/experimental.py

@@ -0,0 +1,111 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Experimental modules
+"""
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from utils.downloads import attempt_download
+
+
+class Sum(nn.Module):
+    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+    def __init__(self, n, weight=False):  # n: number of inputs
+        super().__init__()
+        self.weight = weight  # apply weights boolean
+        self.iter = range(n - 1)  # iter object
+        if weight:
+            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights
+
+    def forward(self, x):
+        y = x[0]  # no weight
+        if self.weight:
+            w = torch.sigmoid(self.w) * 2
+            for i in self.iter:
+                y = y + x[i + 1] * w[i]
+        else:
+            for i in self.iter:
+                y = y + x[i + 1]
+        return y
+
+
+class MixConv2d(nn.Module):
+    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
+    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
+        super().__init__()
+        n = len(k)  # number of convolutions
+        if equal_ch:  # equal c_ per group
+            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
+            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
+        else:  # equal weight.numel() per group
+            b = [c2] + [0] * n
+            a = np.eye(n + 1, n, k=-1)
+            a -= np.roll(a, 1, axis=1)
+            a *= np.array(k) ** 2
+            a[0] = 1
+            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
+
+        self.m = nn.ModuleList([
+            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
+        self.bn = nn.BatchNorm2d(c2)
+        self.act = nn.SiLU()
+
+    def forward(self, x):
+        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
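The equal_ch branch above spreads the c2 output channels as evenly as possible across the kernel sizes; a standalone sketch of just that assignment:

import torch

c2, k = 64, (1, 3, 5)                        # illustrative ch_out and mixed kernel sizes
n = len(k)
i = torch.linspace(0, n - 1E-6, c2).floor()  # kernel-group index per output channel
c_ = [(i == g).sum().item() for g in range(n)]
print(c_, sum(c_))                           # expected: [22, 21, 21] 64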
+
+
+class Ensemble(nn.ModuleList):
+    # Ensemble of models
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, augment=False, profile=False, visualize=False):
+        y = [module(x, augment, profile, visualize)[0] for module in self]
+        # y = torch.stack(y).max(0)[0]  # max ensemble
+        # y = torch.stack(y).mean(0)  # mean ensemble
+        y = torch.cat(y, 1)  # nms ensemble
+        return y, None  # inference, train output
+
+
+def attempt_load(weights, device=None, inplace=True, fuse=True):
+    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    from models.yolo import Detect, Model
+
+    model = Ensemble()
+    for w in weights if isinstance(weights, list) else [weights]:
+        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
+        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
+
+        # Model compatibility updates
+        if not hasattr(ckpt, 'stride'):
+            ckpt.stride = torch.tensor([32.])
+        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
+
+        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
+
+    # Module compatibility updates
+    for m in model.modules():
+        t = type(m)
+        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
+            m.inplace = inplace  # torch 1.7.0 compatibility
+            if t is Detect and not isinstance(m.anchor_grid, list):
+                delattr(m, 'anchor_grid')
+                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
+        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
+
+    # Return model
+    if len(model) == 1:
+        return model[-1]
+
+    # Return detection ensemble
+    print(f'Ensemble created with {weights}\n')
+    for k in 'names', 'nc', 'yaml':
+        setattr(model, k, getattr(model[0], k))
+    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
+    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
+    return model
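A hedged usage sketch of attempt_load above: a single path returns one model, while a list returns an Ensemble whose forward concatenates per-model predictions for NMS. Weight filenames are placeholders.

import torch
from models.experimental import attempt_load

model = attempt_load('yolov5s.pt', device=torch.device('cpu'), fuse=True)           # single model
ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], device=torch.device('cpu'))   # Ensemble
print(type(ensemble).__name__, ensemble.stride, len(ensemble.names))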

+ 59 - 0
models/hub/anchors.yaml

@@ -0,0 +1,59 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Default anchors for COCO data
+
+
+# P5 -------------------------------------------------------------------------------------------------------------------
+# P5-640:
+anchors_p5_640:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+
+# P6 -------------------------------------------------------------------------------------------------------------------
+# P6-640:  thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11,  21,19,  17,41,  43,32,  39,70,  86,64,  65,131,  134,130,  120,265,  282,180,  247,354,  512,387
+anchors_p6_640:
+  - [9,11,  21,19,  17,41]  # P3/8
+  - [43,32,  39,70,  86,64]  # P4/16
+  - [65,131,  134,130,  120,265]  # P5/32
+  - [282,180,  247,354,  512,387]  # P6/64
+
+# P6-1280:  thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792
+anchors_p6_1280:
+  - [19,27,  44,40,  38,94]  # P3/8
+  - [96,68,  86,152,  180,137]  # P4/16
+  - [140,301,  303,264,  238,542]  # P5/32
+  - [436,615,  739,380,  925,792]  # P6/64
+
+# P6-1920:  thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41,  67,59,  57,141,  144,103,  129,227,  270,205,  209,452,  455,396,  358,812,  653,922,  1109,570,  1387,1187
+anchors_p6_1920:
+  - [28,41,  67,59,  57,141]  # P3/8
+  - [144,103,  129,227,  270,205]  # P4/16
+  - [209,452,  455,396,  358,812]  # P5/32
+  - [653,922,  1109,570,  1387,1187]  # P6/64
+
+
+# P7 -------------------------------------------------------------------------------------------------------------------
+# P7-640:  thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11,  13,30,  29,20,  30,46,  61,38,  39,92,  78,80,  146,66,  79,163,  149,150,  321,143,  157,303,  257,402,  359,290,  524,372
+anchors_p7_640:
+  - [11,11,  13,30,  29,20]  # P3/8
+  - [30,46,  61,38,  39,92]  # P4/16
+  - [78,80,  146,66,  79,163]  # P5/32
+  - [149,150,  321,143,  157,303]  # P6/64
+  - [257,402,  359,290,  524,372]  # P7/128
+
+# P7-1280:  thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22,  54,36,  32,77,  70,83,  138,71,  75,173,  165,159,  148,334,  375,151,  334,317,  251,626,  499,474,  750,326,  534,814,  1079,818
+anchors_p7_1280:
+  - [19,22,  54,36,  32,77]  # P3/8
+  - [70,83,  138,71,  75,173]  # P4/16
+  - [165,159,  148,334,  375,151]  # P5/32
+  - [334,317,  251,626,  499,474]  # P6/64
+  - [750,326,  534,814,  1079,818]  # P7/128
+
+# P7-1920:  thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34,  81,55,  47,115,  105,124,  207,107,  113,259,  247,238,  222,500,  563,227,  501,476,  376,939,  749,711,  1126,489,  801,1222,  1618,1227
+anchors_p7_1920:
+  - [29,34,  81,55,  47,115]  # P3/8
+  - [105,124,  207,107,  113,259]  # P4/16
+  - [247,238,  222,500,  563,227]  # P5/32
+  - [501,476,  376,939,  749,711]  # P6/64
+  - [1126,489,  801,1222,  1618,1227]  # P7/128

+ 51 - 0
models/hub/yolov3-spp.yaml

@@ -0,0 +1,51 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# darknet53 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [32, 3, 1]],  # 0
+   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+   [-1, 1, Bottleneck, [64]],
+   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+   [-1, 2, Bottleneck, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+   [-1, 8, Bottleneck, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+   [-1, 8, Bottleneck, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+   [-1, 4, Bottleneck, [1024]],  # 10
+  ]
+
+# YOLOv3-SPP head
+head:
+  [[-1, 1, Bottleneck, [1024, False]],
+   [-1, 1, SPP, [512, [5, 9, 13]]],
+   [-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Bottleneck, [256, False]],
+   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+   [[27, 22, 15], 1, Detect, [nc, anchors]],   # Detect(P3, P4, P5)
+  ]

+ 41 - 0
models/hub/yolov3-tiny.yaml

@@ -0,0 +1,41 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,14, 23,27, 37,58]  # P4/16
+  - [81,82, 135,169, 344,319]  # P5/32
+
+# YOLOv3-tiny backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [16, 3, 1]],  # 0
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 1-P1/2
+   [-1, 1, Conv, [32, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 3-P2/4
+   [-1, 1, Conv, [64, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 5-P3/8
+   [-1, 1, Conv, [128, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 7-P4/16
+   [-1, 1, Conv, [256, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 9-P5/32
+   [-1, 1, Conv, [512, 3, 1]],
+   [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]],  # 11
+   [-1, 1, nn.MaxPool2d, [2, 1, 0]],  # 12
+  ]
+
+# YOLOv3-tiny head
+head:
+  [[-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Conv, [256, 3, 1]],  # 19 (P4/16-medium)
+
+   [[19, 15], 1, Detect, [nc, anchors]],  # Detect(P4, P5)
+  ]

+ 51 - 0
models/hub/yolov3.yaml

@@ -0,0 +1,51 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# darknet53 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [32, 3, 1]],  # 0
+   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+   [-1, 1, Bottleneck, [64]],
+   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+   [-1, 2, Bottleneck, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+   [-1, 8, Bottleneck, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+   [-1, 8, Bottleneck, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+   [-1, 4, Bottleneck, [1024]],  # 10
+  ]
+
+# YOLOv3 head
+head:
+  [[-1, 1, Bottleneck, [1024, False]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Bottleneck, [256, False]],
+   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+   [[27, 22, 15], 1, Detect, [nc, anchors]],   # Detect(P3, P4, P5)
+  ]

+ 48 - 0
models/hub/yolov5-bifpn.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 BiFPN head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14, 6], 1, Concat, [1]],  # cat P4 <--- BiFPN change
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 42 - 0
models/hub/yolov5-fpn.yaml

@@ -0,0 +1,42 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 FPN head
+head:
+  [[-1, 3, C3, [1024, False]],  # 10 (P5/32-large)
+
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 3, C3, [512, False]],  # 14 (P4/16-medium)
+
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 3, C3, [256, False]],  # 18 (P3/8-small)
+
+   [[18, 14, 10], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 54 - 0
models/hub/yolov5-p2.yaml

@@ -0,0 +1,54 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 2], 1, Concat, [1]],  # cat backbone P2
+   [-1, 1, C3, [128, False]],  # 21 (P2/4-xsmall)
+
+   [-1, 1, Conv, [128, 3, 2]],
+   [[-1, 18], 1, Concat, [1]],  # cat head P3
+   [-1, 3, C3, [256, False]],  # 24 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 27 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 30 (P5/32-large)
+
+   [[21, 24, 27, 30], 1, Detect, [nc, anchors]],  # Detect(P2, P3, P4, P5)
+  ]

+ 41 - 0
models/hub/yolov5-p34.yaml

@@ -0,0 +1,41 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, C3, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 6, C3, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, C3, [ 512 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+    [ -1, 3, C3, [ 1024 ] ],
+    [ -1, 1, SPPF, [ 1024, 5 ] ],  # 9
+  ]
+
+# YOLOv5 v6.0 head with (P3, P4) outputs
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, C3, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, C3, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, C3, [ 512, False ] ],  # 20 (P4/16-medium)
+
+    [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4)
+  ]

+ 56 - 0
models/hub/yolov5-p6.yaml

@@ -0,0 +1,56 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]

+ 67 - 0
models/hub/yolov5-p7.yaml

@@ -0,0 +1,67 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, Conv, [1280, 3, 2]],  # 11-P7/128
+   [-1, 3, C3, [1280]],
+   [-1, 1, SPPF, [1280, 5]],  # 13
+  ]
+
+# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
+head:
+  [[-1, 1, Conv, [1024, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 10], 1, Concat, [1]],  # cat backbone P6
+   [-1, 3, C3, [1024, False]],  # 17
+
+   [-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 21
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 25
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 29 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 26], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 32 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 22], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 35 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 18], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 38 (P6/64-xlarge)
+
+   [-1, 1, Conv, [1024, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P7
+   [-1, 3, C3, [1280, False]],  # 41 (P7/128-xxlarge)
+
+   [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6, P7)
+  ]

+ 48 - 0
models/hub/yolov5-panet.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 PANet head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 60 - 0
models/hub/yolov5l6.yaml

@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [19,27,  44,40,  38,94]  # P3/8
+  - [96,68,  86,152,  180,137]  # P4/16
+  - [140,301,  303,264,  238,542]  # P5/32
+  - [436,615,  739,380,  925,792]  # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]

+ 60 - 0
models/hub/yolov5m6.yaml

@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.67  # model depth multiple
+width_multiple: 0.75  # layer channel multiple
+anchors:
+  - [19,27,  44,40,  38,94]  # P3/8
+  - [96,68,  86,152,  180,137]  # P4/16
+  - [140,301,  303,264,  238,542]  # P5/32
+  - [436,615,  739,380,  925,792]  # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]

+ 60 - 0
models/hub/yolov5n6.yaml

@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.25  # layer channel multiple
+anchors:
+  - [19,27,  44,40,  38,94]  # P3/8
+  - [96,68,  86,152,  180,137]  # P4/16
+  - [140,301,  303,264,  238,542]  # P5/32
+  - [436,615,  739,380,  925,792]  # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]

+ 49 - 0
models/hub/yolov5s-LeakyReLU.yaml

@@ -0,0 +1,49 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+activation: nn.LeakyReLU(0.1)  # <----- Conv() activation used throughout entire YOLOv5 model
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
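
Note (editorial): the activation: key above swaps the default Conv() activation (SiLU) for LeakyReLU(0.1) across the whole model; how that string is applied to Conv layers happens during config parsing (not shown in full here). On the TensorFlow export path, activations() in models/tf.py below performs the matching conversion; a minimal sketch of that mapping:

import torch.nn as nn
from tensorflow import keras

def tf_activation(act):
    # mirrors activations() in models/tf.py: LeakyReLU -> relu(alpha=0.1), otherwise swish/SiLU
    if isinstance(act, nn.LeakyReLU):
        return lambda x: keras.activations.relu(x, alpha=0.1)
    return keras.activations.swish

print(tf_activation(nn.LeakyReLU(0.1)))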

+ 48 - 0
models/hub/yolov5s-ghost.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, GhostConv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3Ghost, [128]],
+   [-1, 1, GhostConv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3Ghost, [256]],
+   [-1, 1, GhostConv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3Ghost, [512]],
+   [-1, 1, GhostConv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3Ghost, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, GhostConv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3Ghost, [512, False]],  # 13
+
+   [-1, 1, GhostConv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3Ghost, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, GhostConv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3Ghost, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, GhostConv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3Ghost, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 48 - 0
models/hub/yolov5s-transformer.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3TR, [1024]],  # 8 <--- C3TR() Transformer module
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 60 - 0
models/hub/yolov5s6.yaml

@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors:
+  - [19,27,  44,40,  38,94]  # P3/8
+  - [96,68,  86,152,  180,137]  # P4/16
+  - [140,301,  303,264,  238,542]  # P5/32
+  - [436,615,  739,380,  925,792]  # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]

+ 60 - 0
models/hub/yolov5x6.yaml

@@ -0,0 +1,60 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.33  # model depth multiple
+width_multiple: 1.25  # layer channel multiple
+anchors:
+  - [19,27,  44,40,  38,94]  # P3/8
+  - [96,68,  86,152,  180,137]  # P4/16
+  - [140,301,  303,264,  238,542]  # P5/32
+  - [436,615,  739,380,  925,792]  # P6/64
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]
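
Note (editorial): yolov5n6/s6/m6/l6/x6 share this exact layout and differ only in depth_multiple/width_multiple; the extra backbone stage adds a stride-64 P6 output. At the 1280-pixel input size these P6 variants are commonly trained at, the per-output grid sizes are easy to check:

img_size = 1280
for name, stride in [('P3', 8), ('P4', 16), ('P5', 32), ('P6', 64)]:
    print(name, img_size // stride)  # 160, 80, 40, 20 cells per side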

+ 48 - 0
models/segment/yolov5l-seg.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Segment(P3, P4, P5)
+  ]
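
Note (editorial): the segmentation configs differ from the detection ones only in the final row, where Segment takes two extra arguments — 32 mask coefficients and 256 prototype channels — matching the Segment class in models/yolo.py below. A minimal sketch, assuming it is run from the repository root:

from models.yolo import Segment  # defined later in this commit

anchors = ([10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326])
seg = Segment(nc=80, anchors=anchors, nm=32, npr=256, ch=(256, 512, 1024))  # P3/P4/P5 channels at width 1.0
print(seg.no)  # outputs per anchor = 5 + nc + nm = 117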

+ 48 - 0
models/segment/yolov5m-seg.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.67  # model depth multiple
+width_multiple: 0.75  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Segment(P3, P4, P5)
+  ]

+ 48 - 0
models/segment/yolov5n-seg.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.25  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Segment(P3, P4, P5)
+  ]

+ 48 - 0
models/segment/yolov5s-seg.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.5  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Segment(P3, P4, P5)
+  ]

+ 48 - 0
models/segment/yolov5x-seg.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.33  # model depth multiple
+width_multiple: 1.25  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Segment(P3, P4, P5)
+  ]

+ 608 - 0
models/tf.py

@@ -0,0 +1,608 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+TensorFlow, Keras and TFLite versions of YOLOv5
+Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
+
+Usage:
+    $ python models/tf.py --weights yolov5s.pt
+
+Export:
+    $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
+"""
+
+import argparse
+import sys
+from copy import deepcopy
+from pathlib import Path
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+# ROOT = ROOT.relative_to(Path.cwd())  # relative
+
+import numpy as np
+import tensorflow as tf
+import torch
+import torch.nn as nn
+from tensorflow import keras
+
+from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
+                           DWConvTranspose2d, Focus, autopad)
+from models.experimental import MixConv2d, attempt_load
+from models.yolo import Detect, Segment
+from utils.activations import SiLU
+from utils.general import LOGGER, make_divisible, print_args
+
+
+class TFBN(keras.layers.Layer):
+    # TensorFlow BatchNormalization wrapper
+    def __init__(self, w=None):
+        super().__init__()
+        self.bn = keras.layers.BatchNormalization(
+            beta_initializer=keras.initializers.Constant(w.bias.numpy()),
+            gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
+            moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
+            moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
+            epsilon=w.eps)
+
+    def call(self, inputs):
+        return self.bn(inputs)
+
+
+class TFPad(keras.layers.Layer):
+    # Pad inputs in spatial dimensions 1 and 2
+    def __init__(self, pad):
+        super().__init__()
+        if isinstance(pad, int):
+            self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
+        else:  # tuple/list
+            self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])
+
+    def call(self, inputs):
+        return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
+
+
+class TFConv(keras.layers.Layer):
+    # Standard convolution
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+        # ch_in, ch_out, weights, kernel, stride, padding, groups
+        super().__init__()
+        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+        # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
+        # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
+        conv = keras.layers.Conv2D(
+            filters=c2,
+            kernel_size=k,
+            strides=s,
+            padding='SAME' if s == 1 else 'VALID',
+            use_bias=not hasattr(w, 'bn'),
+            kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+        self.act = activations(w.act) if act else tf.identity
+
+    def call(self, inputs):
+        return self.act(self.bn(self.conv(inputs)))
+
+
+class TFDWConv(keras.layers.Layer):
+    # Depthwise convolution
+    def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
+        # ch_in, ch_out, weights, kernel, stride, padding, groups
+        super().__init__()
+        assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
+        conv = keras.layers.DepthwiseConv2D(
+            kernel_size=k,
+            depth_multiplier=c2 // c1,
+            strides=s,
+            padding='SAME' if s == 1 else 'VALID',
+            use_bias=not hasattr(w, 'bn'),
+            depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+        self.act = activations(w.act) if act else tf.identity
+
+    def call(self, inputs):
+        return self.act(self.bn(self.conv(inputs)))
+
+
+class TFDWConvTranspose2d(keras.layers.Layer):
+    # Depthwise ConvTranspose2d
+    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
+        # ch_in, ch_out, weights, kernel, stride, padding, groups
+        super().__init__()
+        assert c1 == c2, f'TFDWConvTranspose2d() output={c2} must be equal to input={c1} channels'
+        assert k == 4 and p1 == 1, 'TFDWConvTranspose2d() only valid for k=4 and p1=1'
+        weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
+        self.c1 = c1
+        self.conv = [
+            keras.layers.Conv2DTranspose(filters=1,
+                                         kernel_size=k,
+                                         strides=s,
+                                         padding='VALID',
+                                         output_padding=p2,
+                                         use_bias=True,
+                                         kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
+                                         bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]
+
+    def call(self, inputs):
+        return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]
+
+
+class TFFocus(keras.layers.Layer):
+    # Focus wh information into c-space
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+        # ch_in, ch_out, kernel, stride, padding, groups
+        super().__init__()
+        self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
+
+    def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
+        # inputs = inputs / 255  # normalize 0-255 to 0-1
+        inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
+        return self.conv(tf.concat(inputs, 3))
+
+
+class TFBottleneck(keras.layers.Layer):
+    # Standard bottleneck
+    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_in, ch_out, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
+        self.add = shortcut and c1 == c2
+
+    def call(self, inputs):
+        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+class TFCrossConv(keras.layers.Layer):
+    # Cross Convolution
+    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
+        self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
+        self.add = shortcut and c1 == c2
+
+    def call(self, inputs):
+        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+class TFConv2d(keras.layers.Layer):
+    # Substitution for PyTorch nn.Conv2d
+    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
+        super().__init__()
+        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+        self.conv = keras.layers.Conv2D(filters=c2,
+                                        kernel_size=k,
+                                        strides=s,
+                                        padding='VALID',
+                                        use_bias=bias,
+                                        kernel_initializer=keras.initializers.Constant(
+                                            w.weight.permute(2, 3, 1, 0).numpy()),
+                                        bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)
+
+    def call(self, inputs):
+        return self.conv(inputs)
+
+
+class TFBottleneckCSP(keras.layers.Layer):
+    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+        # ch_in, ch_out, number, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
+        self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
+        self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
+        self.bn = TFBN(w.bn)
+        self.act = lambda x: keras.activations.swish(x)
+        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
+
+    def call(self, inputs):
+        y1 = self.cv3(self.m(self.cv1(inputs)))
+        y2 = self.cv2(inputs)
+        return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
+
+
+class TFC3(keras.layers.Layer):
+    # CSP Bottleneck with 3 convolutions
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+        # ch_in, ch_out, number, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
+        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
+        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
+
+    def call(self, inputs):
+        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
+
+
+class TFC3x(keras.layers.Layer):
+    # C3 module with cross-convolutions
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+        # ch_in, ch_out, number, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
+        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
+        self.m = keras.Sequential([
+            TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])
+
+    def call(self, inputs):
+        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
+
+
+class TFSPP(keras.layers.Layer):
+    # Spatial pyramid pooling layer used in YOLOv3-SPP
+    def __init__(self, c1, c2, k=(5, 9, 13), w=None):
+        super().__init__()
+        c_ = c1 // 2  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
+        self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
+
+    def call(self, inputs):
+        x = self.cv1(inputs)
+        return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
+
+
+class TFSPPF(keras.layers.Layer):
+    # Spatial pyramid pooling-Fast layer
+    def __init__(self, c1, c2, k=5, w=None):
+        super().__init__()
+        c_ = c1 // 2  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
+        self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
+
+    def call(self, inputs):
+        x = self.cv1(inputs)
+        y1 = self.m(x)
+        y2 = self.m(y1)
+        return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
+
+
+class TFDetect(keras.layers.Layer):
+    # TF YOLOv5 Detect layer
+    def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer
+        super().__init__()
+        self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
+        self.nc = nc  # number of classes
+        self.no = nc + 5  # number of outputs per anchor
+        self.nl = len(anchors)  # number of detection layers
+        self.na = len(anchors[0]) // 2  # number of anchors
+        self.grid = [tf.zeros(1)] * self.nl  # init grid
+        self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
+        self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
+        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
+        self.training = False  # set to False after building model
+        self.imgsz = imgsz
+        for i in range(self.nl):
+            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
+            self.grid[i] = self._make_grid(nx, ny)
+
+    def call(self, inputs):
+        z = []  # inference output
+        x = []
+        for i in range(self.nl):
+            x.append(self.m[i](inputs[i]))
+            # x(bs,20,20,255) to x(bs,3,20,20,85)
+            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
+            x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
+
+            if not self.training:  # inference
+                y = x[i]
+                grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
+                anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
+                xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i]  # xy
+                wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
+                # Normalize xywh to 0-1 to reduce calibration error
+                xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
+                wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
+                y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
+                z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
+
+        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), )
+
+    @staticmethod
+    def _make_grid(nx=20, ny=20):
+        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
+        # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
+        xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
+        return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
+
+
+class TFSegment(TFDetect):
+    # YOLOv5 Segment head for segmentation models
+    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
+        super().__init__(nc, anchors, ch, imgsz, w)
+        self.nm = nm  # number of masks
+        self.npr = npr  # number of protos
+        self.no = 5 + nc + self.nm  # number of outputs per anchor
+        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]  # output conv
+        self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto)  # protos
+        self.detect = TFDetect.call
+
+    def call(self, x):
+        p = self.proto(x[0])
+        # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0]))  # (optional) full-size protos
+        p = tf.transpose(p, [0, 3, 1, 2])  # from shape(1,160,160,32) to shape(1,32,160,160)
+        x = self.detect(self, x)
+        return (x, p) if self.training else (x[0], p)
+
+
+class TFProto(keras.layers.Layer):
+
+    def __init__(self, c1, c_=256, c2=32, w=None):
+        super().__init__()
+        self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
+        self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
+        self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
+        self.cv3 = TFConv(c_, c2, w=w.cv3)
+
+    def call(self, inputs):
+        return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
+
+
+class TFUpsample(keras.layers.Layer):
+    # TF version of torch.nn.Upsample()
+    def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'
+        super().__init__()
+        assert scale_factor % 2 == 0, 'scale_factor must be a multiple of 2'
+        self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
+        # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
+        # with default arguments: align_corners=False, half_pixel_centers=False
+        # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
+        #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))
+
+    def call(self, inputs):
+        return self.upsample(inputs)
+
+
+class TFConcat(keras.layers.Layer):
+    # TF version of torch.concat()
+    def __init__(self, dimension=1, w=None):
+        super().__init__()
+        assert dimension == 1, 'convert only NCHW to NHWC concat'
+        self.d = 3
+
+    def call(self, inputs):
+        return tf.concat(inputs, self.d)
+
+
+def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
+    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
+    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m_str = m
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            try:
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+            except NameError:
+                pass
+
+        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in [
+                nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
+                BottleneckCSP, C3, C3x]:
+            c1, c2 = ch[f], args[0]
+            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
+
+            args = [c1, c2, *args[1:]]
+            if m in [BottleneckCSP, C3, C3x]:
+                args.insert(2, n)
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
+        elif m in [Detect, Segment]:
+            args.append([ch[x + 1] for x in f])
+            if isinstance(args[1], int):  # number of anchors
+                args[1] = [list(range(args[1] * 2))] * len(f)
+            if m is Segment:
+                args[3] = make_divisible(args[3] * gw, 8)
+            args.append(imgsz)
+        else:
+            c2 = ch[f]
+
+        tf_m = eval('TF' + m_str.replace('nn.', ''))
+        m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
+            else tf_m(*args, w=model.model[i])  # module
+
+        torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        np = sum(x.numel() for x in torch_m_.parameters())  # number params
+        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+        LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10}  {t:<40}{str(args):<30}')  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        ch.append(c2)
+    return keras.Sequential(layers), sorted(save)
+
+
+class TFModel:
+    # TF YOLOv5 model
+    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes
+        super().__init__()
+        if isinstance(cfg, dict):
+            self.yaml = cfg  # model dict
+        else:  # is *.yaml
+            import yaml  # for torch hub
+            self.yaml_file = Path(cfg).name
+            with open(cfg) as f:
+                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict
+
+        # Define model
+        if nc and nc != self.yaml['nc']:
+            LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
+            self.yaml['nc'] = nc  # override yaml value
+        self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
+
+    def predict(self,
+                inputs,
+                tf_nms=False,
+                agnostic_nms=False,
+                topk_per_class=100,
+                topk_all=100,
+                iou_thres=0.45,
+                conf_thres=0.25):
+        y = []  # outputs
+        x = inputs
+        for m in self.model.layers:
+            if m.f != -1:  # if not from previous layer
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
+
+            x = m(x)  # run
+            y.append(x if m.i in self.savelist else None)  # save output
+
+        # Add TensorFlow NMS
+        if tf_nms:
+            boxes = self._xywh2xyxy(x[0][..., :4])
+            probs = x[0][:, :, 4:5]
+            classes = x[0][:, :, 5:]
+            scores = probs * classes
+            if agnostic_nms:
+                nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
+            else:
+                boxes = tf.expand_dims(boxes, 2)
+                nms = tf.image.combined_non_max_suppression(boxes,
+                                                            scores,
+                                                            topk_per_class,
+                                                            topk_all,
+                                                            iou_thres,
+                                                            conf_thres,
+                                                            clip_boxes=False)
+            return (nms, )
+        return x  # output [1,6300,85] = [xywh, conf, class0, class1, ...]
+        # x = x[0]  # [x(1,6300,85), ...] to x(6300,85)
+        # xywh = x[..., :4]  # x(6300,4) boxes
+        # conf = x[..., 4:5]  # x(6300,1) confidences
+        # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1)  classes
+        # return tf.concat([conf, cls, xywh], 1)
+
+    @staticmethod
+    def _xywh2xyxy(xywh):
+        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+        x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
+        return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
+
+
+class AgnosticNMS(keras.layers.Layer):
+    # TF Agnostic NMS
+    def call(self, input, topk_all, iou_thres, conf_thres):
+        # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
+        return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
+                         input,
+                         fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
+                         name='agnostic_nms')
+
+    @staticmethod
+    def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnostic NMS
+        boxes, classes, scores = x
+        class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
+        scores_inp = tf.reduce_max(scores, -1)
+        selected_inds = tf.image.non_max_suppression(boxes,
+                                                     scores_inp,
+                                                     max_output_size=topk_all,
+                                                     iou_threshold=iou_thres,
+                                                     score_threshold=conf_thres)
+        selected_boxes = tf.gather(boxes, selected_inds)
+        padded_boxes = tf.pad(selected_boxes,
+                              paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
+                              mode='CONSTANT',
+                              constant_values=0.0)
+        selected_scores = tf.gather(scores_inp, selected_inds)
+        padded_scores = tf.pad(selected_scores,
+                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+                               mode='CONSTANT',
+                               constant_values=-1.0)
+        selected_classes = tf.gather(class_inds, selected_inds)
+        padded_classes = tf.pad(selected_classes,
+                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+                                mode='CONSTANT',
+                                constant_values=-1.0)
+        valid_detections = tf.shape(selected_inds)[0]
+        return padded_boxes, padded_scores, padded_classes, valid_detections
+
+
+def activations(act=nn.SiLU):
+    # Returns TF activation from input PyTorch activation
+    if isinstance(act, nn.LeakyReLU):
+        return lambda x: keras.activations.relu(x, alpha=0.1)
+    elif isinstance(act, nn.Hardswish):
+        return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
+    elif isinstance(act, (nn.SiLU, SiLU)):
+        return lambda x: keras.activations.swish(x)
+    else:
+        raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
+
+
+def representative_dataset_gen(dataset, ncalib=100):
+    # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
+    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
+        im = np.transpose(img, [1, 2, 0])
+        im = np.expand_dims(im, axis=0).astype(np.float32)
+        im /= 255
+        yield [im]
+        if n >= ncalib:
+            break
+
+
+def run(
+        weights=ROOT / 'yolov5s.pt',  # weights path
+        imgsz=(640, 640),  # inference size h,w
+        batch_size=1,  # batch size
+        dynamic=False,  # dynamic batch size
+):
+    # PyTorch model
+    im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image
+    model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
+    _ = model(im)  # inference
+    model.info()
+
+    # TensorFlow model
+    im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image
+    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+    _ = tf_model.predict(im)  # inference
+
+    # Keras model
+    im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
+    keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
+    keras_model.summary()
+
+    LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+    parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main(opt):
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)
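
Note (editorial): representative_dataset_gen above exists to feed a TFLite converter during int8 calibration. A hedged sketch of how it could be wired up — keras_model and dataset are assumed placeholders (a Keras model from a flow like run() and an image loader yielding (path, img, im0s, vid_cap, string) tuples); the real export flow lives in export.py, per the module docstring:

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)  # keras_model: assumed placeholder
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)  # dataset: assumed loader
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
tflite_model = converter.convert()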

+ 391 - 0
models/yolo.py

@@ -0,0 +1,391 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+YOLO-specific modules
+
+Usage:
+    $ python models/yolo.py --cfg yolov5s.yaml
+"""
+
+import argparse
+import contextlib
+import os
+import platform
+import sys
+from copy import deepcopy
+from pathlib import Path
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+if platform.system() != 'Windows':
+    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import *  # noqa
+from models.experimental import *  # noqa
+from utils.autoanchor import check_anchor_order
+from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
+from utils.plots import feature_visualization
+from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
+                               time_sync)
+
+try:
+    import thop  # for FLOPs computation
+except ImportError:
+    thop = None
+
+
+class Detect(nn.Module):
+    # YOLOv5 Detect head for detection models
+    stride = None  # strides computed during build
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+
+    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.no = nc + 5  # number of outputs per anchor
+        self.nl = len(anchors)  # number of detection layers
+        self.na = len(anchors[0]) // 2  # number of anchors
+        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
+        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
+        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
+        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+
+    def forward(self, x):
+        z = []  # inference output
+        for i in range(self.nl):
+            x[i] = self.m[i](x[i])  # conv
+            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
+            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+            if not self.training:  # inference
+                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
+                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
+
+                if isinstance(self, Segment):  # (boxes + masks)
+                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
+                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
+                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
+                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
+                else:  # Detect (boxes only)
+                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
+                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
+                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
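+                    # note: grid is pre-offset by -0.5 in _make_grid, so the decoded box centre can shift
+                    # within (-0.5, 1.5) cells of each grid point and width/height spans (0, 4)x the matched anchor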
+                    y = torch.cat((xy, wh, conf), 4)
+                z.append(y.view(bs, self.na * nx * ny, self.no))
+
+        return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x)
+
+    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
+        d = self.anchors[i].device
+        t = self.anchors[i].dtype
+        shape = 1, self.na, ny, nx, 2  # grid shape
+        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
+        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # meshgrid 'indexing' arg requires torch>=1.10
+        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. y = 2.0 * x - 0.5
+        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
+        return grid, anchor_grid
+
+
+class Segment(Detect):
+    # YOLOv5 Segment head for segmentation models
+    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
+        super().__init__(nc, anchors, ch, inplace)
+        self.nm = nm  # number of masks
+        self.npr = npr  # number of protos
+        self.no = 5 + nc + self.nm  # number of outputs per anchor
+        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
+        self.detect = Detect.forward
+
+    def forward(self, x):
+        p = self.proto(x[0])
+        x = self.detect(self, x)
+        return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
+
+
+class BaseModel(nn.Module):
+    # YOLOv5 base model
+    def forward(self, x, profile=False, visualize=False):
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+    def _forward_once(self, x, profile=False, visualize=False):
+        y, dt = [], []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
+            if profile:
+                self._profile_one_layer(m, x, dt)
+            x = m(x)  # run
+            y.append(x if m.i in self.save else None)  # save output
+            if visualize:
+                feature_visualization(x, m.type, m.i, save_dir=visualize)
+        return x
+
+    def _profile_one_layer(self, m, x, dt):
+        c = m == self.model[-1]  # is final layer, copy input as inplace fix
+        o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
+        t = time_sync()
+        for _ in range(10):
+            m(x.copy() if c else x)
+        dt.append((time_sync() - t) * 100)
+        if m == self.model[0]:
+            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module")
+        LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')
+        if c:
+            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total")
+
+    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+        LOGGER.info('Fusing layers... ')
+        for m in self.model.modules():
+            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
+                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                delattr(m, 'bn')  # remove batchnorm
+                m.forward = m.forward_fuse  # update forward
+        self.info()
+        return self
+
+    def info(self, verbose=False, img_size=640):  # print model information
+        model_info(self, verbose, img_size)
+
+    def _apply(self, fn):
+        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+        self = super()._apply(fn)
+        m = self.model[-1]  # Detect()
+        if isinstance(m, (Detect, Segment)):
+            m.stride = fn(m.stride)
+            m.grid = list(map(fn, m.grid))
+            if isinstance(m.anchor_grid, list):
+                m.anchor_grid = list(map(fn, m.anchor_grid))
+        return self
+
+
+class DetectionModel(BaseModel):
+    # YOLOv5 detection model
+    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
+        super().__init__()
+        if isinstance(cfg, dict):
+            self.yaml = cfg  # model dict
+        else:  # is *.yaml
+            import yaml  # for torch hub
+            self.yaml_file = Path(cfg).name
+            with open(cfg, encoding='ascii', errors='ignore') as f:
+                self.yaml = yaml.safe_load(f)  # model dict
+
+        # Define model
+        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
+        if nc and nc != self.yaml['nc']:
+            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+            self.yaml['nc'] = nc  # override yaml value
+        if anchors:
+            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
+            self.yaml['anchors'] = round(anchors)  # override yaml value
+        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
+        self.inplace = self.yaml.get('inplace', True)
+
+        # Build strides, anchors
+        m = self.model[-1]  # Detect()
+        if isinstance(m, (Detect, Segment)):
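+            # Derive per-level strides from a dummy 256x256 forward pass, check anchor order, and scale anchors to grid units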
+            s = 256  # 2x min stride
+            m.inplace = self.inplace
+            forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+            m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+            check_anchor_order(m)
+            m.anchors /= m.stride.view(-1, 1, 1)
+            self.stride = m.stride
+            self._initialize_biases()  # only run once
+
+        # Init weights, biases
+        initialize_weights(self)
+        self.info()
+        LOGGER.info('')
+
+    def forward(self, x, augment=False, profile=False, visualize=False):
+        if augment:
+            return self._forward_augment(x)  # augmented inference, None
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+    def _forward_augment(self, x):
+        img_size = x.shape[-2:]  # height, width
+        s = [1, 0.83, 0.67]  # scales
+        f = [None, 3, None]  # flips (2-ud, 3-lr)
+        y = []  # outputs
+        for si, fi in zip(s, f):
+            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+            yi = self._forward_once(xi)[0]  # forward
+            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
+            yi = self._descale_pred(yi, fi, si, img_size)
+            y.append(yi)
+        y = self._clip_augmented(y)  # clip augmented tails
+        return torch.cat(y, 1), None  # augmented inference, train
+
+    def _descale_pred(self, p, flips, scale, img_size):
+        # de-scale predictions following augmented inference (inverse operation)
+        if self.inplace:
+            p[..., :4] /= scale  # de-scale
+            if flips == 2:
+                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
+            elif flips == 3:
+                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
+        else:
+            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
+            if flips == 2:
+                y = img_size[0] - y  # de-flip ud
+            elif flips == 3:
+                x = img_size[1] - x  # de-flip lr
+            p = torch.cat((x, y, wh, p[..., 4:]), -1)
+        return p
+
+    def _clip_augmented(self, y):
+        # Clip YOLOv5 augmented inference tails
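+        # Drop large-object (P5) predictions from the full-scale pass and small-object (P3) predictions from the most down-scaled pass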
+        nl = self.model[-1].nl  # number of detection layers (P3-P5)
+        g = sum(4 ** x for x in range(nl))  # grid points
+        e = 1  # exclude layer count
+        i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
+        y[0] = y[0][:, :-i]  # large
+        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
+        y[-1] = y[-1][:, i:]  # small
+        return y
+
+    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
+        # https://arxiv.org/abs/1708.02002 section 3.3
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+        m = self.model[-1]  # Detect() module
+        for mi, s in zip(m.m, m.stride):  # from
+            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
+            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
+            b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum())  # cls
+            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+
+Model = DetectionModel  # retain YOLOv5 'Model' class for backwards compatibility
+
+
+class SegmentationModel(DetectionModel):
+    # YOLOv5 segmentation model
+    def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
+        super().__init__(cfg, ch, nc, anchors)
+
+
+class ClassificationModel(BaseModel):
+    # YOLOv5 classification model
+    def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml, model, number of classes, cutoff index
+        super().__init__()
+        self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+    def _from_detection_model(self, model, nc=1000, cutoff=10):
+        # Create a YOLOv5 classification model from a YOLOv5 detection model
+        if isinstance(model, DetectMultiBackend):
+            model = model.model  # unwrap DetectMultiBackend
+        model.model = model.model[:cutoff]  # backbone
+        m = model.model[-1]  # last layer
+        ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels  # ch into module
+        c = Classify(ch, nc)  # Classify()
+        c.i, c.f, c.type = m.i, m.f, 'models.common.Classify'  # index, from, type
+        model.model[-1] = c  # replace
+        self.model = model.model
+        self.stride = model.stride
+        self.save = []
+        self.nc = nc
+
+    def _from_yaml(self, cfg):
+        # Create a YOLOv5 classification model from a *.yaml file
+        self.model = None  # TODO: YAML-based construction not implemented
+
+
+def parse_model(d, ch):  # model_dict, input_channels(3)
+    # Parse a YOLOv5 model.yaml dictionary
+    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
+    anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+    if act:
+        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
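+    # Walk backbone + head specs, scaling repeat counts by depth_multiple (gd) and channels by width_multiple (gw)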
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            with contextlib.suppress(NameError):
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in {
+                Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
+                BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
+            c1, c2 = ch[f], args[0]
+            if c2 != no:  # if not output
+                c2 = make_divisible(c2 * gw, 8)
+
+            args = [c1, c2, *args[1:]]
+            if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
+                args.insert(2, n)  # number of repeats
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum(ch[x] for x in f)
+        # TODO: channel, gw, gd
+        elif m in {Detect, Segment}:
+            args.append([ch[x] for x in f])
+            if isinstance(args[1], int):  # number of anchors
+                args[1] = [list(range(args[1] * 2))] * len(f)
+            if m is Segment:
+                args[3] = make_divisible(args[3] * gw, 8)
+        elif m is Contract:
+            c2 = ch[f] * args[0] ** 2
+        elif m is Expand:
+            c2 = ch[f] // args[0] ** 2
+        else:
+            c2 = ch[f]
+
+        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        np = sum(x.numel() for x in m_.parameters())  # number params
+        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f}  {t:<40}{str(args):<30}')  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        if i == 0:
+            ch = []
+        ch.append(c2)
+    return nn.Sequential(*layers), sorted(save)
+
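+# Minimal usage sketch (assumes a YAML config resolvable from the working directory; weights are randomly initialized):
+#   model = DetectionModel('yolov5s.yaml')   # builds layers via parse_model() and computes strides
+#   y = model(torch.rand(1, 3, 640, 640))    # single-scale forward (training-mode outputs)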
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--profile', action='store_true', help='profile model speed')
+    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
+    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
+    opt = parser.parse_args()
+    opt.cfg = check_yaml(opt.cfg)  # check YAML
+    print_args(vars(opt))
+    device = select_device(opt.device)
+
+    # Create model
+    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
+    model = Model(opt.cfg).to(device)
+
+    # Options
+    if opt.line_profile:  # profile layer by layer
+        model(im, profile=True)
+
+    elif opt.profile:  # profile forward-backward
+        results = profile(input=im, ops=[model], n=3)
+
+    elif opt.test:  # test all models
+        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
+            try:
+                _ = Model(cfg)
+            except Exception as e:
+                print(f'Error in {cfg}: {e}')
+
+    else:  # report fused model summary
+        model.fuse()

+ 48 - 0
models/yolov5l.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 48 - 0
models/yolov5m.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.67  # model depth multiple
+width_multiple: 0.75  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 48 - 0
models/yolov5n.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.25  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 48 - 0
models/yolov5s.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

+ 48 - 0
models/yolov5x.yaml

@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.33  # model depth multiple
+width_multiple: 1.25  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]

BIN
projectnew.db


+ 285 - 0
segment/predict.py

@@ -0,0 +1,285 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
+
+Usage - sources:
+    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                               # webcam
+                                                                  img.jpg                         # image
+                                                                  vid.mp4                         # video
+                                                                  screen                          # screenshot
+                                                                  path/                           # directory
+                                                                  list.txt                        # list of images
+                                                                  list.streams                    # list of streams
+                                                                  'path/*.jpg'                    # glob
+                                                                  'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                                  'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
+                                          yolov5s-seg.torchscript        # TorchScript
+                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                          yolov5s-seg_openvino_model     # OpenVINO
+                                          yolov5s-seg.engine             # TensorRT
+                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
+                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
+                                          yolov5s-seg.pb                 # TensorFlow GraphDef
+                                          yolov5s-seg.tflite             # TensorFlow Lite
+                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
+                                          yolov5s-seg_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
+                           strip_optimizer)
+from utils.plots import Annotator, colors, save_one_box
+from utils.segment.general import masks2segments, process_mask, process_mask_native
+from utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+    weights=ROOT / 'yolov5s-seg.pt',  # model.pt path(s)
+    source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+    data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+    imgsz=(640, 640),  # inference size (height, width)
+    conf_thres=0.25,  # confidence threshold
+    iou_thres=0.45,  # NMS IOU threshold
+    max_det=1000,  # maximum detections per image
+    device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+    view_img=False,  # show results
+    save_txt=False,  # save results to *.txt
+    save_conf=False,  # save confidences in --save-txt labels
+    save_crop=False,  # save cropped prediction boxes
+    nosave=False,  # do not save images/videos
+    classes=None,  # filter by class: --class 0, or --class 0 2 3
+    agnostic_nms=False,  # class-agnostic NMS
+    augment=False,  # augmented inference
+    visualize=False,  # visualize features
+    update=False,  # update all models
+    project=ROOT / 'runs/predict-seg',  # save results to project/name
+    name='exp',  # save results to project/name
+    exist_ok=False,  # existing project/name ok, do not increment
+    line_thickness=3,  # bounding box thickness (pixels)
+    hide_labels=False,  # hide labels
+    hide_conf=False,  # hide confidences
+    half=False,  # use FP16 half-precision inference
+    dnn=False,  # use OpenCV DNN for ONNX inference
+    vid_stride=1,  # video frame-rate stride
+    retina_masks=False,
+):
+    source = str(source)
+    save_img = not nosave and not source.endswith('.txt')  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+    screenshot = source.lower().startswith('screen')
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    # Directories
+    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    if webcam:
+        # view_img = check_imshow(warn=True)  # imshow availability check disabled in this build
+        view_img = False  # run headless: never display stream frames
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+        bs = len(dataset)
+    elif screenshot:
+        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+    else:
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
+    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    for path, im, im0s, vid_cap, s in dataset:
+        with dt[0]:
+            im = torch.from_numpy(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+            pred, proto = model(im, augment=augment, visualize=visualize)[:2]
+
+        # NMS
+        with dt[2]:
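+            # nm=32 marks the last 32 prediction columns as mask coefficients so NMS keeps them alongside boxes and scores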
+            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)
+
+        # Second-stage classifier (optional)
+        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+        # Process predictions
+        for i, det in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+            s += '%gx%g ' % im.shape[2:]  # print string
+            imc = im0.copy() if save_crop else im0  # for save_crop
+            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+            if len(det):
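+                # Build per-instance masks from prototypes and coefficients; --retina-masks produces them at native image resolution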
+                if retina_masks:
+                    # scale boxes to im0 size first, then crop masks at native resolution
+                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
+                    masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2])  # HWC
+                else:
+                    masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC
+                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
+
+                # Segments
+                if save_txt:
+                    segments = [
+                        scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
+                        for x in reversed(masks2segments(masks))]
+
+                # Print results
+                for c in det[:, 5].unique():
+                    n = (det[:, 5] == c).sum()  # detections per class
+                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                # Mask plotting
+                annotator.masks(
+                    masks,
+                    colors=[colors(x, True) for x in det[:, 5]],
+                    im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() /
+                    255 if retina_masks else im[i])
+
+                # Write results
+                for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
+                    if save_txt:  # Write to file
+                        seg = segments[j].reshape(-1)  # (n,2) to (n*2)
+                        line = (cls, *seg, conf) if save_conf else (cls, *seg)  # label format
+                        with open(f'{txt_path}.txt', 'a') as f:
+                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                    if save_img or save_crop or view_img:  # Add bbox to image
+                        c = int(cls)  # integer class
+                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                        annotator.box_label(xyxy, label, color=colors(c, True))
+                        # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
+                    if save_crop:
+                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+            # Stream results
+            im0 = annotator.result()
+            if view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                cv2.imshow(str(p), im0)
+                if cv2.waitKey(1) == ord('q'):  # 1 millisecond
+                    exit()
+
+            # Save results (image with detections)
+            if save_img:
+                if dataset.mode == 'image':
+                    cv2.imwrite(save_path, im0)
+                else:  # 'video' or 'stream'
+                    if vid_path[i] != save_path:  # new video
+                        vid_path[i] = save_path
+                        if isinstance(vid_writer[i], cv2.VideoWriter):
+                            vid_writer[i].release()  # release previous video writer
+                        if vid_cap:  # video
+                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                        else:  # stream
+                            fps, w, h = 30, im0.shape[1], im0.shape[0]
+                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    vid_writer[i].write(im0)
+
+        # Print time (inference-only)
+        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+    # Print results
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+    parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)

+ 666 - 0
segment/train.py

@@ -0,0 +1,666 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Train a YOLOv5 segmentation model on a segmentation dataset
+Models and datasets download automatically from the latest YOLOv5 release.
+
+Usage - Single-GPU training:
+    $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640  # from pretrained (recommended)
+    $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640  # from scratch
+
+Usage - Multi-GPU DDP training:
+    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
+
+Models:     https://github.com/ultralytics/yolov5/tree/master/models
+Datasets:   https://github.com/ultralytics/yolov5/tree/master/data
+Tutorial:   https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+"""
+
+import argparse
+import math
+import os
+import random
+import subprocess
+import sys
+import time
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import yaml
+from torch.optim import lr_scheduler
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+import segment.val as validate  # for end-of-epoch mAP
+from models.experimental import attempt_load
+from models.yolo import SegmentationModel
+from utils.autoanchor import check_anchors
+from utils.autobatch import check_train_batch_size
+from utils.callbacks import Callbacks
+from utils.downloads import attempt_download, is_url
+from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
+                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
+                           get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
+                           labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
+from utils.loggers import GenericLogger
+from utils.plots import plot_evolve, plot_labels
+from utils.segment.dataloaders import create_dataloader
+from utils.segment.loss import ComputeLoss
+from utils.segment.metrics import KEYS, fitness
+from utils.segment.plots import plot_images_and_masks, plot_results_with_masks
+from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
+                               smart_resume, torch_distributed_zero_first)
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+GIT_INFO = check_git_info()
+
+
+def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
+    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \
+        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
+        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio
+    # callbacks.run('on_pretrain_routine_start')
+
+    # Directories
+    w = save_dir / 'weights'  # weights dir
+    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
+    last, best = w / 'last.pt', w / 'best.pt'
+
+    # Hyperparameters
+    if isinstance(hyp, str):
+        with open(hyp, errors='ignore') as f:
+            hyp = yaml.safe_load(f)  # load hyps dict
+    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
+    opt.hyp = hyp.copy()  # for saving hyps to checkpoints
+
+    # Save run settings
+    if not evolve:
+        yaml_save(save_dir / 'hyp.yaml', hyp)
+        yaml_save(save_dir / 'opt.yaml', vars(opt))
+
+    # Loggers
+    data_dict = None
+    if RANK in {-1, 0}:
+        logger = GenericLogger(opt=opt, console_logger=LOGGER)
+
+    # Config
+    plots = not evolve and not opt.noplots  # create plots
+    overlap = not opt.no_overlap
+    cuda = device.type != 'cpu'
+    init_seeds(opt.seed + 1 + RANK, deterministic=True)
+    with torch_distributed_zero_first(LOCAL_RANK):
+        data_dict = data_dict or check_dataset(data)  # check if None
+    train_path, val_path = data_dict['train'], data_dict['val']
+    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
+    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
+    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset
+
+    # Model
+    check_suffix(weights, '.pt')  # check weights
+    pretrained = weights.endswith('.pt')
+    if pretrained:
+        with torch_distributed_zero_first(LOCAL_RANK):
+            weights = attempt_download(weights)  # download if not found locally
+        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
+        model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)
+        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
+        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
+        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
+        model.load_state_dict(csd, strict=False)  # load
+        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
+    else:
+        model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
+    amp = check_amp(model)  # check AMP
+
+    # Freeze
+    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
+    for k, v in model.named_parameters():
+        v.requires_grad = True  # train all layers
+        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
+        if any(x in k for x in freeze):
+            LOGGER.info(f'freezing {k}')
+            v.requires_grad = False
+
+    # Image size
+    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
+    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple
+
+    # Batch size
+    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
+        batch_size = check_train_batch_size(model, imgsz, amp)
+        logger.update_params({'batch_size': batch_size})
+        # loggers.on_params_update({"batch_size": batch_size})
+
+    # Optimizer
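+    # Accumulate gradients so the effective batch size approaches the nominal 64, and scale weight decay to match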
+    nbs = 64  # nominal batch size
+    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
+    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
+    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
+
+    # Scheduler
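+    # Either a one-cycle cosine schedule or a linear decay, both ending at lr0 * lrf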
+    if opt.cos_lr:
+        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
+    else:
+        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
+    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
+
+    # EMA
+    ema = ModelEMA(model) if RANK in {-1, 0} else None
+
+    # Resume
+    best_fitness, start_epoch = 0.0, 0
+    if pretrained:
+        if resume:
+            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
+        del ckpt, csd
+
+    # DP mode
+    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
+        LOGGER.warning(
+            'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+            'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.'
+        )
+        model = torch.nn.DataParallel(model)
+
+    # SyncBatchNorm
+    if opt.sync_bn and cuda and RANK != -1:
+        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
+        LOGGER.info('Using SyncBatchNorm()')
+
+    # Trainloader
+    train_loader, dataset = create_dataloader(
+        train_path,
+        imgsz,
+        batch_size // WORLD_SIZE,
+        gs,
+        single_cls,
+        hyp=hyp,
+        augment=True,
+        cache=None if opt.cache == 'val' else opt.cache,
+        rect=opt.rect,
+        rank=LOCAL_RANK,
+        workers=workers,
+        image_weights=opt.image_weights,
+        quad=opt.quad,
+        prefix=colorstr('train: '),
+        shuffle=True,
+        mask_downsample_ratio=mask_ratio,
+        overlap_mask=overlap,
+    )
+    labels = np.concatenate(dataset.labels, 0)
+    mlc = int(labels[:, 0].max())  # max label class
+    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
+
+    # Process 0
+    if RANK in {-1, 0}:
+        val_loader = create_dataloader(val_path,
+                                       imgsz,
+                                       batch_size // WORLD_SIZE * 2,
+                                       gs,
+                                       single_cls,
+                                       hyp=hyp,
+                                       cache=None if noval else opt.cache,
+                                       rect=True,
+                                       rank=-1,
+                                       workers=workers * 2,
+                                       pad=0.5,
+                                       mask_downsample_ratio=mask_ratio,
+                                       overlap_mask=overlap,
+                                       prefix=colorstr('val: '))[0]
+
+        if not resume:
+            if not opt.noautoanchor:
+                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
+            model.half().float()  # pre-reduce anchor precision
+
+            if plots:
+                plot_labels(labels, names, save_dir)
+        # callbacks.run('on_pretrain_routine_end', labels, names)
+
+    # DDP mode
+    if cuda and RANK != -1:
+        model = smart_DDP(model)
+
+    # Model attributes
+    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
+    hyp['box'] *= 3 / nl  # scale to layers
+    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
+    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
+    hyp['label_smoothing'] = opt.label_smoothing
+    model.nc = nc  # attach number of classes to model
+    model.hyp = hyp  # attach hyperparameters to model
+    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
+    model.names = names
+
+    # Start training
+    t0 = time.time()
+    nb = len(train_loader)  # number of batches
+    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
+    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
+    last_opt_step = -1
+    maps = np.zeros(nc)  # mAP per class
+    results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # box and mask (P, R, mAP@.5, mAP@.5-.95), val_loss(box, seg, obj, cls)
+    scheduler.last_epoch = start_epoch - 1  # do not move
+    scaler = torch.cuda.amp.GradScaler(enabled=amp)
+    stopper, stop = EarlyStopping(patience=opt.patience), False
+    compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
+    # callbacks.run('on_train_start')
+    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
+                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
+                f"Logging results to {colorstr('bold', save_dir)}\n"
+                f'Starting training for {epochs} epochs...')
+    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
+        # callbacks.run('on_train_epoch_start')
+        model.train()
+
+        # Update image weights (optional, single-GPU only)
+        if opt.image_weights:
+            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
+            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
+            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
+
+        # Update mosaic border (optional)
+        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
+        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders
+
+        mloss = torch.zeros(4, device=device)  # mean losses
+        if RANK != -1:
+            train_loader.sampler.set_epoch(epoch)
+        pbar = enumerate(train_loader)
+        LOGGER.info(('\n' + '%11s' * 8) %
+                    ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
+        if RANK in {-1, 0}:
+            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
+        optimizer.zero_grad()
+        for i, (imgs, targets, paths, _, masks) in pbar:  # batch ------------------------------------------------------
+            # callbacks.run('on_train_batch_start')
+            ni = i + nb * epoch  # number integrated batches (since train start)
+            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0
+
+            # Warmup
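+            # Linearly ramp accumulation, per-group LR and momentum over the first nw iterations (bias LR decays from warmup_bias_lr)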
+            if ni <= nw:
+                xi = [0, nw]  # x interp
+                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
+                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
+                for j, x in enumerate(optimizer.param_groups):
+                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
+                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
+                    if 'momentum' in x:
+                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
+
+            # Multi-scale
+            if opt.multi_scale:
+                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size
+                sf = sz / max(imgs.shape[2:])  # scale factor
+                if sf != 1:
+                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
+                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
+
+            # Forward
+            with torch.cuda.amp.autocast(amp):
+                pred = model(imgs)  # forward
+                loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
+                if RANK != -1:
+                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
+                if opt.quad:
+                    loss *= 4.
+
+            # Backward
+            scaler.scale(loss).backward()
+
+            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
+            if ni - last_opt_step >= accumulate:
+                scaler.unscale_(optimizer)  # unscale gradients
+                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
+                scaler.step(optimizer)  # optimizer.step
+                scaler.update()
+                optimizer.zero_grad()
+                if ema:
+                    ema.update(model)
+                last_opt_step = ni
+
+            # Log
+            if RANK in {-1, 0}:
+                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
+                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
+                pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
+                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
+                # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
+                # if callbacks.stop_training:
+                #    return
+
+                # Mosaic plots
+                if plots:
+                    if ni < 3:
+                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg')
+                    if ni == 10:
+                        files = sorted(save_dir.glob('train*.jpg'))
+                        logger.log_images(files, 'Mosaics', epoch)
+            # end batch ------------------------------------------------------------------------------------------------
+
+        # Scheduler
+        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
+        scheduler.step()
+
+        if RANK in {-1, 0}:
+            # mAP
+            # callbacks.run('on_train_epoch_end', epoch=epoch)
+            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
+            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
+            if not noval or final_epoch:  # Calculate mAP
+                results, maps, _ = validate.run(data_dict,
+                                                batch_size=batch_size // WORLD_SIZE * 2,
+                                                imgsz=imgsz,
+                                                half=amp,
+                                                model=ema.ema,
+                                                single_cls=single_cls,
+                                                dataloader=val_loader,
+                                                save_dir=save_dir,
+                                                plots=False,
+                                                callbacks=callbacks,
+                                                compute_loss=compute_loss,
+                                                mask_downsample_ratio=mask_ratio,
+                                                overlap=overlap)
+
+            # Update best mAP
+            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
+            stop = stopper(epoch=epoch, fitness=fi)  # early stop check
+            if fi > best_fitness:
+                best_fitness = fi
+            log_vals = list(mloss) + list(results) + lr
+            # callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
+            # Log val metrics and media
+            metrics_dict = dict(zip(KEYS, log_vals))
+            logger.log_metrics(metrics_dict, epoch)
+
+            # Save model
+            if (not nosave) or (final_epoch and not evolve):  # if save
+                ckpt = {
+                    'epoch': epoch,
+                    'best_fitness': best_fitness,
+                    'model': deepcopy(de_parallel(model)).half(),
+                    'ema': deepcopy(ema.ema).half(),
+                    'updates': ema.updates,
+                    'optimizer': optimizer.state_dict(),
+                    'opt': vars(opt),
+                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
+                    'date': datetime.now().isoformat()}
+
+                # Save last, best and delete
+                torch.save(ckpt, last)
+                if best_fitness == fi:
+                    torch.save(ckpt, best)
+                if opt.save_period > 0 and epoch % opt.save_period == 0:
+                    torch.save(ckpt, w / f'epoch{epoch}.pt')
+                    logger.log_model(w / f'epoch{epoch}.pt')
+                del ckpt
+                # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
+
+        # EarlyStopping
+        if RANK != -1:  # if DDP training
+            broadcast_list = [stop if RANK == 0 else None]
+            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
+            if RANK != 0:
+                stop = broadcast_list[0]
+        if stop:
+            break  # must break all DDP ranks
+
+        # end epoch ----------------------------------------------------------------------------------------------------
+    # end training -----------------------------------------------------------------------------------------------------
+    if RANK in {-1, 0}:
+        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
+        for f in last, best:
+            if f.exists():
+                strip_optimizer(f)  # strip optimizers
+                if f is best:
+                    LOGGER.info(f'\nValidating {f}...')
+                    results, _, _ = validate.run(
+                        data_dict,
+                        batch_size=batch_size // WORLD_SIZE * 2,
+                        imgsz=imgsz,
+                        model=attempt_load(f, device).half(),
+                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools at iou 0.65
+                        single_cls=single_cls,
+                        dataloader=val_loader,
+                        save_dir=save_dir,
+                        save_json=is_coco,
+                        verbose=True,
+                        plots=plots,
+                        callbacks=callbacks,
+                        compute_loss=compute_loss,
+                        mask_downsample_ratio=mask_ratio,
+                        overlap=overlap)  # val best model with plots
+                    if is_coco:
+                        # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
+                        metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
+                        logger.log_metrics(metrics_dict, epoch)
+
+        # callbacks.run('on_train_end', last, best, epoch, results)
+        # on-train-end logging via GenericLogger (callback hooks above are disabled)
+        logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)
+        if not opt.evolve:
+            logger.log_model(best, epoch)
+        if plots:
+            plot_results_with_masks(file=save_dir / 'results.csv')  # save results.png
+            files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
+            files = [(save_dir / f) for f in files if (save_dir / f).exists()]  # filter
+            LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
+            logger.log_images(files, 'Results', epoch + 1)
+            logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1)
+    torch.cuda.empty_cache()
+    return results
+
+
+def parse_opt(known=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path')
+    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
+    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+    parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
+    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
+    parser.add_argument('--rect', action='store_true', help='rectangular training')
+    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
+    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+    parser.add_argument('--noplots', action='store_true', help='save no plot files')
+    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
+    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
+    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--quad', action='store_true', help='quad dataloader')
+    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+    # Instance Segmentation Args
+    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory')
+    parser.add_argument('--no-overlap', action='store_true', help='disable overlapping masks (overlapping masks train faster at slightly lower mAP)')
+
+    return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt, callbacks=Callbacks()):
+    # Checks
+    if RANK in {-1, 0}:
+        print_args(vars(opt))
+        check_git_status()
+        check_requirements(ROOT / 'requirements.txt')
+
+    # Resume
+    if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt
+        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
+        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
+        opt_data = opt.data  # original dataset
+        if opt_yaml.is_file():
+            with open(opt_yaml, errors='ignore') as f:
+                d = yaml.safe_load(f)
+        else:
+            d = torch.load(last, map_location='cpu')['opt']
+        opt = argparse.Namespace(**d)  # replace
+        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
+        if is_url(opt_data):
+            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
+    else:
+        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
+            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
+        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+        if opt.evolve:
+            if opt.project == str(ROOT / 'runs/train-seg'):  # if default project name, rename to runs/evolve-seg
+                opt.project = str(ROOT / 'runs/evolve-seg')
+            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
+        if opt.name == 'cfg':
+            opt.name = Path(opt.cfg).stem  # use model.yaml as name
+        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
+
+    # DDP mode
+    device = select_device(opt.device, batch_size=opt.batch_size)
+    if LOCAL_RANK != -1:
+        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
+        assert not opt.image_weights, f'--image-weights {msg}'
+        assert not opt.evolve, f'--evolve {msg}'
+        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
+        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
+        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+        torch.cuda.set_device(LOCAL_RANK)
+        device = torch.device('cuda', LOCAL_RANK)
+        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
+
+    # Train
+    if not opt.evolve:
+        train(opt.hyp, opt, device, callbacks)
+
+    # Evolve hyperparameters (optional)
+    else:
+        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+        meta = {
+            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
+            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
+            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
+            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
+            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
+            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
+            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
+            'box': (1, 0.02, 0.2),  # box loss gain
+            'cls': (1, 0.2, 4.0),  # cls loss gain
+            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
+            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
+            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
+            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
+            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
+            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
+            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
+            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
+            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
+            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
+            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
+            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
+            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
+            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
+            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
+
+        with open(opt.hyp, errors='ignore') as f:
+            hyp = yaml.safe_load(f)  # load hyps dict
+            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
+                hyp['anchors'] = 3
+        if opt.noautoanchor:
+            del hyp['anchors'], meta['anchors']
+        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
+        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
+        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
+        if opt.bucket:
+            # download evolve.csv if exists
+            subprocess.run([
+                'gsutil',
+                'cp',
+                f'gs://{opt.bucket}/evolve.csv',
+                str(evolve_csv), ])
+
+        for _ in range(opt.evolve):  # generations to evolve
+            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
+                # Select parent(s)
+                parent = 'single'  # parent selection method: 'single' or 'weighted'
+                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+                n = min(5, len(x))  # number of previous results to consider
+                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
+                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
+                if parent == 'single' or len(x) == 1:
+                    # x = x[random.randint(0, n - 1)]  # random selection
+                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
+                elif parent == 'weighted':
+                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
+
+                # Mutate
+                mp, s = 0.8, 0.2  # mutation probability, sigma
+                npr = np.random
+                npr.seed(int(time.time()))
+                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
+                ng = len(meta)
+                v = np.ones(ng)
+                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
+                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
+                    hyp[k] = float(x[i + 12] * v[i])  # mutate
+
+            # Constrain to limits
+            for k, v in meta.items():
+                hyp[k] = max(hyp[k], v[1])  # lower limit
+                hyp[k] = min(hyp[k], v[2])  # upper limit
+                hyp[k] = round(hyp[k], 5)  # significant digits
+
+            # Train mutation
+            results = train(hyp.copy(), opt, device, callbacks)
+            callbacks = Callbacks()
+            # Write mutation results
+            print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)
+
+        # Plot results
+        plot_evolve(evolve_csv)
+        LOGGER.info(f'Hyperparameter evolution finished after {opt.evolve} generations\n'
+                    f"Results saved to {colorstr('bold', save_dir)}\n"
+                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
+
+
+def run(**kwargs):
+    # Usage: import segment.train as train; train.run(data='coco128-seg.yaml', imgsz=320, weights='yolov5s-seg.pt')
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        setattr(opt, k, v)
+    main(opt)
+    return opt
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)
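+
+# Example invocations (illustrative sketch; flags mirror the parse_opt defaults above):
+#   Train from pretrained segmentation weights:
+#     python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --epochs 100
+#   Evolve hyperparameters for 300 generations using the meta ranges defined in main():
+#     python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --evolve 300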

+ 595 - 0
segment/tutorial.ipynb

@@ -0,0 +1,595 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "t6MPjfT5NrKQ"
+      },
+      "source": [
+        "<div align=\"center\">\n",
+        "\n",
+        "  <a href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
+        "    <img width=\"1024\" src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\"></a>\n",
+        "\n",
+        "\n",
+        "<br>\n",
+        "  <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
+        "  <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+        "  <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+        "<br>\n",
+        "\n",
+        "This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
+        "\n",
+        "</div>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "7mGmQbAO5pQb"
+      },
+      "source": [
+        "# Setup\n",
+        "\n",
+        "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "wbvMlHd_QwMG",
+        "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stderr",
+          "text": [
+            "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
+          ]
+        },
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
+          ]
+        }
+      ],
+      "source": [
+        "!git clone https://github.com/ultralytics/yolov5  # clone\n",
+        "%cd yolov5\n",
+        "%pip install -qr requirements.txt comet_ml  # install\n",
+        "\n",
+        "import torch\n",
+        "import utils\n",
+        "display = utils.notebook_init()  # checks"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "4JnkELT0cIJg"
+      },
+      "source": [
+        "# 1. Predict\n",
+        "\n",
+        "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n",
+        "\n",
+        "```shell\n",
+        "python segment/predict.py --source 0  # webcam\n",
+        "                             img.jpg  # image \n",
+        "                             vid.mp4  # video\n",
+        "                             screen  # screenshot\n",
+        "                             path/  # directory\n",
+        "                             'path/*.jpg'  # glob\n",
+        "                             'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n",
+        "                             'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
+        "```"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "zR9ZbuQCH7FX",
+        "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n",
+            "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+            "\n",
+            "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n",
+            "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n",
+            "\n",
+            "Fusing layers... \n",
+            "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
+            "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n",
+            "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n",
+            "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
+            "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n"
+          ]
+        }
+      ],
+      "source": [
+        "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n",
+        "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "hkAzDWJ7cWTr"
+      },
+      "source": [
+        "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n",
+        "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/199030123-08c72f8d-6871-4116-8ed3-c373642cf28e.jpg\" width=\"600\">"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "0eq1SMWl6Sfn"
+      },
+      "source": [
+        "# 2. Validate\n",
+        "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "WQPtK1QYVaD_",
+        "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip  ...\n",
+            "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n",
+            "######################################################################## 100.0%\n",
+            "######################################################################## 100.0%\n"
+          ]
+        }
+      ],
+      "source": [
+        "# Download COCO val\n",
+        "!bash data/scripts/get_coco.sh --val --segments  # download (780M - 5000 images)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "X58w8JLpMnjH",
+        "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n",
+            "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+            "\n",
+            "Fusing layers... \n",
+            "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 157/157 [01:54<00:00,  1.37it/s]\n",
+            "                   all       5000      36335      0.673      0.517      0.566      0.373      0.672       0.49      0.532      0.319\n",
+            "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
+            "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n"
+          ]
+        }
+      ],
+      "source": [
+        "# Validate YOLOv5s-seg on COCO val\n",
+        "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ZY2VXXXu74w5"
+      },
+      "source": [
+        "# 3. Train\n",
+        "\n",
+        "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\"/></a></p>\n",
+        "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package.\n",
+        "<br><br>\n",
+        "\n",
+        "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
+        "\n",
+        "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
+        "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
+        "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n",
+        "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n",
+        "<br><br>\n",
+        "\n",
+        "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
+        "\n",
+        "## Train on Custom Data with Roboflow 🌟 NEW\n",
+        "\n",
+        "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
+        "\n",
+        "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n",
+        "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n",
+        "<br>\n",
+        "\n",
+        "<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"480\" src=\"https://robflow-public-assets.s3.amazonaws.com/how-to-train-yolov5-segmentation-annotation.gif\"/></a></p>Label images lightning fast (including with model-assisted labeling)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "i3oKtE4g-aNn"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n",
+        "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n",
+        "\n",
+        "if logger == 'Comet':\n",
+        "  %pip install -q comet_ml\n",
+        "  import comet_ml; comet_ml.init()\n",
+        "elif logger == 'ClearML':\n",
+        "  %pip install -q clearml\n",
+        "  import clearml; clearml.browser_login()\n",
+        "elif logger == 'TensorBoard':\n",
+        "  %load_ext tensorboard\n",
+        "  %tensorboard --logdir runs/train-seg"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "1NcFxRcFdJ_O",
+        "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n",
+            "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
+            "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
+            "\n",
+            "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
+            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n",
+            "\n",
+            "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n",
+            "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n",
+            "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n",
+            "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n",
+            "\n",
+            "                 from  n    params  module                                  arguments                     \n",
+            "  0                -1  1      3520  models.common.Conv                      [3, 32, 6, 2, 2]              \n",
+            "  1                -1  1     18560  models.common.Conv                      [32, 64, 3, 2]                \n",
+            "  2                -1  1     18816  models.common.C3                        [64, 64, 1]                   \n",
+            "  3                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               \n",
+            "  4                -1  2    115712  models.common.C3                        [128, 128, 2]                 \n",
+            "  5                -1  1    295424  models.common.Conv                      [128, 256, 3, 2]              \n",
+            "  6                -1  3    625152  models.common.C3                        [256, 256, 3]                 \n",
+            "  7                -1  1   1180672  models.common.Conv                      [256, 512, 3, 2]              \n",
+            "  8                -1  1   1182720  models.common.C3                        [512, 512, 1]                 \n",
+            "  9                -1  1    656896  models.common.SPPF                      [512, 512, 5]                 \n",
+            " 10                -1  1    131584  models.common.Conv                      [512, 256, 1, 1]              \n",
+            " 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
+            " 12           [-1, 6]  1         0  models.common.Concat                    [1]                           \n",
+            " 13                -1  1    361984  models.common.C3                        [512, 256, 1, False]          \n",
+            " 14                -1  1     33024  models.common.Conv                      [256, 128, 1, 1]              \n",
+            " 15                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          \n",
+            " 16           [-1, 4]  1         0  models.common.Concat                    [1]                           \n",
+            " 17                -1  1     90880  models.common.C3                        [256, 128, 1, False]          \n",
+            " 18                -1  1    147712  models.common.Conv                      [128, 128, 3, 2]              \n",
+            " 19          [-1, 14]  1         0  models.common.Concat                    [1]                           \n",
+            " 20                -1  1    296448  models.common.C3                        [256, 256, 1, False]          \n",
+            " 21                -1  1    590336  models.common.Conv                      [256, 256, 3, 2]              \n",
+            " 22          [-1, 10]  1         0  models.common.Concat                    [1]                           \n",
+            " 23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]          \n",
+            " 24      [17, 20, 23]  1    615133  models.yolo.Segment                     [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n",
+            "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n",
+            "\n",
+            "Transferred 367/367 items from yolov5s-seg.pt\n",
+            "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
+            "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n",
+            "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
+            "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n",
+            "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n",
+            "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 98.90it/s]\n",
+            "\n",
+            "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
+            "Plotting labels to runs/train-seg/exp/labels.jpg... \n",
+            "Image sizes 640 train, 640 val\n",
+            "Using 2 dataloader workers\n",
+            "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n",
+            "Starting training for 3 epochs...\n",
+            "\n",
+            "      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\n",
+            "        0/2      4.92G     0.0417    0.04646    0.06066    0.02126        192        640: 100% 8/8 [00:08<00:00,  1.10s/it]\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.81it/s]\n",
+            "                   all        128        929      0.737      0.649      0.715      0.492      0.719      0.617      0.658      0.408\n",
+            "\n",
+            "      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\n",
+            "        1/2      6.29G    0.04157    0.04503    0.05772    0.01777        208        640: 100% 8/8 [00:09<00:00,  1.21s/it]\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.87it/s]\n",
+            "                   all        128        929      0.756      0.674      0.738      0.506      0.725       0.64       0.68      0.422\n",
+            "\n",
+            "      Epoch    GPU_mem   box_loss   seg_loss   obj_loss   cls_loss  Instances       Size\n",
+            "        2/2      6.29G     0.0425    0.04793    0.06784    0.01863        161        640: 100% 8/8 [00:03<00:00,  2.02it/s]\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:02<00:00,  1.88it/s]\n",
+            "                   all        128        929      0.736      0.694      0.747      0.522      0.769      0.622      0.683      0.427\n",
+            "\n",
+            "3 epochs completed in 0.009 hours.\n",
+            "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n",
+            "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n",
+            "\n",
+            "Validating runs/train-seg/exp/weights/best.pt...\n",
+            "Fusing layers... \n",
+            "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95)     Mask(P          R      mAP50  mAP50-95): 100% 4/4 [00:06<00:00,  1.59s/it]\n",
+            "                   all        128        929      0.738      0.694      0.746      0.522      0.759      0.625      0.682      0.426\n",
+            "                person        128        254      0.845      0.756      0.836       0.55      0.861      0.669      0.759      0.407\n",
+            "               bicycle        128          6      0.475      0.333      0.549      0.341      0.711      0.333      0.526      0.322\n",
+            "                   car        128         46      0.612      0.565      0.539      0.257      0.555      0.435      0.477      0.171\n",
+            "            motorcycle        128          5       0.73        0.8      0.752      0.571      0.747        0.8      0.752       0.42\n",
+            "              airplane        128          6          1      0.943      0.995      0.732       0.92      0.833      0.839      0.555\n",
+            "                   bus        128          7      0.677      0.714      0.722      0.653      0.711      0.714      0.722      0.593\n",
+            "                 train        128          3          1      0.951      0.995      0.551          1      0.884      0.995      0.781\n",
+            "                 truck        128         12      0.555      0.417      0.457      0.285      0.624      0.417      0.397      0.277\n",
+            "                  boat        128          6      0.624        0.5      0.584      0.186          1      0.326      0.412      0.133\n",
+            "         traffic light        128         14      0.513      0.302      0.411      0.247      0.435      0.214      0.376      0.251\n",
+            "             stop sign        128          2      0.824          1      0.995      0.796      0.906          1      0.995      0.747\n",
+            "                 bench        128          9       0.75      0.667      0.763      0.367      0.724      0.585      0.698      0.209\n",
+            "                  bird        128         16      0.961          1      0.995      0.686      0.918      0.938       0.91      0.525\n",
+            "                   cat        128          4      0.771      0.857      0.945      0.752       0.76        0.8      0.945      0.728\n",
+            "                   dog        128          9      0.987      0.778      0.963      0.681          1      0.705       0.89      0.574\n",
+            "                 horse        128          2      0.703          1      0.995      0.697      0.759          1      0.995      0.249\n",
+            "              elephant        128         17      0.916      0.882       0.93      0.691      0.811      0.765      0.829      0.537\n",
+            "                  bear        128          1      0.664          1      0.995      0.995      0.701          1      0.995      0.895\n",
+            "                 zebra        128          4      0.864          1      0.995      0.921      0.879          1      0.995      0.804\n",
+            "               giraffe        128          9      0.883      0.889       0.94      0.683      0.845      0.778       0.78      0.463\n",
+            "              backpack        128          6          1       0.59      0.701      0.372          1      0.474       0.52      0.252\n",
+            "              umbrella        128         18      0.654      0.839      0.887       0.52      0.517      0.556      0.427      0.229\n",
+            "               handbag        128         19       0.54      0.211      0.408      0.221      0.796      0.206      0.396      0.196\n",
+            "                   tie        128          7      0.864      0.857      0.857      0.577      0.925      0.857      0.857      0.534\n",
+            "              suitcase        128          4      0.716          1      0.945      0.647      0.767          1      0.945      0.634\n",
+            "               frisbee        128          5      0.708        0.8      0.761      0.643      0.737        0.8      0.761      0.501\n",
+            "                  skis        128          1      0.691          1      0.995      0.796      0.761          1      0.995      0.199\n",
+            "             snowboard        128          7      0.918      0.857      0.904      0.604       0.32      0.286      0.235      0.137\n",
+            "           sports ball        128          6      0.902      0.667      0.701      0.466      0.727        0.5      0.497      0.471\n",
+            "                  kite        128         10      0.586        0.4      0.511      0.231      0.663      0.394      0.417      0.139\n",
+            "          baseball bat        128          4      0.359        0.5      0.401      0.169      0.631        0.5      0.526      0.133\n",
+            "        baseball glove        128          7          1      0.519       0.58      0.327      0.687      0.286      0.455      0.328\n",
+            "            skateboard        128          5      0.729        0.8      0.862      0.631      0.599        0.6      0.604      0.379\n",
+            "         tennis racket        128          7       0.57      0.714      0.645      0.448      0.608      0.714      0.645      0.412\n",
+            "                bottle        128         18      0.469      0.393      0.537      0.357      0.661      0.389      0.543      0.349\n",
+            "            wine glass        128         16      0.677      0.938      0.866      0.441       0.53      0.625       0.67      0.334\n",
+            "                   cup        128         36      0.777      0.722      0.812      0.466      0.725      0.583      0.762      0.467\n",
+            "                  fork        128          6      0.948      0.333      0.425       0.27      0.527      0.167       0.18      0.102\n",
+            "                 knife        128         16      0.757      0.587      0.669      0.458       0.79        0.5      0.552       0.34\n",
+            "                 spoon        128         22       0.74      0.364      0.559      0.269      0.925      0.364      0.513      0.213\n",
+            "                  bowl        128         28      0.766      0.714      0.725      0.559      0.803      0.584      0.665      0.353\n",
+            "                banana        128          1      0.408          1      0.995      0.398      0.539          1      0.995      0.497\n",
+            "              sandwich        128          2          1          0      0.695      0.536          1          0      0.498      0.448\n",
+            "                orange        128          4      0.467          1      0.995      0.693      0.518          1      0.995      0.663\n",
+            "              broccoli        128         11      0.462      0.455      0.383      0.259      0.548      0.455      0.384      0.256\n",
+            "                carrot        128         24      0.631      0.875       0.77      0.533      0.757      0.909      0.853      0.499\n",
+            "               hot dog        128          2      0.555          1      0.995      0.995      0.578          1      0.995      0.796\n",
+            "                 pizza        128          5       0.89        0.8      0.962      0.796          1      0.778      0.962      0.766\n",
+            "                 donut        128         14      0.695          1      0.893      0.772      0.704          1      0.893      0.696\n",
+            "                  cake        128          4      0.826          1      0.995       0.92      0.862          1      0.995      0.846\n",
+            "                 chair        128         35       0.53      0.571      0.613      0.336       0.67        0.6      0.538      0.271\n",
+            "                 couch        128          6      0.972      0.667      0.833      0.627          1       0.62      0.696      0.394\n",
+            "          potted plant        128         14        0.7      0.857      0.883      0.552      0.836      0.857      0.883      0.473\n",
+            "                   bed        128          3      0.979      0.667       0.83      0.366          1          0       0.83      0.373\n",
+            "          dining table        128         13      0.775      0.308      0.505      0.364      0.644      0.231       0.25     0.0804\n",
+            "                toilet        128          2      0.836          1      0.995      0.846      0.887          1      0.995      0.797\n",
+            "                    tv        128          2        0.6          1      0.995      0.846      0.655          1      0.995      0.896\n",
+            "                laptop        128          3      0.822      0.333      0.445      0.307          1          0      0.392       0.12\n",
+            "                 mouse        128          2          1          0          0          0          1          0          0          0\n",
+            "                remote        128          8      0.745        0.5       0.62      0.459      0.821        0.5      0.624      0.449\n",
+            "            cell phone        128          8      0.686      0.375      0.502      0.272      0.488       0.25       0.28      0.132\n",
+            "             microwave        128          3      0.831          1      0.995      0.722      0.867          1      0.995      0.592\n",
+            "                  oven        128          5      0.439        0.4      0.435      0.294      0.823        0.6      0.645      0.418\n",
+            "                  sink        128          6      0.677        0.5      0.565      0.448      0.722        0.5       0.46      0.362\n",
+            "          refrigerator        128          5      0.533        0.8      0.783      0.524      0.558        0.8      0.783      0.527\n",
+            "                  book        128         29      0.732      0.379      0.423      0.196       0.69      0.207       0.38      0.131\n",
+            "                 clock        128          9      0.889      0.778      0.917      0.677      0.908      0.778      0.875      0.604\n",
+            "                  vase        128          2      0.375          1      0.995      0.995      0.455          1      0.995      0.796\n",
+            "              scissors        128          1          1          0     0.0166    0.00166          1          0          0          0\n",
+            "            teddy bear        128         21      0.813      0.829      0.841      0.457      0.826      0.678      0.786      0.422\n",
+            "            toothbrush        128          5      0.806          1      0.995      0.733      0.991          1      0.995      0.628\n",
+            "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n"
+          ]
+        }
+      ],
+      "source": [
+        "# Train YOLOv5s on COCO128 for 3 epochs\n",
+        "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "15glLzbQx5u0"
+      },
+      "source": [
+        "# 4. Visualize"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "nWOsI5wJR1o3"
+      },
+      "source": [
+        "## Comet Logging and Visualization 🌟 NEW\n",
+        "\n",
+        "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
+        "\n",
+        "Getting started is easy:\n",
+        "```shell\n",
+        "pip install comet_ml  # 1. install\n",
+        "export COMET_API_KEY=<Your API Key>  # 2. paste API key\n",
+        "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. train\n",
+        "```\n",
+        "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
+        "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
+        "\n",
+        "<a href=\"https://bit.ly/yolov5-readme-comet2\">\n",
+        "<img alt=\"Comet Dashboard\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\" width=\"1280\"/></a>"
+      ]
+    },
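+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Optional sketch (not part of the original tutorial): set the Comet API key from Python\n",
+        "# instead of the shell `export` shown above. 'YOUR_API_KEY' is a placeholder, not a real key.\n",
+        "import os\n",
+        "\n",
+        "os.environ['COMET_API_KEY'] = 'YOUR_API_KEY'  # comet_ml reads this environment variable\n",
+        "# then train as usual, e.g.:\n",
+        "# !python segment/train.py --img 640 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt"
+      ]
+    },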
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Lay2WsTjNJzP"
+      },
+      "source": [
+        "## ClearML Logging and Automation 🌟 NEW\n",
+        "\n",
+        "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
+        "\n",
+        "- `pip install clearml`\n",
+        "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
+        "\n",
+        "You'll get all the features expected from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
+        "\n",
+        "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
+        "\n",
+        "<a href=\"https://cutt.ly/yolov5-notebook-clearml\">\n",
+        "<img alt=\"ClearML Experiment Management UI\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\" width=\"1280\"/></a>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "-WPvRbS5Swl6"
+      },
+      "source": [
+        "## Local Logging\n",
+        "\n",
+        "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train-seg`, with a new experiment directory created for each new training as `runs/train-seg/exp2`, `runs/train-seg/exp3`, etc.\n",
+        "\n",
+        "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
+        "\n",
+        "<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
+      ]
+    },
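+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Optional sketch (not part of the original tutorial): inspect the local CSV logs from a\n",
+        "# completed run. 'runs/train-seg/exp' is an assumed path; adjust it to your own run directory.\n",
+        "import pandas as pd\n",
+        "\n",
+        "df = pd.read_csv('runs/train-seg/exp/results.csv')\n",
+        "df.columns = df.columns.str.strip()  # column headers in results.csv are space-padded\n",
+        "df.tail()  # per-epoch train/val losses and box/mask mAP metrics"
+      ]
+    },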
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Zelyeqbyt3GD"
+      },
+      "source": [
+        "# Environments\n",
+        "\n",
+        "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
+        "\n",
+        "- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+        "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
+        "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
+        "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "6Qu7Iesl0p54"
+      },
+      "source": [
+        "# Status\n",
+        "\n",
+        "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n",
+        "\n",
+        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "IEijrePND_2I"
+      },
+      "source": [
+        "# Appendix\n",
+        "\n",
+        "Additional content below."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "GMusP4OAxFu6"
+      },
+      "outputs": [],
+      "source": [
+        "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
+        "import torch\n",
+        "\n",
+        "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg')  # yolov5n - yolov5x6 or custom\n",
+        "im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\n",
+        "results = model(im)  # inference\n",
+        "results.print()  # or .show(), .save(), .crop(), .pandas(), etc."
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "name": "YOLOv5 Segmentation Tutorial",
+      "provenance": [],
+      "toc_visible": true
+    },
+    "kernelspec": {
+      "display_name": "Python 3 (ipykernel)",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.7.12"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}

+ 473 - 0
segment/val.py

@@ -0,0 +1,473 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Validate a trained YOLOv5 segment model on a segment dataset
+
+Usage:
+    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
+    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments
+
+Usage - formats:
+    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
+                                      yolov5s-seg.torchscript        # TorchScript
+                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                      yolov5s-seg_openvino_model     # OpenVINO
+                                      yolov5s-seg.engine             # TensorRT
+                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
+                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
+                                      yolov5s-seg.pb                 # TensorFlow GraphDef
+                                      yolov5s-seg.tflite             # TensorFlow Lite
+                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
+                                      yolov5s-seg_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import json
+import os
+import subprocess
+import sys
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+import torch.nn.functional as F
+
+from models.common import DetectMultiBackend
+from models.yolo import SegmentationModel
+from utils.callbacks import Callbacks
+from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
+                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
+                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
+from utils.metrics import ConfusionMatrix, box_iou
+from utils.plots import output_to_target, plot_val_study
+from utils.segment.dataloaders import create_dataloader
+from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
+from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
+from utils.segment.plots import plot_images_and_masks
+from utils.torch_utils import de_parallel, select_device, smart_inference_mode
+
+
+def save_one_txt(predn, save_conf, shape, file):
+    # Save one txt result
+    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
+    for *xyxy, conf, cls in predn.tolist():
+        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+        with open(file, 'a') as f:
+            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+
+def save_one_json(predn, jdict, path, class_map, pred_masks):
+    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+    from pycocotools.mask import encode
+
+    def single_encode(x):
+        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
+        rle['counts'] = rle['counts'].decode('utf-8')
+        return rle
+
+    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+    box = xyxy2xywh(predn[:, :4])  # xywh
+    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+    pred_masks = np.transpose(pred_masks, (2, 0, 1))
+    with ThreadPool(NUM_THREADS) as pool:
+        rles = pool.map(single_encode, pred_masks)
+    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
+        jdict.append({
+            'image_id': image_id,
+            'category_id': class_map[int(p[5])],
+            'bbox': [round(x, 3) for x in b],
+            'score': round(p[4], 5),
+            'segmentation': rles[i]})
+
+
+def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
+    """
+    Return correct prediction matrix
+    Arguments:
+        detections (array[N, 6]), x1, y1, x2, y2, conf, class
+        labels (array[M, 5]), class, x1, y1, x2, y2
+    Returns:
+        correct (array[N, 10]), for 10 IoU levels
+    """
+    if masks:
+        if overlap:
+            nl = len(labels)
+            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
+            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
+            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
+        if gt_masks.shape[1:] != pred_masks.shape[1:]:
+            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
+            gt_masks = gt_masks.gt_(0.5)
+        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
+    else:  # boxes
+        iou = box_iou(labels[:, 1:], detections[:, :4])
+
+    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
+    correct_class = labels[:, 0:1] == detections[:, 5]
+    for i in range(len(iouv)):
+        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
+        if x[0].shape[0]:
+            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
+            if x[0].shape[0] > 1:
+                matches = matches[matches[:, 2].argsort()[::-1]]
+                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                # matches = matches[matches[:, 2].argsort()[::-1]]
+                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+            correct[matches[:, 1].astype(int), i] = True
+    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
+
+
+@smart_inference_mode()
+def run(
+        data,
+        weights=None,  # model.pt path(s)
+        batch_size=32,  # batch size
+        imgsz=640,  # inference size (pixels)
+        conf_thres=0.001,  # confidence threshold
+        iou_thres=0.6,  # NMS IoU threshold
+        max_det=300,  # maximum detections per image
+        task='val',  # train, val, test, speed or study
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        workers=8,  # max dataloader workers (per RANK in DDP mode)
+        single_cls=False,  # treat as single-class dataset
+        augment=False,  # augmented inference
+        verbose=False,  # verbose output
+        save_txt=False,  # save results to *.txt
+        save_hybrid=False,  # save label+prediction hybrid results to *.txt
+        save_conf=False,  # save confidences in --save-txt labels
+        save_json=False,  # save a COCO-JSON results file
+        project=ROOT / 'runs/val-seg',  # save to project/name
+        name='exp',  # save to project/name
+        exist_ok=False,  # existing project/name ok, do not increment
+        half=True,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        model=None,
+        dataloader=None,
+        save_dir=Path(''),
+        plots=True,
+        overlap=False,
+        mask_downsample_ratio=1,
+        compute_loss=None,
+        callbacks=Callbacks(),
+):
+    if save_json:
+        check_requirements('pycocotools>=2.0.6')
+        process = process_mask_native  # more accurate
+    else:
+        process = process_mask  # faster
+
+    # Initialize/load model and set device
+    training = model is not None
+    if training:  # called by train.py
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
+        half &= device.type != 'cpu'  # half precision only supported on CUDA
+        model.half() if half else model.float()
+        nm = de_parallel(model).model[-1].nm  # number of masks
+    else:  # called directly
+        device = select_device(device, batch_size=batch_size)
+
+        # Directories
+        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+        # Load model
+        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
+        imgsz = check_img_size(imgsz, s=stride)  # check image size
+        half = model.fp16  # FP16 supported on limited backends with CUDA
+        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
+        if engine:
+            batch_size = model.batch_size
+        else:
+            device = model.device
+            if not (pt or jit):
+                batch_size = 1  # export.py models default to batch-size 1
+                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
+
+        # Data
+        data = check_dataset(data)  # check
+
+    # Configure
+    model.eval()
+    cuda = device.type != 'cpu'
+    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
+    nc = 1 if single_cls else int(data['nc'])  # number of classes
+    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
+    niou = iouv.numel()
+
+    # Dataloader
+    if not training:
+        if pt and not single_cls:  # check --weights are trained on --data
+            ncm = model.model.nc
+            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
+                              f'classes). Pass correct combination of --weights and --data that are trained together.'
+        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
+        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
+        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
+        dataloader = create_dataloader(data[task],
+                                       imgsz,
+                                       batch_size,
+                                       stride,
+                                       single_cls,
+                                       pad=pad,
+                                       rect=rect,
+                                       workers=workers,
+                                       prefix=colorstr(f'{task}: '),
+                                       overlap_mask=overlap,
+                                       mask_downsample_ratio=mask_downsample_ratio)[0]
+
+    seen = 0
+    confusion_matrix = ConfusionMatrix(nc=nc)
+    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
+    if isinstance(names, (list, tuple)):  # old format
+        names = dict(enumerate(names))
+    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
+    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
+                                  'mAP50', 'mAP50-95)')
+    dt = Profile(), Profile(), Profile()
+    metrics = Metrics()
+    loss = torch.zeros(4, device=device)
+    jdict, stats = [], []
+    # callbacks.run('on_val_start')
+    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
+    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
+        # callbacks.run('on_val_batch_start')
+        with dt[0]:
+            if cuda:
+                im = im.to(device, non_blocking=True)
+                targets = targets.to(device)
+                masks = masks.to(device)
+            masks = masks.float()
+            im = im.half() if half else im.float()  # uint8 to fp16/32
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+            nb, _, height, width = im.shape  # batch size, channels, height, width
+
+        # Inference
+        with dt[1]:
+            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)
+
+        # Loss
+        if compute_loss:
+            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, seg, obj, cls
+
+        # NMS
+        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
+        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
+        with dt[2]:
+            preds = non_max_suppression(preds,
+                                        conf_thres,
+                                        iou_thres,
+                                        labels=lb,
+                                        multi_label=True,
+                                        agnostic=single_cls,
+                                        max_det=max_det,
+                                        nm=nm)
+
+        # Metrics
+        plot_masks = []  # masks for plotting
+        for si, (pred, proto) in enumerate(zip(preds, protos)):
+            labels = targets[targets[:, 0] == si, 1:]
+            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
+            path, shape = Path(paths[si]), shapes[si][0]
+            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
+            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
+            seen += 1
+
+            if npr == 0:
+                if nl:
+                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
+                    if plots:
+                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
+                continue
+
+            # Masks
+            midx = [si] if overlap else targets[:, 0] == si
+            gt_masks = masks[midx]
+            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])
+
+            # Predictions
+            if single_cls:
+                pred[:, 5] = 0
+            predn = pred.clone()
+            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred
+
+            # Evaluate
+            if nl:
+                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
+                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
+                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
+                correct_bboxes = process_batch(predn, labelsn, iouv)
+                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
+                if plots:
+                    confusion_matrix.process_batch(predn, labelsn)
+            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct_masks, correct_bboxes, conf, pcls, tcls)
+
+            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
+            if plots and batch_i < 3:
+                plot_masks.append(pred_masks[:15])  # filter top 15 to plot
+
+            # Save/log
+            if save_txt:
+                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
+            if save_json:
+                pred_masks = scale_image(im[si].shape[1:],
+                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
+                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
+            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
+
+        # Plot images
+        if plots and batch_i < 3:
+            if len(plot_masks):
+                plot_masks = torch.cat(plot_masks, dim=0)
+            plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
+            plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
+                                  save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred
+
+        # callbacks.run('on_val_batch_end')
+
+    # Compute metrics
+    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
+    if len(stats) and stats[0].any():
+        results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
+        metrics.update(results)
+    nt = np.bincount(stats[4].astype(int), minlength=nc)  # number of targets per class
+
+    # Print results
+    pf = '%22s' + '%11i' * 2 + '%11.3g' * 8  # print format
+    LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results()))
+    if nt.sum() == 0:
+        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')
+
+    # Print results per class
+    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
+        for i, c in enumerate(metrics.ap_class_index):
+            LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))
+
+    # Print speeds
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    if not training:
+        shape = (batch_size, 3, imgsz, imgsz)
+        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
+
+    # Plots
+    if plots:
+        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
+    # callbacks.run('on_val_end')
+
+    mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()
+
+    # Save JSON
+    if save_json and len(jdict):
+        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
+        anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))  # annotations
+        pred_json = str(save_dir / f'{w}_predictions.json')  # predictions
+        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
+        with open(pred_json, 'w') as f:
+            json.dump(jdict, f)
+
+        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+            from pycocotools.coco import COCO
+            from pycocotools.cocoeval import COCOeval
+
+            anno = COCO(anno_json)  # init annotations api
+            pred = anno.loadRes(pred_json)  # init predictions api
+            results = []
+            for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
+                if is_coco:
+                    eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # img ID to evaluate
+                eval.evaluate()
+                eval.accumulate()
+                eval.summarize()
+                results.extend(eval.stats[:2])  # update results (mAP@0.5:0.95, mAP@0.5)
+            map_bbox, map50_bbox, map_mask, map50_mask = results
+        except Exception as e:
+            LOGGER.info(f'pycocotools unable to run: {e}')
+
+    # Return results
+    model.float()  # for training
+    if not training:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
+    return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
+    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
+    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
+    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
+    parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
+    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
+    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
+    parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    opt = parser.parse_args()
+    opt.data = check_yaml(opt.data)  # check YAML
+    # opt.save_json |= opt.data.endswith('coco.yaml')
+    opt.save_txt |= opt.save_hybrid
+    print_args(vars(opt))
+    return opt
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+
+    if opt.task in ('train', 'val', 'test'):  # run normally
+        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
+            LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
+        if opt.save_hybrid:
+            LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
+        run(**vars(opt))
+
+    else:
+        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
+        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
+        if opt.task == 'speed':  # speed benchmarks
+            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
+            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
+            for opt.weights in weights:
+                run(**vars(opt), plots=False)
+
+        elif opt.task == 'study':  # speed vs mAP benchmarks
+            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
+            for opt.weights in weights:
+                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
+                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
+                for opt.imgsz in x:  # img-size
+                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
+                    r, _, t = run(**vars(opt), plots=False)
+                    y.append(r + t)  # results and times
+                np.savetxt(f, y, fmt='%10.4g')  # save
+            subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt'])
+            plot_val_study(x=x)  # plot
+        else:
+            raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)
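The CLI usage shown in the header docstring maps directly onto the run() function above. Below is a minimal usage sketch, not part of the commit itself: it assumes the call is made from the repository root and that yolov5s-seg.pt and data/coco128-seg.yaml are available (both are fetched automatically by the loaders on first use).

# Illustrative sketch: calling segment/val.py programmatically instead of via the CLI.
from segment.val import run

results, maps, speeds = run(
    data='data/coco128-seg.yaml',  # dataset YAML (assumed available / auto-downloaded)
    weights='yolov5s-seg.pt',      # segmentation checkpoint (assumed available)
    imgsz=640,
    conf_thres=0.001,
    iou_thres=0.6,
    half=False,                    # FP16 only helps on CUDA backends
)
# results = (box P, R, mAP50, mAP50-95, mask P, R, mAP50, mAP50-95, *val losses)
print('box mAP50-95:', results[3], '| mask mAP50-95:', results[7])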

+ 86 - 0
utils/__init__.py

@@ -0,0 +1,86 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+utils/initialization
+"""
+
+import contextlib
+import platform
+import threading
+
+
+def emojis(str=''):
+    # Return platform-dependent emoji-safe version of string
+    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
+
+
+class TryExcept(contextlib.ContextDecorator):
+    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+    def __init__(self, msg=''):
+        self.msg = msg
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, value, traceback):
+        if value:
+            print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
+        return True
+
+
+def threaded(func):
+    # Multi-threads a target function and returns thread. Usage: @threaded decorator
+    def wrapper(*args, **kwargs):
+        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
+        thread.start()
+        return thread
+
+    return wrapper
+
+
+def join_threads(verbose=False):
+    # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
+    main_thread = threading.current_thread()
+    for t in threading.enumerate():
+        if t is not main_thread:
+            if verbose:
+                print(f'Joining thread {t.name}')
+            t.join()
+
+
+def notebook_init(verbose=True):
+    # Check system software and hardware
+    print('Checking setup...')
+
+    import os
+    import shutil
+
+    from ultralytics.yolo.utils.checks import check_requirements
+
+    from utils.general import check_font, is_colab
+    from utils.torch_utils import select_device  # imports
+
+    check_font()
+
+    import psutil
+
+    if check_requirements('wandb', install=False):
+        os.system('pip uninstall -y wandb')  # eliminate unexpected account creation prompt with infinite hang
+    if is_colab():
+        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory
+
+    # System info
+    display = None
+    if verbose:
+        gb = 1 << 30  # bytes to GiB (1024 ** 3)
+        ram = psutil.virtual_memory().total
+        total, used, free = shutil.disk_usage('/')
+        with contextlib.suppress(Exception):  # clear display if ipython is installed
+            from IPython import display
+            display.clear_output()
+        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
+    else:
+        s = ''
+
+    select_device(newline=False)
+    print(emojis(f'Setup complete ✅ {s}'))
+    return display
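For reference, a short sketch (not part of the commit) of how the TryExcept and threaded helpers above are meant to be used; the function names below are made up for illustration.

# Illustrative sketch: TryExcept and threaded from utils/__init__.py.
from utils import TryExcept, threaded

@TryExcept('plot failed')           # exceptions are printed, not raised
def flaky_plot():
    raise ValueError('no data')

@threaded                           # runs in a daemon thread, returns the Thread object
def background_job(n):
    print(f'processing {n} items')

flaky_plot()                        # prints "plot failed: no data" and continues
background_job(100).join()

with TryExcept('download failed'):  # context-manager form of the same class
    raise ConnectionError('timeout')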

BIN
utils/__pycache__/__init__.cpython-38.pyc


BIN
utils/__pycache__/augmentations.cpython-38.pyc


BIN
utils/__pycache__/autoanchor.cpython-38.pyc


BIN
utils/__pycache__/autobatch.cpython-38.pyc


BIN
utils/__pycache__/callbacks.cpython-38.pyc


BIN
utils/__pycache__/dataloaders.cpython-38.pyc


BIN
utils/__pycache__/downloads.cpython-38.pyc


BIN
utils/__pycache__/general.cpython-38.pyc


BIN
utils/__pycache__/loss.cpython-38.pyc


BIN
utils/__pycache__/metrics.cpython-38.pyc


BIN
utils/__pycache__/plots.cpython-38.pyc


BIN
utils/__pycache__/renwu.cpython-38.pyc


BIN
utils/__pycache__/torch_utils.cpython-38.pyc


+ 103 - 0
utils/activations.py

@@ -0,0 +1,103 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Activation functions
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class SiLU(nn.Module):
+    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
+    @staticmethod
+    def forward(x):
+        return x * torch.sigmoid(x)
+
+
+class Hardswish(nn.Module):
+    # Hard-SiLU activation
+    @staticmethod
+    def forward(x):
+        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
+        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX
+
+
+class Mish(nn.Module):
+    # Mish activation https://github.com/digantamisra98/Mish
+    @staticmethod
+    def forward(x):
+        return x * F.softplus(x).tanh()
+
+
+class MemoryEfficientMish(nn.Module):
+    # Mish activation memory-efficient
+    class F(torch.autograd.Function):
+
+        @staticmethod
+        def forward(ctx, x):
+            ctx.save_for_backward(x)
+            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
+
+        @staticmethod
+        def backward(ctx, grad_output):
+            x = ctx.saved_tensors[0]
+            sx = torch.sigmoid(x)
+            fx = F.softplus(x).tanh()
+            return grad_output * (fx + x * sx * (1 - fx * fx))
+
+    def forward(self, x):
+        return self.F.apply(x)
+
+
+class FReLU(nn.Module):
+    # FReLU activation https://arxiv.org/abs/2007.11824
+    def __init__(self, c1, k=3):  # ch_in, kernel
+        super().__init__()
+        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
+        self.bn = nn.BatchNorm2d(c1)
+
+    def forward(self, x):
+        return torch.max(x, self.bn(self.conv(x)))
+
+
+class AconC(nn.Module):
+    r""" ACON activation (activate or not)
+    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
+    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
+    """
+
+    def __init__(self, c1):
+        super().__init__()
+        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
+
+    def forward(self, x):
+        dpx = (self.p1 - self.p2) * x
+        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
+
+
+class MetaAconC(nn.Module):
+    r""" ACON activation (activate or not)
+    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
+    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
+    """
+
+    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
+        super().__init__()
+        c2 = max(r, c1 // r)
+        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
+        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
+        # self.bn1 = nn.BatchNorm2d(c2)
+        # self.bn2 = nn.BatchNorm2d(c1)
+
+    def forward(self, x):
+        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
+        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
+        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable
+        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed
+        dpx = (self.p1 - self.p2) * x
+        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x

+ 397 - 0
utils/augmentations.py

@@ -0,0 +1,397 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Image augmentation functions
+"""
+
+import math
+import random
+
+import cv2
+import numpy as np
+import torch
+import torchvision.transforms as T
+import torchvision.transforms.functional as TF
+
+from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
+from utils.metrics import bbox_ioa
+
+IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
+IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation
+
+
+class Albumentations:
+    # YOLOv5 Albumentations class (optional, only used if package is installed)
+    def __init__(self, size=640):
+        self.transform = None
+        prefix = colorstr('albumentations: ')
+        try:
+            import albumentations as A
+            check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+
+            T = [
+                A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),
+                A.Blur(p=0.01),
+                A.MedianBlur(p=0.01),
+                A.ToGray(p=0.01),
+                A.CLAHE(p=0.01),
+                A.RandomBrightnessContrast(p=0.0),
+                A.RandomGamma(p=0.0),
+                A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
+            self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
+
+            LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        except ImportError:  # package not installed, skip
+            pass
+        except Exception as e:
+            LOGGER.info(f'{prefix}{e}')
+
+    def __call__(self, im, labels, p=1.0):
+        if self.transform and random.random() < p:
+            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed
+            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
+        return im, labels
+
+
+def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
+    # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
+    return TF.normalize(x, mean, std, inplace=inplace)
+
+
+def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
+    # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean
+    for i in range(3):
+        x[:, i] = x[:, i] * std[i] + mean[i]
+    return x
+
+
+def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
+    # HSV color-space augmentation
+    if hgain or sgain or vgain:
+        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
+        hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
+        dtype = im.dtype  # uint8
+
+        x = np.arange(0, 256, dtype=r.dtype)
+        lut_hue = ((x * r[0]) % 180).astype(dtype)
+        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
+        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
+
+        im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
+        cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im)  # no return needed
+
+
+def hist_equalize(im, clahe=True, bgr=False):
+    # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
+    yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
+    if clahe:
+        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
+    else:
+        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
+    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB
+
+
+def replicate(im, labels):
+    # Replicate labels
+    h, w = im.shape[:2]
+    boxes = labels[:, 1:].astype(int)
+    x1, y1, x2, y2 = boxes.T
+    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
+    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
+        x1b, y1b, x2b, y2b = boxes[i]
+        bh, bw = y2b - y1b, x2b - x1b
+        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
+        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
+        im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]  # im4[ymin:ymax, xmin:xmax]
+        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
+
+    return im, labels
+
+
+def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
+    # Resize and pad image while meeting stride-multiple constraints
+    shape = im.shape[:2]  # current shape [height, width]
+    if isinstance(new_shape, int):
+        new_shape = (new_shape, new_shape)
+
+    # Scale ratio (new / old)
+    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+    if not scaleup:  # only scale down, do not scale up (for better val mAP)
+        r = min(r, 1.0)
+
+    # Compute padding
+    ratio = r, r  # width, height ratios
+    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+    if auto:  # minimum rectangle
+        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
+    elif scaleFill:  # stretch
+        dw, dh = 0.0, 0.0
+        new_unpad = (new_shape[1], new_shape[0])
+        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
+
+    dw /= 2  # divide padding into 2 sides
+    dh /= 2
+
+    if shape[::-1] != new_unpad:  # resize
+        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
+    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
+    return im, ratio, (dw, dh)
+
+
+def random_perspective(im,
+                       targets=(),
+                       segments=(),
+                       degrees=10,
+                       translate=.1,
+                       scale=.1,
+                       shear=10,
+                       perspective=0.0,
+                       border=(0, 0)):
+    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
+    # targets = [cls, xyxy]
+
+    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
+    width = im.shape[1] + border[1] * 2
+
+    # Center
+    C = np.eye(3)
+    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
+    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)
+
+    # Perspective
+    P = np.eye(3)
+    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
+    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
+
+    # Rotation and Scale
+    R = np.eye(3)
+    a = random.uniform(-degrees, degrees)
+    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
+    s = random.uniform(1 - scale, 1 + scale)
+    # s = 2 ** random.uniform(-scale, scale)
+    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
+
+    # Shear
+    S = np.eye(3)
+    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
+    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
+
+    # Translation
+    T = np.eye(3)
+    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
+    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)
+
+    # Combined rotation matrix
+    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
+    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
+        if perspective:
+            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
+        else:  # affine
+            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
+
+    # Visualize
+    # import matplotlib.pyplot as plt
+    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
+    # ax[0].imshow(im[:, :, ::-1])  # base
+    # ax[1].imshow(im2[:, :, ::-1])  # warped
+
+    # Transform label coordinates
+    n = len(targets)
+    if n:
+        use_segments = any(x.any() for x in segments) and len(segments) == n
+        new = np.zeros((n, 4))
+        if use_segments:  # warp segments
+            segments = resample_segments(segments)  # upsample
+            for i, segment in enumerate(segments):
+                xy = np.ones((len(segment), 3))
+                xy[:, :2] = segment
+                xy = xy @ M.T  # transform
+                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine
+
+                # clip
+                new[i] = segment2box(xy, width, height)
+
+        else:  # warp boxes
+            xy = np.ones((n * 4, 3))
+            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
+            xy = xy @ M.T  # transform
+            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine
+
+            # create new boxes
+            x = xy[:, [0, 2, 4, 6]]
+            y = xy[:, [1, 3, 5, 7]]
+            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+
+            # clip
+            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
+            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
+
+        # filter candidates
+        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
+        targets = targets[i]
+        targets[:, 1:5] = new[i]
+
+    return im, targets
+
+
+def copy_paste(im, labels, segments, p=0.5):
+    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
+    n = len(segments)
+    if p and n:
+        h, w, c = im.shape  # height, width, channels
+        im_new = np.zeros(im.shape, np.uint8)
+        for j in random.sample(range(n), k=round(p * n)):
+            l, s = labels[j], segments[j]
+            box = w - l[3], l[2], w - l[1], l[4]
+            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
+            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels
+                labels = np.concatenate((labels, [[l[0], *box]]), 0)
+                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
+                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)
+
+        result = cv2.flip(im, 1)  # augment segments (flip left-right)
+        i = cv2.flip(im_new, 1).astype(bool)
+        im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug
+
+    return im, labels, segments
+
+
+def cutout(im, labels, p=0.5):
+    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
+    if random.random() < p:
+        h, w = im.shape[:2]
+        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
+        for s in scales:
+            mask_h = random.randint(1, int(h * s))  # create random masks
+            mask_w = random.randint(1, int(w * s))
+
+            # box
+            xmin = max(0, random.randint(0, w) - mask_w // 2)
+            ymin = max(0, random.randint(0, h) - mask_h // 2)
+            xmax = min(w, xmin + mask_w)
+            ymax = min(h, ymin + mask_h)
+
+            # apply random color mask
+            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
+
+            # return unobscured labels
+            if len(labels) and s > 0.03:
+                box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
+                ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))  # intersection over area
+                labels = labels[ioa < 0.60]  # remove >60% obscured labels
+
+    return labels
+
+
+def mixup(im, labels, im2, labels2):
+    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
+    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
+    im = (im * r + im2 * (1 - r)).astype(np.uint8)
+    labels = np.concatenate((labels, labels2), 0)
+    return im, labels
+
+
+def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
+    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
+    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
+    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
+    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
+
+
+def classify_albumentations(
+        augment=True,
+        size=224,
+        scale=(0.08, 1.0),
+        ratio=(0.75, 1.0 / 0.75),  # 0.75, 1.33
+        hflip=0.5,
+        vflip=0.0,
+        jitter=0.4,
+        mean=IMAGENET_MEAN,
+        std=IMAGENET_STD,
+        auto_aug=False):
+    # YOLOv5 classification Albumentations (optional, only used if package is installed)
+    prefix = colorstr('albumentations: ')
+    try:
+        import albumentations as A
+        from albumentations.pytorch import ToTensorV2
+        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+        if augment:  # Resize and crop
+            T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
+            if auto_aug:
+                # TODO: implement AugMix, AutoAug & RandAug in albumentation
+                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+            else:
+                if hflip > 0:
+                    T += [A.HorizontalFlip(p=hflip)]
+                if vflip > 0:
+                    T += [A.VerticalFlip(p=vflip)]
+                if jitter > 0:
+                    color_jitter = (float(jitter), ) * 3  # repeat value for brightness, contrast, saturation, 0 hue
+                    T += [A.ColorJitter(*color_jitter, 0)]
+        else:  # Use fixed crop for eval set (reproducibility)
+            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
+        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
+        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        return A.Compose(T)
+
+    except ImportError:  # package not installed, skip
+        LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
+    except Exception as e:
+        LOGGER.info(f'{prefix}{e}')
+
+
+def classify_transforms(size=224):
+    # Transforms to apply if albumentations not installed
+    assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
+    # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
+    return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
+
+
+class LetterBox:
+    # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, size=(640, 640), auto=False, stride=32):
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+        self.auto = auto  # pass max size integer, automatically solve for short side using stride
+        self.stride = stride  # used with auto
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        r = min(self.h / imh, self.w / imw)  # ratio of new/old
+        h, w = round(imh * r), round(imw * r)  # resized image
+        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
+        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+        im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
+        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+        return im_out
+
+
+class CenterCrop:
+    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+    def __init__(self, size=640):
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        m = min(imh, imw)  # min dimension
+        top, left = (imh - m) // 2, (imw - m) // 2
+        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
+
+
+class ToTensor:
+    # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, half=False):
+        super().__init__()
+        self.half = half
+
+    def __call__(self, im):  # im = np.array HWC in BGR order
+        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous
+        im = torch.from_numpy(im)  # to torch
+        im = im.half() if self.half else im.float()  # uint8 to fp16/32
+        im /= 255.0  # 0-255 to 0.0-1.0
+        return im

Some files were not shown because too many files changed in this diff