# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_model     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

import argparse
import json
import os
import subprocess
import sys
from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import torch.nn.functional as F

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
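
# A line written by save_one_txt is space-separated with all coordinates normalized to 0-1
# (values below are illustrative):
#   <class> <x_center> <y_center> <width> <height> [<conf>]
#   e.g. 0 0.512 0.433 0.205 0.317 0.897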


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    from pycocotools.mask import encode

    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
        rle['counts'] = rle['counts'].decode('utf-8')
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5),
            'segmentation': rles[i]})
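
# Note: the 'segmentation' value is a COCO run-length encoding as returned by
# pycocotools.mask.encode, i.e. a dict of the form {'size': [height, width], 'counts': '...'};
# 'counts' is decoded to utf-8 above so the whole entry is JSON-serializable.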


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return a correct-prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    if masks:
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU >= threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
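
# Note: the matching above is greedy per IoU threshold — candidate (label, detection) pairs are
# sorted by descending IoU and deduplicated so each label and each detection is used at most once;
# a detection counts as correct at threshold i only if it survives this one-to-one matching.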


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster
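
    # Rough distinction: process_mask_native upsamples the prototype masks to native image size
    # before cropping with the predicted boxes (closer to what pycocotools evaluation expects),
    # while process_mask crops at prototype resolution and upsamples afterwards, trading a
    # little accuracy for speed.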

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

    # Data
    data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()
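
    # iouv holds the ten thresholds 0.50, 0.55, ..., 0.95: mAP@0.5 reads column 0 of the
    # correct-prediction matrix, mAP@0.5:0.95 averages over all ten columns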

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]
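
    # rect=pt enables rectangular (letterboxed) batches for PyTorch backends, with pad=0.5 adding
    # a small letterbox border per batch; --task speed forces square, unpadded inference so that
    # timings are comparable across backends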

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, seg, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)
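
        # After NMS each per-image prediction tensor has 6 + nm columns,
        # [x1, y1, x2, y2, conf, cls, coeff_1 ... coeff_nm], so pred[:, 6:] below are the mask
        # prototype coefficients and pred[:, :4] the boxes used to crop the assembled masks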

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
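
            # With overlap_mask=True the dataloader packs all instances of an image into a single
            # map whose pixel values are instance indices 1..n, so only the slice [si] is taken
            # here; process_batch decodes it back into per-instance binary masks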
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct_bboxes = process_batch(predn, labelsn, iouv)
                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if plots and batch_i < 3:
                plot_masks.append(pred_masks[:15])  # filter top 15 to plot

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                pred_masks = scale_image(im[si].shape[1:],
                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            if len(plot_masks):
                plot_masks = torch.cat(plot_masks, dim=0)
            plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
            plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
                                  save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        # callbacks.run('on_val_batch_end')

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
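    # stats now holds five concatenated arrays over all images: correct_masks (N, 10),
    # correct_bboxes (N, 10), confidence (N,), predicted class (N,) and target class (M,)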
    if len(stats) and stats[0].any():
        results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
        metrics.update(results)
    nt = np.bincount(stats[4].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 8  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results()))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(metrics.ap_class_index):
            LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
    # callbacks.run('on_val_end')

    mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))  # annotations
        pred_json = str(save_dir / f'{w}_predictions.json')  # predictions
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            results = []
            for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
                if is_coco:
                    eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # img ID to evaluate
                eval.evaluate()
                eval.accumulate()
                eval.summarize()
                results.extend(eval.stats[:2])  # update results (mAP@0.5:0.95, mAP@0.5)
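
            # eval.stats[:2] is (mAP@0.5:0.95, mAP@0.5); the bbox evaluator runs before segm, so
            # results is [box mAP, box mAP50, mask mAP, mask mAP50] for the unpack below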
            map_bbox, map50_bbox, map_mask, map50_mask = results
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
    return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    # opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(vars(opt))
    return opt
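
# Typical invocations (the yolov5s-seg.pt checkpoint and dataset paths are illustrative):
#   $ python segment/val.py --weights yolov5s-seg.pt --data coco128-seg.yaml --img 640        # accuracy
#   $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --task speed --batch 1  # speed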


def main(opt):
    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt'])
            plot_val_study(x=x)  # plot
        else:
            raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')


if __name__ == '__main__':
    opt = parse_opt()
    main(opt)