  1. # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
  2. """
  3. Model validation metrics
  4. """
  5. import numpy as np
  6. from ..metrics import ap_per_class
  7. def fitness(x):
  8. # Model fitness as a weighted combination of metrics
  9. w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
  10. return (x[:, :8] * w).sum(1)
  11. def ap_per_class_box_and_mask(
  12. tp_m,
  13. tp_b,
  14. conf,
  15. pred_cls,
  16. target_cls,
  17. plot=False,
  18. save_dir='.',
  19. names=(),
  20. ):
  21. """
  22. Args:
  23. tp_b: tp of boxes.
  24. tp_m: tp of masks.
  25. other arguments see `func: ap_per_class`.
  26. """
  27. results_boxes = ap_per_class(tp_b,
  28. conf,
  29. pred_cls,
  30. target_cls,
  31. plot=plot,
  32. save_dir=save_dir,
  33. names=names,
  34. prefix='Box')[2:]
  35. results_masks = ap_per_class(tp_m,
  36. conf,
  37. pred_cls,
  38. target_cls,
  39. plot=plot,
  40. save_dir=save_dir,
  41. names=names,
  42. prefix='Mask')[2:]
  43. results = {
  44. 'boxes': {
  45. 'p': results_boxes[0],
  46. 'r': results_boxes[1],
  47. 'ap': results_boxes[3],
  48. 'f1': results_boxes[2],
  49. 'ap_class': results_boxes[4]},
  50. 'masks': {
  51. 'p': results_masks[0],
  52. 'r': results_masks[1],
  53. 'ap': results_masks[3],
  54. 'f1': results_masks[2],
  55. 'ap_class': results_masks[4]}}
  56. return results
  57. class Metric:
  58. def __init__(self) -> None:
  59. self.p = [] # (nc, )
  60. self.r = [] # (nc, )
  61. self.f1 = [] # (nc, )
  62. self.all_ap = [] # (nc, 10)
  63. self.ap_class_index = [] # (nc, )
  64. @property
  65. def ap50(self):
  66. """AP@0.5 of all classes.
  67. Return:
  68. (nc, ) or [].
  69. """
  70. return self.all_ap[:, 0] if len(self.all_ap) else []
  71. @property
  72. def ap(self):
  73. """AP@0.5:0.95
  74. Return:
  75. (nc, ) or [].
  76. """
  77. return self.all_ap.mean(1) if len(self.all_ap) else []
  78. @property
  79. def mp(self):
  80. """mean precision of all classes.
  81. Return:
  82. float.
  83. """
  84. return self.p.mean() if len(self.p) else 0.0
  85. @property
  86. def mr(self):
  87. """mean recall of all classes.
  88. Return:
  89. float.
  90. """
  91. return self.r.mean() if len(self.r) else 0.0
  92. @property
  93. def map50(self):
  94. """Mean AP@0.5 of all classes.
  95. Return:
  96. float.
  97. """
  98. return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
  99. @property
  100. def map(self):
  101. """Mean AP@0.5:0.95 of all classes.
  102. Return:
  103. float.
  104. """
  105. return self.all_ap.mean() if len(self.all_ap) else 0.0
  106. def mean_results(self):
  107. """Mean of results, return mp, mr, map50, map"""
  108. return (self.mp, self.mr, self.map50, self.map)
  109. def class_result(self, i):
  110. """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
  111. return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
  112. def get_maps(self, nc):
  113. maps = np.zeros(nc) + self.map
  114. for i, c in enumerate(self.ap_class_index):
  115. maps[c] = self.ap[i]
  116. return maps
  117. def update(self, results):
  118. """
  119. Args:
  120. results: tuple(p, r, ap, f1, ap_class)
  121. """
  122. p, r, all_ap, f1, ap_class_index = results
  123. self.p = p
  124. self.r = r
  125. self.all_ap = all_ap
  126. self.f1 = f1
  127. self.ap_class_index = ap_class_index
  128. class Metrics:
  129. """Metric for boxes and masks."""
  130. def __init__(self) -> None:
  131. self.metric_box = Metric()
  132. self.metric_mask = Metric()
  133. def update(self, results):
  134. """
  135. Args:
  136. results: Dict{'boxes': Dict{}, 'masks': Dict{}}
  137. """
  138. self.metric_box.update(list(results['boxes'].values()))
  139. self.metric_mask.update(list(results['masks'].values()))
  140. def mean_results(self):
  141. return self.metric_box.mean_results() + self.metric_mask.mean_results()
  142. def class_result(self, i):
  143. return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
  144. def get_maps(self, nc):
  145. return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
  146. @property
  147. def ap_class_index(self):
  148. # boxes and masks have the same ap_class_index
  149. return self.metric_box.ap_class_index
  150. KEYS = [
  151. 'train/box_loss',
  152. 'train/seg_loss', # train loss
  153. 'train/obj_loss',
  154. 'train/cls_loss',
  155. 'metrics/precision(B)',
  156. 'metrics/recall(B)',
  157. 'metrics/mAP_0.5(B)',
  158. 'metrics/mAP_0.5:0.95(B)', # metrics
  159. 'metrics/precision(M)',
  160. 'metrics/recall(M)',
  161. 'metrics/mAP_0.5(M)',
  162. 'metrics/mAP_0.5:0.95(M)', # metrics
  163. 'val/box_loss',
  164. 'val/seg_loss', # val loss
  165. 'val/obj_loss',
  166. 'val/cls_loss',
  167. 'x/lr0',
  168. 'x/lr1',
  169. 'x/lr2', ]
  170. BEST_KEYS = [
  171. 'best/epoch',
  172. 'best/precision(B)',
  173. 'best/recall(B)',
  174. 'best/mAP_0.5(B)',
  175. 'best/mAP_0.5:0.95(B)',
  176. 'best/precision(M)',
  177. 'best/recall(M)',
  178. 'best/mAP_0.5(M)',
  179. 'best/mAP_0.5:0.95(M)', ]