# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
General utils
"""

import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import requests
import torch
import torchvision
import yaml
from PIL import Image
from torchvision import transforms

from ultralytics.data.augment import classify_transforms
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
RANK = int(os.getenv('RANK', -1))

import transforms as trans  # local module with a legacy torchvision-style API (provides Compose/Scale, used below)

# Settings
NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads
DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets'))  # global datasets directory
AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true'  # global auto-install mode
VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true'  # global verbose mode
TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}'  # tqdm bar format
FONT = 'Arial.ttf'  # https://ultralytics.com/assets/Arial.ttf

torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads
os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'Darwin' else str(NUM_THREADS)  # OpenMP (PyTorch and SciPy); platform.system() returns 'Darwin' on macOS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress verbose TF compiler warnings in Colab

mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]  # ImageNet normalization constants
test = transforms.Compose([
    transforms.Resize((224, 224)),
    # transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std)])
v8transforms = classify_transforms(64)
transforma = trans.Compose([
    trans.Scale((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # operates on a Tensor
])


def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)


def is_chinese(s='人工智能'):
    # Is string composed of any Chinese characters?
    return bool(re.search('[\u4e00-\u9fff]', str(s)))


def is_colab():
    # Is environment a Google Colab instance?
    return 'google.colab' in sys.modules


def is_jupyter():
    """
    Check if the current script is running inside a Jupyter Notebook.
    Verified on Colab, Jupyterlab, Kaggle, Paperspace.

    Returns:
        bool: True if running inside a Jupyter Notebook, False otherwise.
    """
    with contextlib.suppress(Exception):
        from IPython import get_ipython
        return get_ipython() is not None
    return False


def is_kaggle():
    # Is environment a Kaggle Notebook?
    return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'


def is_docker() -> bool:
    """Check if the process runs inside a docker container."""
    if Path('/.dockerenv').exists():
        return True
    try:  # check if docker is in control groups
        with open('/proc/self/cgroup') as file:
            return any('docker' in line for line in file)
    except OSError:
        return False


def is_writeable(dir, test=False):
    # Return True if directory has write permissions, test opening a file with write permissions if test=True
    if not test:
        return os.access(dir, os.W_OK)  # possible issues on Windows
    file = Path(dir) / 'tmp.txt'
    try:
        with open(file, 'w'):  # open file with write permissions
            pass
        file.unlink()  # remove file
        return True
    except OSError:
        return False


LOGGING_NAME = 'yolov5'


def set_logging(name=LOGGING_NAME, verbose=True):
    # sets up logging for the given name
    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
    level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            name: {
                'format': '%(message)s'}},
        'handlers': {
            name: {
                'class': 'logging.StreamHandler',
                'formatter': name,
                'level': level}},
        'loggers': {
            name: {
                'level': level,
                'handlers': [name],
                'propagate': False}}})


set_logging(LOGGING_NAME)  # run before defining LOGGER
LOGGER = logging.getLogger(LOGGING_NAME)  # define globally (used in train.py, val.py, detect.py, etc.)
if platform.system() == 'Windows':
    for fn in LOGGER.info, LOGGER.warning:
        setattr(LOGGER, fn.__name__, lambda x, fn=fn: fn(emojis(x)))  # emoji-safe logging (fn=fn binds each method at definition time; a bare closure would late-bind both names to the last fn)


def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
    # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
    env = os.getenv(env_var)
    if env:
        path = Path(env)  # use environment variable
    else:
        cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}  # 3 OS dirs
        path = Path.home() / cfg.get(platform.system(), '')  # OS-specific config dir
        path = (path if is_writeable(path) else Path('/tmp')) / dir  # GCP and AWS lambda fix, only /tmp is writeable
    path.mkdir(exist_ok=True)  # make if required
    return path


CONFIG_DIR = user_config_dir()  # Ultralytics settings dir


class Profile(contextlib.ContextDecorator):
    # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
    def __init__(self, t=0.0):
        self.t = t
        self.cuda = torch.cuda.is_available()

    def __enter__(self):
        self.start = self.time()
        return self

    def __exit__(self, type, value, traceback):
        self.dt = self.time() - self.start  # delta-time
        self.t += self.dt  # accumulate dt

    def time(self):
        if self.cuda:
            torch.cuda.synchronize()
        return time.time()
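

# Illustrative Profile usage (a sketch; `heavy_op` is a hypothetical callable):
#   dt = Profile()
#   with dt:
#       heavy_op()  # timed section
#   print(f'{dt.dt:.3f}s this call, {dt.t:.3f}s accumulated')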


class Timeout(contextlib.ContextDecorator):
    # YOLOv5 Timeout class. Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
    def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
        self.seconds = int(seconds)
        self.timeout_message = timeout_msg
        self.suppress = bool(suppress_timeout_errors)

    def _timeout_handler(self, signum, frame):
        raise TimeoutError(self.timeout_message)

    def __enter__(self):
        if platform.system() != 'Windows':  # not supported on Windows
            signal.signal(signal.SIGALRM, self._timeout_handler)  # Set handler for SIGALRM
            signal.alarm(self.seconds)  # start countdown for SIGALRM to be raised

    def __exit__(self, exc_type, exc_val, exc_tb):
        if platform.system() != 'Windows':
            signal.alarm(0)  # Cancel SIGALRM if it's scheduled
            if self.suppress and exc_type is TimeoutError:  # Suppress TimeoutError
                return True
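

# Illustrative Timeout usage (a sketch; POSIX only, `slow_io` is a hypothetical callable):
#   with Timeout(5, timeout_msg='slow_io timed out'):
#       slow_io()  # SIGALRM fires after 5 s; the resulting TimeoutError is suppressed by default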


class WorkingDirectory(contextlib.ContextDecorator):
    # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
    def __init__(self, new_dir):
        self.dir = new_dir  # new dir
        self.cwd = Path.cwd().resolve()  # current dir

    def __enter__(self):
        os.chdir(self.dir)

    def __exit__(self, exc_type, exc_val, exc_tb):
        os.chdir(self.cwd)


def methods(instance):
    # Get class/instance methods
    return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')]


def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
    # Print function arguments (optional args dict)
    x = inspect.currentframe().f_back  # previous frame
    file, _, func, _, _ = inspect.getframeinfo(x)
    if args is None:  # get args automatically
        args, _, _, frm = inspect.getargvalues(x)
        args = {k: v for k, v in frm.items() if k in args}
    try:
        file = Path(file).resolve().relative_to(ROOT).with_suffix('')
    except ValueError:
        file = Path(file).stem
    s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
    LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))


def init_seeds(seed=0, deterministic=False):
    # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # for Multi-GPU, exception safe
    # torch.backends.cudnn.benchmark = True  # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
    if deterministic and check_version(torch.__version__, '1.12.0'):  # https://github.com/ultralytics/yolov5/pull/8213
        torch.use_deterministic_algorithms(True)
        torch.backends.cudnn.deterministic = True
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        os.environ['PYTHONHASHSEED'] = str(seed)


def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}


def get_default_args(func):
    # Get func() default arguments
    signature = inspect.signature(func)
    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def file_age(path=__file__):
    # Return days since the last file update
    dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)  # delta
    return dt.days  # + dt.seconds / 86400  # fractional days


def file_date(path=__file__):
    # Return human-readable file modification date, i.e. '2021-3-26'
    t = datetime.fromtimestamp(Path(path).stat().st_mtime)
    return f'{t.year}-{t.month}-{t.day}'


def file_size(path):
    # Return file/dir size (MiB)
    mb = 1 << 20  # bytes per MiB (1024 ** 2)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / mb
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
    else:
        return 0.0


def check_online():
    # Check internet connectivity
    import socket

    def run_once():
        # Check once
        try:
            socket.create_connection(('1.1.1.1', 443), 5)  # check host accessibility
            return True
        except OSError:
            return False

    return run_once() or run_once()  # check twice to increase robustness to intermittent connectivity issues


def git_describe(path=ROOT):  # path must be a directory
    # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
    try:
        assert (Path(path) / '.git').is_dir()
        return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1]
    except Exception:
        return ''


@TryExcept()
@WorkingDirectory(ROOT)
def check_git_status(repo='ultralytics/yolov5', branch='master'):
    # YOLOv5 status check, recommend 'git pull' if code is out of date
    url = f'https://github.com/{repo}'
    msg = f', for updates see {url}'
    s = colorstr('github: ')  # string
    assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg
    assert check_online(), s + 'skipping check (offline)' + msg

    splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode())
    matches = [repo in s for s in splits]
    if any(matches):
        remote = splits[matches.index(True) - 1]
    else:
        remote = 'ultralytics'
        check_output(f'git remote add {remote} {url}', shell=True)
    check_output(f'git fetch {remote}', shell=True, timeout=5)  # git fetch
    local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
    n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True))  # commits behind
    if n > 0:
        pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}'
        s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update."
    else:
        s += f'up to date with {url} ✅'
    LOGGER.info(s)


@WorkingDirectory(ROOT)
def check_git_info(path='.'):
    # YOLOv5 git info check, return {remote, branch, commit}
    check_requirements('gitpython')
    import git
    try:
        repo = git.Repo(path)
        remote = repo.remotes.origin.url.replace('.git', '')  # i.e. 'https://github.com/ultralytics/yolov5'
        commit = repo.head.commit.hexsha  # i.e. '3134699c73af83aac2a481435550b968d5792c0d'
        try:
            branch = repo.active_branch.name  # i.e. 'main'
        except TypeError:  # not on any branch
            branch = None  # i.e. 'detached HEAD' state
        return {'remote': remote, 'branch': branch, 'commit': commit}
    except git.exc.InvalidGitRepositoryError:  # path is not a git dir
        return {'remote': None, 'branch': None, 'commit': None}


def check_python(minimum='3.7.0'):
    # Check current python version vs. required python version
    check_version(platform.python_version(), minimum, name='Python ', hard=True)


def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
    # Check version vs. required version
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)  # bool
    s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed'  # string
    if hard:
        assert result, emojis(s)  # assert minimum requirements met
    if verbose and not result:
        LOGGER.warning(s)
    return result
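

# Illustrative check_version calls (sketch):
#   check_version(torch.__version__, '1.12.0')    # True if torch >= 1.12.0
#   check_version('2.0.0', '2.0.0', pinned=True)  # True only on an exact match
#   check_version('1.7.0', '1.8.0', hard=True)    # raises AssertionError (minimum not met)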


def check_img_size(imgsz, s=32, floor=0):
    # Verify image size is a multiple of stride s in each dimension
    if isinstance(imgsz, int):  # integer i.e. img_size=640
        new_size = max(make_divisible(imgsz, int(s)), floor)
    else:  # list i.e. img_size=[640, 480]
        imgsz = list(imgsz)  # convert to list if tuple
        new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
    if new_size != imgsz:
        LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
    return new_size


def check_imshow(warn=False):
    # Check if environment supports image displays
    try:
        assert not is_jupyter()
        assert not is_docker()
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        if warn:
            LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}')
        return False


def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
    # Check file(s) for acceptable suffix
    if file and suffix:
        if isinstance(suffix, str):
            suffix = [suffix]
        for f in file if isinstance(file, (list, tuple)) else [file]:
            s = Path(f).suffix.lower()  # file suffix
            if len(s):
                assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}'


def check_yaml(file, suffix=('.yaml', '.yml')):
    # Search/download YAML file (if necessary) and return path, checking suffix
    return check_file(file, suffix)


def check_file(file, suffix=''):
    # Search/download file (if necessary) and return path
    check_suffix(file, suffix)  # optional
    file = str(file)  # convert to str()
    if os.path.isfile(file) or not file:  # exists
        return file
    elif file.startswith(('http:/', 'https:/')):  # download
        url = file  # warning: Pathlib turns :// -> :/
        file = Path(urllib.parse.unquote(file).split('?')[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth
        if os.path.isfile(file):
            LOGGER.info(f'Found {url} locally at {file}')  # file already exists
        else:
            LOGGER.info(f'Downloading {url} to {file}...')
            torch.hub.download_url_to_file(url, file)
            assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check
        return file
    elif file.startswith('clearml://'):  # ClearML Dataset ID
        assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'."
        return file
    else:  # search
        files = []
        for d in 'data', 'models', 'utils':  # search directories
            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
        assert len(files), f'File not found: {file}'  # assert file was found
        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
        return files[0]  # return file


def check_font(font=FONT, progress=False):
    # Download font to CONFIG_DIR if necessary
    font = Path(font)
    file = CONFIG_DIR / font.name
    if not font.exists() and not file.exists():
        url = f'https://ultralytics.com/assets/{font.name}'
        LOGGER.info(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file), progress=progress)


def check_dataset(data, autodownload=True):
    # Download, check and/or unzip dataset if not found locally

    # Download (optional)
    extract_dir = ''
    if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
        download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1)
        data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))
        extract_dir, autodownload = data.parent, False

    # Read yaml (optional)
    if isinstance(data, (str, Path)):
        data = yaml_load(data)  # dictionary

    # Checks
    for k in 'train', 'val', 'names':
        assert k in data, emojis(f"data.yaml '{k}:' field missing ❌")
    if isinstance(data['names'], (list, tuple)):  # old array format
        data['names'] = dict(enumerate(data['names']))  # convert to dict
    assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car'
    data['nc'] = len(data['names'])

    # Resolve paths
    path = Path(extract_dir or data.get('path') or '')  # optional 'path' default to '.'
    if not path.is_absolute():
        path = (ROOT / path).resolve()
        data['path'] = path  # download scripts
    for k in 'train', 'val', 'test':
        if data.get(k):  # prepend path
            if isinstance(data[k], str):
                x = (path / data[k]).resolve()
                if not x.exists() and data[k].startswith('../'):
                    x = (path / data[k][3:]).resolve()
                data[k] = str(x)
            else:
                data[k] = [str((path / x).resolve()) for x in data[k]]

    # Parse yaml
    train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
    if val:
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])
            if not s or not autodownload:
                raise Exception('Dataset not found ❌')
            t = time.time()
            if s.startswith('http') and s.endswith('.zip'):  # URL
                f = Path(s).name  # filename
                LOGGER.info(f'Downloading {s} to {f}...')
                torch.hub.download_url_to_file(s, f)
                Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True)  # create root
                unzip_file(f, path=DATASETS_DIR)  # unzip
                Path(f).unlink()  # remove zip
                r = None  # success
            elif s.startswith('bash '):  # bash script
                LOGGER.info(f'Running {s} ...')
                r = subprocess.run(s, shell=True)
            else:  # python script
                r = exec(s, {'yaml': data})  # return None
            dt = f'({round(time.time() - t, 1)}s)'
            s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌'
            LOGGER.info(f'Dataset download {s}')
    check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True)  # download fonts
    return data  # dictionary


def check_amp(model):
    # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation
    from models.common import AutoShape, DetectMultiBackend

    def amp_allclose(model, im):
        # All close FP32 vs AMP results
        m = AutoShape(model, verbose=False)  # model
        a = m(im).xywhn[0]  # FP32 inference
        m.amp = True
        b = m(im).xywhn[0]  # AMP inference
        return a.shape == b.shape and torch.allclose(a, b, atol=0.1)  # close to 10% absolute tolerance

    prefix = colorstr('AMP: ')
    device = next(model.parameters()).device  # get model device
    if device.type in ('cpu', 'mps'):
        return False  # AMP only used on CUDA devices
    f = ROOT / 'data' / 'images' / 'bus.jpg'  # image to check
    im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3))
    try:
        assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)
        LOGGER.info(f'{prefix}checks passed ✅')
        return True
    except Exception:
        help_url = 'https://github.com/ultralytics/yolov5/issues/7908'
        LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}')
        return False


def yaml_load(file='data.yaml'):
    # Single-line safe yaml loading
    with open(file, errors='ignore') as f:
        return yaml.safe_load(f)


def yaml_save(file='data.yaml', data={}):
    # Single-line safe yaml saving
    with open(file, 'w') as f:
        yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False)


def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):
    # Unzip a *.zip file to path/, excluding files containing strings in exclude list
    if path is None:
        path = Path(file).parent  # default path
    with ZipFile(file) as zipObj:
        for f in zipObj.namelist():  # list all archived filenames in the zip
            if all(x not in f for x in exclude):
                zipObj.extract(f, path=path)


def url2file(url):
    # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
    url = str(Path(url)).replace(':/', '://')  # Pathlib turns :// -> :/
    return Path(urllib.parse.unquote(url)).name.split('?')[0]  # '%2F' to '/', split https://url.com/file.txt?auth


def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
    # Multithreaded file download and unzip function, used in data.yaml for autodownload
    def download_one(url, dir):
        # Download 1 file
        success = True
        if os.path.isfile(url):
            f = Path(url)  # filename
        else:  # does not exist
            f = dir / Path(url).name
            LOGGER.info(f'Downloading {url} to {f}...')
            for i in range(retry + 1):
                if curl:
                    success = curl_download(url, f, silent=(threads > 1))
                else:
                    torch.hub.download_url_to_file(url, f, progress=threads == 1)  # torch download
                    success = f.is_file()
                if success:
                    break
                elif i < retry:
                    LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...')
                else:
                    LOGGER.warning(f'❌ Failed to download {url}...')

        if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)):
            LOGGER.info(f'Unzipping {f}...')
            if is_zipfile(f):
                unzip_file(f, dir)  # unzip
            elif is_tarfile(f):
                subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True)  # untar
            elif f.suffix == '.gz':
                subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True)  # untar gzip
            if delete:
                f.unlink()  # remove zip

    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # make directory
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multithreaded
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)


def make_divisible(x, divisor):
    # Returns nearest x divisible by divisor (rounded up)
    if isinstance(divisor, torch.Tensor):
        divisor = int(divisor.max())  # to int
    return math.ceil(x / divisor) * divisor
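

# Illustrative: make_divisible rounds up to the nearest multiple of divisor (sketch):
#   make_divisible(97, 32)   # -> 128
#   make_divisible(640, 32)  # -> 640 (already a multiple)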


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
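

# Illustrative: one_cycle returns a cosine ramp; the midpoint yields the mean of y1 and y2 (sketch):
#   lf = one_cycle(1.0, 0.1, steps=100)
#   lf(0), lf(50), lf(100)  # -> 1.0, 0.55, 0.1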


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {
        'black': '\033[30m',  # basic colors
        'red': '\033[31m',
        'green': '\033[32m',
        'yellow': '\033[33m',
        'blue': '\033[34m',
        'magenta': '\033[35m',
        'cyan': '\033[36m',
        'white': '\033[37m',
        'bright_black': '\033[90m',  # bright colors
        'bright_red': '\033[91m',
        'bright_green': '\033[92m',
        'bright_yellow': '\033[93m',
        'bright_blue': '\033[94m',
        'bright_magenta': '\033[95m',
        'bright_cyan': '\033[96m',
        'bright_white': '\033[97m',
        'end': '\033[0m',  # misc
        'bold': '\033[1m',
        'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
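

# Illustrative colorstr usage (sketch):
#   colorstr('hello world')             # blue + bold by default
#   colorstr('red', 'underline', 'x')   # any prefix of color/style names, string last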


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights).float()


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    # Usage: index = random.choices(range(n), weights=image_weights, k=1)  # weighted image sample
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    return (class_weights.reshape(1, nc) * class_counts).sum(1)


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    return [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
        35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
        64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
    y[..., 2] = x[..., 2] - x[..., 0]  # width
    y[..., 3] = x[..., 3] - x[..., 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom right x
    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom right y
    return y
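

# Illustrative round-trip between the two box formats (sketch):
#   b = np.array([[8., 7., 12., 13.]])  # xyxy
#   xyxy2xywh(b)                        # -> [[10., 10., 4., 6.]] (center x, center y, w, h)
#   xywh2xyxy(xyxy2xywh(b))             # -> the original xyxy box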


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom right y
    return y


def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
    if clip:
        clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center
    y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h  # y center
    y[..., 2] = (x[..., 2] - x[..., 0]) / w  # width
    y[..., 3] = (x[..., 3] - x[..., 1]) / h  # height
    return y


def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segments into pixel segments, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = w * x[..., 0] + padw  # x
    y[..., 1] = h * x[..., 1] + padh  # y
    return y


def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy


def segments2boxes(segments):
    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy
    return xyxy2xywh(np.array(boxes))  # cls, xywh


def resample_segments(segments, n=1000):
    # Up-sample an (n,2) segment
    for i, s in enumerate(segments):
        s = np.concatenate((s, s[0:1, :]), axis=0)  # close the polygon
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = np.concatenate([np.interp(x, xp, s[:, j]) for j in range(2)]).reshape(2, -1).T  # segment xy
    return segments


def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
    # Rescale boxes (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    clip_boxes(boxes, img0_shape)
    return boxes


def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    segments[:, 0] -= pad[0]  # x padding
    segments[:, 1] -= pad[1]  # y padding
    segments /= gain
    clip_segments(segments, img0_shape)
    if normalize:
        segments[:, 0] /= img0_shape[1]  # width
        segments[:, 1] /= img0_shape[0]  # height
    return segments


def strtolst(strpoint):
    # Parse a fence string, e.g. 'x1#y1,x2#y2:...' (electricFence format), into a flat list of [x, y] string pairs
    strpoint = strpoint.split(':')
    lista = []
    for liststr in strpoint:
        if len(liststr) > 0:
            liststr = liststr.split(',')
            for point in liststr:
                lista.append(point.split('#'))
    return lista


def strtolstl(strpoint):
    # Parse a fence string into a nested list: one list of [x, y] string pairs per ':'-separated polygon
    # print(f'strpoint = {strpoint}')
    strpoint = strpoint.split(':')
    lista = []
    for liststr in strpoint:
        if len(liststr) > 0:
            lista.append([])
            for point in liststr.split(','):
                lista[-1].append(point.split('#'))  # append to the newest polygon (indexing by the enumerate
                # counter, as before, raised IndexError whenever an empty ':' segment was skipped)
    return lista
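

# Illustrative parse (sketch):
#   strtolstl('0#0,100#0,100#100:10#10,20#20:')
#   # -> [[['0', '0'], ['100', '0'], ['100', '100']], [['10', '10'], ['20', '20']]]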


def clip_boxes(boxes, shape):
    # Clip boxes (xyxy) to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[..., 0].clamp_(0, shape[1])  # x1
        boxes[..., 1].clamp_(0, shape[0])  # y1
        boxes[..., 2].clamp_(0, shape[1])  # x2
        boxes[..., 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2


def clip_segments(segments, shape):
    # Clip segments (xy1,xy2,...) to image shape (height, width)
    if isinstance(segments, torch.Tensor):  # faster individually
        segments[:, 0].clamp_(0, shape[1])  # x
        segments[:, 1].clamp_(0, shape[0])  # y
    else:  # np.array (faster grouped)
        segments[:, 0] = segments[:, 0].clip(0, shape[1])  # x
        segments[:, 1] = segments[:, 1].clip(0, shape[0])  # y


def non_max_suppression(
        prediction,
        conf_thres=0.25,
        iou_thres=0.45,
        classes=None,
        agnostic=False,
        multi_label=False,
        labels=(),
        max_det=300,
        nm=0,  # number of masks
):
    """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections

    Returns:
         list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """
    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
    if isinstance(prediction, (list, tuple)):  # YOLOv5 model in validation mode, output = (inference_out, loss_out)
        prediction = prediction[0]  # select only inference output

    device = prediction.device
    mps = 'mps' in device.type  # Apple MPS
    if mps:  # MPS not fully supported yet, convert tensors to CPU before NMS
        prediction = prediction.cpu()
    bs = prediction.shape[0]  # batch size
    nc = prediction.shape[2] - nm - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    # min_wh = 2  # (pixels) minimum box width and height
    max_wh = 7680  # (pixels) maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 0.5 + 0.05 * bs  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    mi = 5 + nc  # mask start index
    output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
            v[:, :4] = lb[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box/Mask
        box = xywh2xyxy(x[:, :4])  # (center_x, center_y, width, height) to (x1, y1, x2, y2)
        mask = x[:, mi:]  # zero columns if no masks

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
        else:  # best class only
            conf, j = x[:, 5:mi].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence and remove excess boxes

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        i = i[:max_det]  # limit detections
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if mps:
            output[xi] = output[xi].to(device)
        if (time.time() - t) > time_limit:
            LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')
            break  # time limit exceeded

    return output
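

# Illustrative NMS call on raw detector output (sketch; `model` and `im` are hypothetical):
#   pred = model(im)  # raw predictions, shape (batch, n_anchors, 5 + nc)
#   det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, max_det=300)[0]
#   # det is an (n, 6) tensor for the first image: [x1, y1, x2, y2, conf, cls]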


def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'best_fitness', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")


def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):
    evolve_csv = save_dir / 'evolve.csv'
    evolve_yaml = save_dir / 'hyp_evolve.yaml'
    keys = tuple(keys) + tuple(hyp.keys())  # [results + hyps]
    keys = tuple(x.strip() for x in keys)
    vals = results + tuple(hyp.values())
    n = len(keys)

    # Download (optional)
    if bucket:
        url = f'gs://{bucket}/evolve.csv'
        if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):
            subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}'])  # download evolve.csv if larger than local

    # Log to evolve.csv
    s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n')  # add header
    with open(evolve_csv, 'a') as f:
        f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')

    # Save yaml
    with open(evolve_yaml, 'w') as f:
        data = pd.read_csv(evolve_csv, skipinitialspace=True)
        data = data.rename(columns=lambda x: x.strip())  # strip keys
        i = np.argmax(fitness(data.values[:, :4]))  # index of the best-fitness generation
        generations = len(data)
        f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' +
                f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) +
                '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
        yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)

    # Print to screen
    LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix +
                ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix +
                ', '.join(f'{x:20.5g}' for x in vals) + '\n\n')

    if bucket:
        subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}'])  # upload


def apply_classifier(x, model, img, im0):
    # Apply a second stage classifier to YOLO outputs
    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for a in d:
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                ims.append(im)
            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def apply_classifier1(x, model, img, im0, modelname):
    # Apply a second stage classifier to YOLO outputs, preprocessing crops with the 'test' pipeline defined above
    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
    im0 = [im0] if isinstance(im0, np.ndarray) and len(im0.shape) == 3 else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Rescale boxes from img_size to im0 size
            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for a in d:
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cutout[:, :, ::-1]  # BGR to RGB
                im = Image.fromarray(np.uint8(im))
                im = test(im)  # Resize / ToTensor / Normalize
                im = im.to(a.device)
                ims.append(im)
            ims = torch.stack(ims, dim=0)
            pred_cls2 = model(ims).argmax(1)  # classifier prediction
            print('cls')
            print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
            print(pred_cls2)
            print(pred_cls1)
            if modelname == 'fall':
                x[i] = x[i][pred_cls2 == 0]  # keep only detections the classifier labels as class 0 (fall)
            else:
                x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
    return x


def apply_classifieruniform(x, model, img, im0, modelname):
    # Apply a second stage classifier to YOLO outputs: class-4 detections ('uniform' candidates) are re-checked
    # by the classifier, class-1/2 detections are passed through and re-attached to the output
    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
    im0 = [im0] if isinstance(im0, np.ndarray) and len(im0) == 1 and len(im0.shape) != 1 else im0
    print(type(x))
    xp = []
    xh = []
    for xi, xa in enumerate(x):
        print(xa)
        xp.append(xa[(xa[:, 5:6] == torch.tensor([4], device=xa.device)).any(1)])  # class-4 detections
        xh.append(xa[(xa[:, 5:6] == torch.tensor([1, 2], device=xa.device)).any(1)])  # class-1/2 detections
    output = len(xp) * [[]]
    for i, d in enumerate(xp):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Rescale boxes from img_size to im0 size
            print(f'orid= {d}')
            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for a in d:
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                cv2.imwrite(f'{time.time()}.jpg', cutout)  # debug dump of each crop
                ims.append(cutout)
            print(f'ims= {len(ims)}')
            ims = torch.stack([v8transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in ims]).to(model.device)
            print(model(ims))
            pred_cls2 = model(ims).argmax(1)  # classifier prediction
            print('cls')
            print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
            print(f'pred_cls2 {pred_cls2}')
            print(pred_cls1)
            if modelname == 'uniform':
                print(xp[i])
                xp[i] = xp[i][pred_cls2 == 0]  # keep only crops the classifier labels as class 0
            else:
                xp[i] = xp[i][pred_cls1 == pred_cls2]  # retain matching class detections
            if len(xp[i]) > 0:
                output[i] = torch.cat([xp[i], xh[i]], dim=0)
    return output


def apply_classifierarm(x, model, img, im0, modelname):
    # Apply a second stage classifier to YOLO outputs; the binary classifier's labels are inverted (1 - argmax)
    # before being compared with the detector's classes
    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
    im0 = [im0] if isinstance(im0, np.ndarray) and len(im0.shape) == 3 else im0
    print(type(x))
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Rescale boxes from img_size to im0 size
            print(f'orid= {d}')
            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for a in d:
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                # cv2.imwrite(f'{time.time()}.jpg', cutout)  # debug dump of each crop
                ims.append(cutout)
            print(f'ims= {len(ims)}')
            ims = torch.stack([v8transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in ims]).to(model.device)
            print(ims.size())
            print(model(ims))
            pred_cls2 = model(ims).argmax(1)  # classifier prediction
            print('cls')
            print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
            print(f'pred_cls2 {pred_cls2}')
            print(pred_cls1)
            pred_cls2 = 1 - pred_cls2  # invert binary classifier labels
            x[i] = x[i][pred_cls2 == pred_cls1]  # retain detections confirmed by the (inverted) classifier
    return x
def compute_IOU(rec1, rec2):
    """
    Compute the intersection-over-union (IoU) of two axis-aligned rectangles.
    :param rec1: (x0, y0, x1, y1) where (x0, y0) is the top-left corner and (x1, y1) the bottom-right corner. Same below.
    :param rec2: (x0, y0, x1, y1)
    :return: (IoU, enclosing box) when the rectangles overlap, otherwise (0, 0).
    """
    left_column_max = max(rec1[0], rec2[0])
    right_column_min = min(rec1[2], rec2[2])
    up_row_max = max(rec1[1], rec2[1])
    down_row_min = min(rec1[3], rec2[3])
    if left_column_max >= right_column_min or down_row_min <= up_row_max:  # no overlap
        return 0, 0
    else:  # rectangles overlap
        S1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
        S2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
        S_cross = (down_row_min - up_row_max) * (right_column_min - left_column_max)
        x1 = min(rec1[0], rec2[0])
        y1 = min(rec1[1], rec2[1])
        x2 = max(rec1[2], rec2[2])
        y2 = max(rec1[3], rec2[3])
        return S_cross / (S1 + S2 - S_cross), torch.tensor((x1, y1, x2, y2))
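# Worked example for compute_IOU (illustrative values): two 2x2 boxes offset by (1, 1)
# intersect in a 1x1 region, so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143, and the second return
# value is the box enclosing both rectangles:
#   iou, union = compute_IOU((0, 0, 2, 2), (1, 1, 3, 3))
#   # iou ≈ 0.143, union == tensor((0, 0, 3, 3))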
def task(cur, conn, url, urla):
    # Sync the local STREAM and CHANGESTREAM tables with the device list returned by the platform
    modelnamedir = {'0': 'helmet', '8': 'danager', '10': 'uniform', '14': 'smoke', '16': 'fire', '21': 'cross', '25': 'fall', '29': 'occupancy', '30': 'liquid', '31': 'pressure', '32': 'sleep', '33': 'conveyor', '34': 'personcount', '35': 'gloves', '36': 'sit', '37': 'other', '38': 'duty', '98': 'face', '51': 'run', '64': 'jump'}
    modellabeldir = {'0': 'head', '8': 'person', '10': 'other', '14': 'smoke', '16': 'fire', '21': 'cross', '25': 'fall', '29': 'car', '30': 'liquid', '31': 'pressure', '32': 'sleep', '33': 'conveyor', '34': 'personcount', '35': 'gloves', '36': 'sit', '37': 'other', '38': 'person', '98': 'face', '51': 'person', '64': 'person'}
    modelalgdir = {'helmet': '0', 'danager': '8', 'uniform': '10', 'smoke': '14', 'fire': '16', 'cross': '21', 'fall': '25', 'occupancy': '29', 'liquid': '30', 'pressure': '31', 'sleep': '32', 'conveyor': '33', 'personcount': '34', 'gloves': '35', 'sit': '36', 'other': '37', 'duty': '38', 'face': '98', 'run': '51', 'jump': '64'}
    data = {
        "algorithmCode": None,
        "deviceIp": None
    }
    result = requests.post(url=url, data=data).json()['data']
    postlist = []
    for info in result:
        #print(f"{info['deviceIp']},{info['deviceAlgorithmIp']},{info['deviceChannel']},{info['videoName']},{info['videoPassword']},{info['algorithmCode']},{info['electricFence']}")
        cursor = cur.execute("select point from stream where classindex = (?) and channel = (?)", (info['algorithmCode'], info['deviceChannel']))
        resultsub = cursor.fetchall()
        print(resultsub)
        postlist.append((info['deviceChannel'], info['algorithmCode']))
        if resultsub:  # known channel/algorithm pair: refresh the fence polygon
            if info['algorithmCode'] not in modelnamedir:
                continue
            pointres = info['electricFence']
            # NOTE: resultsub is a list of row tuples while pointres is a string, so this
            # comparison is always True and the row is rewritten on every pass
            if resultsub[:-1] != pointres[:-1]:
                print('true')
                print(info['deviceChannel'])
                print(f'electricFence {info["electricFence"]}')
                if len(pointres) > 0:
                    pointres = pointres[:-1] + ':'
                    print(f'in{pointres}')
                    print(info['algorithmCode'], info['deviceChannel'])
                    cur.execute("UPDATE STREAM set fence = '1', point = (?) where classindex = (?) and channel = (?)",
                                (pointres, info['algorithmCode'], info['deviceChannel']))
                    cur.execute("select * from STREAM where classindex = (?) and channel = (?)",
                                (info['algorithmCode'], info['deviceChannel']))
                    print(cur.fetchall())
                else:
                    cur.execute("UPDATE STREAM set fence = '0', point = (?) where classindex = (?) and channel = (?)",
                                (pointres, info['algorithmCode'], info['deviceChannel']))
                    cur.execute("select * from STREAM where classindex = (?) and channel = (?)",
                                (info['algorithmCode'], info['deviceChannel']))
                    print(cur.fetchall())
                print('commit')
                conn.commit()
        else:  # new channel/algorithm pair: insert a STREAM row
            if info['deviceIp'] is not None:
                if info['algorithmCode'] not in modelnamedir:
                    continue
                #address = f"rtsp://{info['videoName']}:{info['videoPassword']}@{info['deviceIp']}:554/Streaming/Channels/1"
                address = info['playbackAddress']
                label = modellabeldir[info['algorithmCode']]
                modelname = modelnamedir[info['algorithmCode']]
                print(modelname)
                print(label)
                print(address)
                durtime = 300 if modelname == 'duty' else 0
                if info['electricFence']:
                    fence = 1
                    point = f"{info['electricFence'][:-1]}:"
                    print(info['electricFence'])
                else:
                    fence = 0
                    point = '0'
                channel = info['deviceChannel']
                code = info['algorithmCode']
                ip = info['deviceIp']
                algdevice = info['fwqCode']
                cur.execute("INSERT INTO STREAM (MODELNAME,ADDRESS,FENCE,POINT,CHANNEL,CLASSINDEX,IP,ALGIP,ALGDEVICE,LABEL,DURTIME) \
                            VALUES ((?),(?),(?),(?),(?),(?),(?),(?),(?),(?),(?))",
                            (modelname, address, fence, point, channel, code, ip, info['deviceAlgorithmIp'], algdevice, label, durtime))
                print('add--------------------------------------------------------')
    # Remove STREAM rows for channel/algorithm pairs the platform no longer reports
    cur.execute("select channel,classindex from stream")
    realist = cur.fetchall()
    for r in realist:
        if r not in postlist:
            cur.execute("delete from stream where channel = (?) and classindex = (?)", (r[0], r[1]))
    conn.commit()
    # Ensure every active model has a CHANGESTREAM row, pulling its confidence from the platform
    cursor = cur.execute("select modelname from stream")
    modellist = set(cursor.fetchall())
    cursor = cur.execute("select modelname from changestream")
    changemodellist = set(cursor.fetchall())
    for model in modellist:
        if model not in changemodellist:
            print(model[0])
            a = modelalgdir[model[0]]
            rea = requests.post(url=urla, data={'algorithmCode': a}).json()['data']
            con = rea[0]['confidence'] if len(rea) > 0 else 0.25
            cla = 0
            cur.execute("INSERT INTO CHANGESTREAM (MODELNAME,ADDSTREAM,DELSTREAM,STREAMING,CONF,CLA) \
                        VALUES ((?),0,0,0,(?),(?))", (model[0], con, cla))
    if ('stream',) not in changemodellist:
        cur.execute("INSERT INTO CHANGESTREAM (MODELNAME,ADDSTREAM,DELSTREAM,STREAMING,CONF,CLA) \
                    VALUES ('stream',0,0,0,0,0)")
    conn.commit()
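# Usage sketch for task (hedged: the database file name and endpoint paths below are
# placeholders, not taken from this module). It expects an open sqlite3 cursor/connection
# plus the platform's device-list and algorithm-config URLs:
#   conn = sqlite3.connect('run.db')
#   cur = conn.cursor()
#   task(cur, conn, 'http://<platform>/api/device/list', 'http://<platform>/api/algorithm/config')
#   conn.close()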
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')

        # Method 1
        for n in range(2, 9999):
            p = f'{path}{sep}{n}{suffix}'  # increment path
            if not os.path.exists(p):
                break
        path = Path(p)

        # Method 2 (deprecated)
        # dirs = glob.glob(f"{path}{sep}*")  # similar paths
        # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
        # i = [int(m.groups()[0]) for m in matches if m]  # indices
        # n = max(i) + 1 if i else 2  # increment number
        # path = Path(f"{path}{sep}{n}{suffix}")  # increment path

    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory
    return path
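# Example: if runs/exp already exists, increment_path('runs/exp') returns Path('runs/exp2')
# (then runs/exp3, ...); for a file path such as runs/exp/results.txt the number is inserted
# before the extension, giving runs/exp/results2.txt. Pass mkdir=True to also create the
# resulting directory.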
# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------
imshow_ = cv2.imshow  # copy to avoid recursion errors

def imread(filename, flags=cv2.IMREAD_COLOR):
    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)

def imwrite(filename, img):
    try:
        cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
        return True
    except Exception:
        return False

def imshow(path, im):
    imshow_(path.encode('unicode_escape').decode(), im)

if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
    cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine
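# Example (illustrative path): unlike stock cv2.imread/cv2.imwrite, which can fail on
# non-ASCII paths on some platforms (notably Windows), the patched pair above round-trips
# them via np.fromfile/imdecode and imencode/tofile:
#   ok = imwrite('图像/测试.jpg', im)  # True on success, assuming the directory exists
#   im2 = imread('图像/测试.jpg')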
# Variables ------------------------------------------------------------------------------------------------------------