@@ -67,17 +67,16 @@ def is_kaggle():
6767
def is_writeable(dir, test=False):
    """Return True if directory `dir` has write permissions.

    If test=True, verify by actually creating and deleting a temp file
    (more reliable than os.access, especially on Windows/network mounts).
    """
    if not test:  # method 1: permission bits
        # Fixed: was os.R_OK (read permission) — the function answers "writeable",
        # so it must check os.W_OK. Note os.access may be unreliable on Windows.
        return os.access(dir, os.W_OK)
    # method 2: try an actual write
    file = Path(dir) / 'tmp.txt'
    try:
        with open(file, 'w'):  # open file with write permissions
            pass
        file.unlink()  # remove file
        return True
    except OSError:
        return False
8180
8281
8382def set_logging (name = None , verbose = VERBOSE ):
@@ -244,7 +243,7 @@ def is_ascii(s=''):
244243
def is_chinese(s='人工智能'):
    """Return True if the string contains any CJK Unified Ideographs (U+4E00-U+9FFF)."""
    match = re.search('[\u4e00-\u9fff]', str(s))
    return match is not None
248247
249248
250249def emojis (str = '' ):
@@ -417,7 +416,7 @@ def check_file(file, suffix=''):
417416 # Search/download file (if necessary) and return path
418417 check_suffix (file , suffix ) # optional
419418 file = str (file ) # convert to str()
420- if Path (file ).is_file () or file == '' : # exists
419+ if Path (file ).is_file () or not file : # exists
421420 return file
422421 elif file .startswith (('http:/' , 'https:/' )): # download
423422 url = str (Path (file )).replace (':/' , '://' ) # Pathlib turns :// -> :/
@@ -481,28 +480,26 @@ def check_dataset(data, autodownload=True):
481480 val = [Path (x ).resolve () for x in (val if isinstance (val , list ) else [val ])] # val path
482481 if not all (x .exists () for x in val ):
483482 LOGGER .info (emojis ('\n Dataset not found ⚠, missing paths %s' % [str (x ) for x in val if not x .exists ()]))
484- if s and autodownload : # download script
485- t = time .time ()
486- root = path .parent if 'path' in data else '..' # unzip directory i.e. '../'
487- if s .startswith ('http' ) and s .endswith ('.zip' ): # URL
488- f = Path (s ).name # filename
489- LOGGER .info (f'Downloading { s } to { f } ...' )
490- torch .hub .download_url_to_file (s , f )
491- Path (root ).mkdir (parents = True , exist_ok = True ) # create root
492- ZipFile (f ).extractall (path = root ) # unzip
493- Path (f ).unlink () # remove zip
494- r = None # success
495- elif s .startswith ('bash ' ): # bash script
496- LOGGER .info (f'Running { s } ...' )
497- r = os .system (s )
498- else : # python script
499- r = exec (s , {'yaml' : data }) # return None
500- dt = f'({ round (time .time () - t , 1 )} s)'
501- s = f"success ✅ { dt } , saved to { colorstr ('bold' , root )} " if r in (0 , None ) else f"failure { dt } ❌"
502- LOGGER .info (emojis (f"Dataset download { s } " ))
503- else :
483+ if not s or not autodownload :
504484 raise Exception (emojis ('Dataset not found ❌' ))
505-
485+ t = time .time ()
486+ root = path .parent if 'path' in data else '..' # unzip directory i.e. '../'
487+ if s .startswith ('http' ) and s .endswith ('.zip' ): # URL
488+ f = Path (s ).name # filename
489+ LOGGER .info (f'Downloading { s } to { f } ...' )
490+ torch .hub .download_url_to_file (s , f )
491+ Path (root ).mkdir (parents = True , exist_ok = True ) # create root
492+ ZipFile (f ).extractall (path = root ) # unzip
493+ Path (f ).unlink () # remove zip
494+ r = None # success
495+ elif s .startswith ('bash ' ): # bash script
496+ LOGGER .info (f'Running { s } ...' )
497+ r = os .system (s )
498+ else : # python script
499+ r = exec (s , {'yaml' : data }) # return None
500+ dt = f'({ round (time .time () - t , 1 )} s)'
501+ s = f"success ✅ { dt } , saved to { colorstr ('bold' , root )} " if r in (0 , None ) else f"failure { dt } ❌"
502+ LOGGER .info (emojis (f"Dataset download { s } " ))
506503 check_font ('Arial.ttf' if is_ascii (data ['names' ]) else 'Arial.Unicode.ttf' , progress = True ) # download fonts
507504 return data # dictionary
508505
@@ -531,8 +528,7 @@ def check_amp(model):
def url2file(url):
    """Convert a URL to its bare filename, e.g. https://url.com/file.txt?auth -> file.txt."""
    restored = str(Path(url)).replace(':/', '://')  # Pathlib collapses :// -> :/, undo it
    decoded = urllib.parse.unquote(restored)  # '%2F' -> '/' etc.
    return Path(decoded).name.split('?')[0]  # basename, query string stripped
536532
537533
538534def download (url , dir = '.' , unzip = True , delete = True , curl = False , threads = 1 , retry = 3 ):
@@ -645,10 +641,9 @@ def labels_to_class_weights(labels, nc=80):
645641
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    """Produce per-image sampling weights from class_weights and each image's label contents.

    Args:
        labels: list of per-image label arrays; column 0 holds the class index.
        nc: number of classes.
        class_weights: (nc,) array of per-class weights.

    Returns:
        (len(labels),) array; usage: index = random.choices(range(n), weights=image_weights, k=1)
    """
    # Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use the builtin int.
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    return (class_weights.reshape(1, nc) * class_counts).sum(1)
652647
653648
def coco80_to_coco91_class():
    """Convert 80-index (val2014) class ids to 91-index (paper) ids.

    https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    The 91-class paper numbering skips 10 category ids that are absent from the
    80-class set, so the mapping is simply 1..90 with those ids removed.
    """
    absent = {12, 26, 29, 30, 45, 66, 68, 69, 71, 83}  # ids with no 80-class counterpart
    return [i for i in range(1, 91) if i not in absent]
665659
666660
667661def xyxy2xywh (x ):
@@ -883,7 +877,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op
883877 p .requires_grad = False
884878 torch .save (x , s or f )
885879 mb = os .path .getsize (s or f ) / 1E6 # filesize
886- LOGGER .info (f"Optimizer stripped from { f } ,{ ( ' saved as %s,' % s ) if s else '' } { mb :.1f} MB" )
880+ LOGGER .info (f"Optimizer stripped from { f } ,{ f ' saved as { s } ,' if s else '' } { mb :.1f} MB" )
887881
888882
889883def print_mutation (results , hyp , save_dir , bucket , prefix = colorstr ('evolve: ' )):
@@ -946,10 +940,9 @@ def apply_classifier(x, model, img, im0):
946940 # Classes
947941 pred_cls1 = d [:, 5 ].long ()
948942 ims = []
949- for j , a in enumerate ( d ): # per item
943+ for a in d :
950944 cutout = im0 [i ][int (a [1 ]):int (a [3 ]), int (a [0 ]):int (a [2 ])]
951945 im = cv2 .resize (cutout , (224 , 224 )) # BGR
952- # cv2.imwrite('example%i.jpg' % j, cutout)
953946
954947 im = im [:, :, ::- 1 ].transpose (2 , 0 , 1 ) # BGR to RGB, to 3x416x416
955948 im = np .ascontiguousarray (im , dtype = np .float32 ) # uint8 to float32
0 commit comments