Code refactor (#7923)
Browse files

* Code refactor for general.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Update restapi.py
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
- utils/flask_rest_api/restapi.py +1 -1
- utils/general.py +36 -43
utils/flask_rest_api/restapi.py
CHANGED
@@ -17,7 +17,7 @@ DETECTION_URL = "/v1/object-detection/yolov5s"
|
|
17 |
|
18 |
@app.route(DETECTION_URL, methods=["POST"])
|
19 |
def predict():
|
20 |
-
if not request.method == "POST":
|
21 |
return
|
22 |
|
23 |
if request.files.get("image"):
|
|
|
17 |
|
18 |
@app.route(DETECTION_URL, methods=["POST"])
|
19 |
def predict():
|
20 |
+
if request.method != "POST":
|
21 |
return
|
22 |
|
23 |
if request.files.get("image"):
|
utils/general.py
CHANGED
@@ -67,17 +67,16 @@ def is_kaggle():
|
|
67 |
|
68 |
def is_writeable(dir, test=False):
|
69 |
# Return True if directory has write permissions, test opening a file with write permissions if test=True
|
70 |
-
if test:
|
71 |
-
file = Path(dir) / 'tmp.txt'
|
72 |
-
try:
|
73 |
-
with open(file, 'w'): # open file with write permissions
|
74 |
-
pass
|
75 |
-
file.unlink() # remove file
|
76 |
-
return True
|
77 |
-
except OSError:
|
78 |
-
return False
|
79 |
-
else: # method 2
|
80 |
return os.access(dir, os.R_OK) # possible issues on Windows
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
|
83 |
def set_logging(name=None, verbose=VERBOSE):
|
@@ -244,7 +243,7 @@ def is_ascii(s=''):
|
|
244 |
|
245 |
def is_chinese(s='人工智能'):
|
246 |
# Is string composed of any Chinese characters?
|
247 |
-
return True if re.search('[\u4e00-\u9fff]', str(s)) else False
|
248 |
|
249 |
|
250 |
def emojis(str=''):
|
@@ -417,7 +416,7 @@ def check_file(file, suffix=''):
|
|
417 |
# Search/download file (if necessary) and return path
|
418 |
check_suffix(file, suffix) # optional
|
419 |
file = str(file) # convert to str()
|
420 |
-
if Path(file).is_file() or file == '':  # exists
|
421 |
return file
|
422 |
elif file.startswith(('http:/', 'https:/')): # download
|
423 |
url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
|
@@ -481,28 +480,26 @@ def check_dataset(data, autodownload=True):
|
|
481 |
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
|
482 |
if not all(x.exists() for x in val):
|
483 |
LOGGER.info(emojis('\nDataset not found ⚠, missing paths %s' % [str(x) for x in val if not x.exists()]))
|
484 |
-
if s and autodownload:  # download script
|
485 |
-
t = time.time()
|
486 |
-
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
|
487 |
-
if s.startswith('http') and s.endswith('.zip'): # URL
|
488 |
-
f = Path(s).name # filename
|
489 |
-
LOGGER.info(f'Downloading {s} to {f}...')
|
490 |
-
torch.hub.download_url_to_file(s, f)
|
491 |
-
Path(root).mkdir(parents=True, exist_ok=True) # create root
|
492 |
-
ZipFile(f).extractall(path=root) # unzip
|
493 |
-
Path(f).unlink() # remove zip
|
494 |
-
r = None # success
|
495 |
-
elif s.startswith('bash '): # bash script
|
496 |
-
LOGGER.info(f'Running {s} ...')
|
497 |
-
r = os.system(s)
|
498 |
-
else: # python script
|
499 |
-
r = exec(s, {'yaml': data}) # return None
|
500 |
-
dt = f'({round(time.time() - t, 1)}s)'
|
501 |
-
s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌"
|
502 |
-
LOGGER.info(emojis(f"Dataset download {s}"))
|
503 |
-
else:
|
504 |
raise Exception(emojis('Dataset not found ❌'))
|
505 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
506 |
check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts
|
507 |
return data # dictionary
|
508 |
|
@@ -531,8 +528,7 @@ def check_amp(model):
|
|
531 |
def url2file(url):
|
532 |
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
|
533 |
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
|
534 |
-
|
535 |
-
return file
|
536 |
|
537 |
|
538 |
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
|
@@ -645,10 +641,9 @@ def labels_to_class_weights(labels, nc=80):
|
|
645 |
|
646 |
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
|
647 |
# Produces image weights based on class_weights and image contents
|
|
|
648 |
class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
|
649 |
-
|
650 |
-
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
|
651 |
-
return image_weights
|
652 |
|
653 |
|
654 |
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
|
@@ -657,11 +652,10 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
|
|
657 |
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
|
658 |
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
|
659 |
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
|
660 |
-
|
661 |
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
|
662 |
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
|
663 |
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
|
664 |
-
return x
|
665 |
|
666 |
|
667 |
def xyxy2xywh(x):
|
@@ -883,7 +877,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op
|
|
883 |
p.requires_grad = False
|
884 |
torch.save(x, s or f)
|
885 |
mb = os.path.getsize(s or f) / 1E6 # filesize
|
886 |
-
LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
|
887 |
|
888 |
|
889 |
def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):
|
@@ -946,10 +940,9 @@ def apply_classifier(x, model, img, im0):
|
|
946 |
# Classes
|
947 |
pred_cls1 = d[:, 5].long()
|
948 |
ims = []
|
949 |
-
for j, a in enumerate(d):
|
950 |
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
|
951 |
im = cv2.resize(cutout, (224, 224)) # BGR
|
952 |
-
# cv2.imwrite('example%i.jpg' % j, cutout)
|
953 |
|
954 |
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
|
955 |
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
|
|
|
67 |
|
68 |
def is_writeable(dir, test=False):
|
69 |
# Return True if directory has write permissions, test opening a file with write permissions if test=True
|
70 |
+
if not test:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
71 |
return os.access(dir, os.R_OK) # possible issues on Windows
|
72 |
+
file = Path(dir) / 'tmp.txt'
|
73 |
+
try:
|
74 |
+
with open(file, 'w'): # open file with write permissions
|
75 |
+
pass
|
76 |
+
file.unlink() # remove file
|
77 |
+
return True
|
78 |
+
except OSError:
|
79 |
+
return False
|
80 |
|
81 |
|
82 |
def set_logging(name=None, verbose=VERBOSE):
|
|
|
243 |
|
244 |
def is_chinese(s='人工智能'):
|
245 |
# Is string composed of any Chinese characters?
|
246 |
+
return bool(re.search('[\u4e00-\u9fff]', str(s)))
|
247 |
|
248 |
|
249 |
def emojis(str=''):
|
|
|
416 |
# Search/download file (if necessary) and return path
|
417 |
check_suffix(file, suffix) # optional
|
418 |
file = str(file) # convert to str()
|
419 |
+
if Path(file).is_file() or not file: # exists
|
420 |
return file
|
421 |
elif file.startswith(('http:/', 'https:/')): # download
|
422 |
url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
|
|
|
480 |
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
|
481 |
if not all(x.exists() for x in val):
|
482 |
LOGGER.info(emojis('\nDataset not found ⚠, missing paths %s' % [str(x) for x in val if not x.exists()]))
|
483 |
+
if not s or not autodownload:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
484 |
raise Exception(emojis('Dataset not found ❌'))
|
485 |
+
t = time.time()
|
486 |
+
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
|
487 |
+
if s.startswith('http') and s.endswith('.zip'): # URL
|
488 |
+
f = Path(s).name # filename
|
489 |
+
LOGGER.info(f'Downloading {s} to {f}...')
|
490 |
+
torch.hub.download_url_to_file(s, f)
|
491 |
+
Path(root).mkdir(parents=True, exist_ok=True) # create root
|
492 |
+
ZipFile(f).extractall(path=root) # unzip
|
493 |
+
Path(f).unlink() # remove zip
|
494 |
+
r = None # success
|
495 |
+
elif s.startswith('bash '): # bash script
|
496 |
+
LOGGER.info(f'Running {s} ...')
|
497 |
+
r = os.system(s)
|
498 |
+
else: # python script
|
499 |
+
r = exec(s, {'yaml': data}) # return None
|
500 |
+
dt = f'({round(time.time() - t, 1)}s)'
|
501 |
+
s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌"
|
502 |
+
LOGGER.info(emojis(f"Dataset download {s}"))
|
503 |
check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts
|
504 |
return data # dictionary
|
505 |
|
|
|
528 |
def url2file(url):
|
529 |
# Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
|
530 |
url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
|
531 |
+
return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
|
|
|
532 |
|
533 |
|
534 |
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
|
|
|
641 |
|
642 |
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
|
643 |
# Produces image weights based on class_weights and image contents
|
644 |
+
# Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample
|
645 |
class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
|
646 |
+
return (class_weights.reshape(1, nc) * class_counts).sum(1)
|
|
|
|
|
647 |
|
648 |
|
649 |
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
|
|
|
652 |
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
|
653 |
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
|
654 |
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
|
655 |
+
return [
|
656 |
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
|
657 |
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
|
658 |
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
|
|
|
659 |
|
660 |
|
661 |
def xyxy2xywh(x):
|
|
|
877 |
p.requires_grad = False
|
878 |
torch.save(x, s or f)
|
879 |
mb = os.path.getsize(s or f) / 1E6 # filesize
|
880 |
+
LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
|
881 |
|
882 |
|
883 |
def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):
|
|
|
940 |
# Classes
|
941 |
pred_cls1 = d[:, 5].long()
|
942 |
ims = []
|
943 |
+
for a in d:
|
944 |
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
|
945 |
im = cv2.resize(cutout, (224, 224)) # BGR
|
|
|
946 |
|
947 |
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
|
948 |
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
|