Fix 6 Flake8 issues (#6541)
* F541
* F821
* F841
* E741
* E302
* E722
* Apply suggestions from code review
* Update general.py
* Update datasets.py
* Update export.py
* Update plots.py
* Update plots.py
Co-authored-by: Glenn Jocher <[email protected]>
- export.py +8 -6
- models/tf.py +2 -2
- setup.cfg +0 -6
- utils/datasets.py +27 -26
- utils/downloads.py +2 -2
- utils/general.py +8 -7
- utils/loggers/wandb/wandb_utils.py +1 -1
- utils/metrics.py +1 -0
- utils/plots.py +2 -2
- utils/torch_utils.py +4 -4
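
For context, here is what each of the six codes flags, shown as toy snippets (illustrative only, not code from this repository):

# F541: f-string without any placeholders -- f"hello" should be plain "hello"
greeting = "hello"

# E741: ambiguous variable name 'l', 'O' or 'I' -- this PR renames l -> lb
labels = [[0, 0.5, 0.5, 0.1, 0.1]]
for lb in labels:
    lb[0] = 0  # same rename as in datasets.py below

# F841: local variable assigned but never used -- bind throwaway results to _
_ = sum(range(3))


# E302: exactly two blank lines are required before a top-level def (as above)
def safe_int(s):
    # E722: bare 'except:' becomes 'except Exception:' (interrupts still propagate)
    # F821: avoided by defining names before use (cf. hp, wp in load_mosaic9 below)
    try:
        return int(s)
    except Exception:
        return 0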
export.py
CHANGED
@@ -244,7 +244,7 @@ def export_saved_model(model, im, file, dynamic,
 
     tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
     im = tf.zeros((batch_size, *imgsz, 3))  # BHWC order for TensorFlow
-    y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
     inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
     outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
     keras_model = keras.Model(inputs=inputs, outputs=outputs)
@@ -407,16 +407,17 @@ def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
     tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'))  # TensorFlow exports
     file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights)
 
-    # Checks
-    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
-    opset = 12 if ('openvino' in include) else opset  # OpenVINO requires opset <= 12
-
     # Load PyTorch model
     device = select_device(device)
     assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
     model = attempt_load(weights, map_location=device, inplace=True, fuse=True)  # load FP32 model
     nc, names = model.nc, model.names  # number of classes, class names
 
+    # Checks
+    imgsz *= 2 if len(imgsz) == 1 else 1  # expand
+    opset = 12 if ('openvino' in include) else opset  # OpenVINO requires opset <= 12
+    assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}'
+
     # Input
     gs = int(max(model.stride))  # grid size (max stride)
     imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
@@ -438,7 +439,8 @@ def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
 
     for _ in range(2):
         y = model(im)  # dry runs
-    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)")
+    shape = tuple(y[0].shape)  # model output shape
+    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
 
     # Exports
     f = [''] * 10  # exported filenames
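The F841 fixes above keep the warm-up call and only discard its result, since the forward pass itself matters for tracing; where the result is genuinely needed, as with the output shape now logged, the variable is used rather than deleted. The same `_ =` pattern appears in models/tf.py below. A minimal sketch of both cases (toy model, assuming torch is installed):

import torch

net = torch.nn.Linear(4, 2)  # stand-in for the YOLOv5 model
im = torch.zeros(1, 4)

_ = net(im)  # result intentionally discarded -> no F841
y = net(im)  # dry run whose result IS used below
shape = tuple(y.shape)  # model output shape, as export.py now logs
print(f"starting export with output shape {shape}")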
models/tf.py
CHANGED
@@ -427,13 +427,13 @@ def run(weights=ROOT / 'yolov5s.pt',  # weights path
     # PyTorch model
     im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image
     model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False)
-    y = model(im)  # inference
+    _ = model(im)  # inference
     model.info()
 
     # TensorFlow model
     im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image
     tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-    y = tf_model.predict(im)  # inference
+    _ = tf_model.predict(im)  # inference
 
     # Keras model
     im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
setup.cfg
CHANGED
@@ -30,10 +30,6 @@ ignore =
     E731  # Do not assign a lambda expression, use a def
     F405  # name may be undefined, or defined from star imports: module
     E402  # module level import not at top of file
-    F841  # local variable name is assigned to but never used
-    E741  # do not use variables named ‘l’, ‘O’, or ‘I’
-    F821  # undefined name name
-    E722  # do not use bare except, specify exception instead
     F401  # module imported but unused
     W504  # line break after binary operator
     E127  # continuation line over-indented for visual indent
@@ -41,8 +37,6 @@ ignore =
     E231  # missing whitespace after ‘,’, ‘;’, or ‘:’
     E501  # line too long
     F403  # ‘from module import *’ used; unable to detect undefined names
-    E302  # expected 2 blank lines, found 0
-    F541  # f-string without any placeholders
 
 
 [isort]
utils/datasets.py
CHANGED
@@ -59,7 +59,7 @@ def exif_size(img):
             s = (s[1], s[0])
         elif rotation == 8:  # rotation 90
             s = (s[1], s[0])
-    except:
+    except Exception:
         pass
 
     return s
@@ -420,7 +420,7 @@ class LoadImagesAndLabels(Dataset):
             cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
             assert cache['version'] == self.cache_version  # same version
             assert cache['hash'] == get_hash(self.label_files + self.img_files)  # same hash
-        except:
+        except Exception:
             cache, exists = self.cache_labels(cache_path, prefix), False  # cache
 
         # Display cache
@@ -514,13 +514,13 @@ class LoadImagesAndLabels(Dataset):
         with Pool(NUM_THREADS) as pool:
             pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                         desc=desc, total=len(self.img_files))
-            for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
+            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                 nm += nm_f
                 nf += nf_f
                 ne += ne_f
                 nc += nc_f
                 if im_file:
-                    x[im_file] = [l, shape, segments]
+                    x[im_file] = [lb, shape, segments]
                 if msg:
                     msgs.append(msg)
                 pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt"
@@ -627,8 +627,8 @@ class LoadImagesAndLabels(Dataset):
     @staticmethod
     def collate_fn(batch):
         img, label, path, shapes = zip(*batch)  # transposed
-        for i, l in enumerate(label):
-            l[:, 0] = i  # add target image index for build_targets()
+        for i, lb in enumerate(label):
+            lb[:, 0] = i  # add target image index for build_targets()
         return torch.stack(img, 0), torch.cat(label, 0), path, shapes
 
     @staticmethod
@@ -645,15 +645,15 @@ class LoadImagesAndLabels(Dataset):
             if random.random() < 0.5:
                 im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
                     0].type(img[i].type())
-                l = label[i]
+                lb = label[i]
             else:
                 im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
-                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
+                lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
             img4.append(im)
-            label4.append(l)
+            label4.append(lb)
 
-        for i, l in enumerate(label4):
-            l[:, 0] = i  # add target image index for build_targets()
+        for i, lb in enumerate(label4):
+            lb[:, 0] = i  # add target image index for build_targets()
 
         return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
 
@@ -743,6 +743,7 @@ def load_mosaic9(self, index):
     s = self.img_size
     indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
     random.shuffle(indices)
+    hp, wp = -1, -1  # height, width previous
     for i, index in enumerate(indices):
         # Load image
         img, _, (h, w) = load_image(self, index)
@@ -906,30 +907,30 @@ def verify_image_label(args):
         if os.path.isfile(lb_file):
             nf = 1  # label found
             with open(lb_file) as f:
-                l = [x.split() for x in f.read().strip().splitlines() if len(x)]
-                if any([len(x) > 8 for x in l]):  # is segment
-                    classes = np.array([x[0] for x in l], dtype=np.float32)
-                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
-                    l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
-                l = np.array(l, dtype=np.float32)
-            nl = len(l)
+                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
+                if any([len(x) > 8 for x in lb]):  # is segment
+                    classes = np.array([x[0] for x in lb], dtype=np.float32)
+                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb]  # (cls, xy1...)
+                    lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
+                lb = np.array(lb, dtype=np.float32)
+            nl = len(lb)
             if nl:
-                assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
-                assert (l >= 0).all(), f'negative label values {l[l < 0]}'
-                assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
-                _, i = np.unique(l, axis=0, return_index=True)
+                assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
+                assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
+                assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
+                _, i = np.unique(lb, axis=0, return_index=True)
                 if len(i) < nl:  # duplicate row check
-                    l = l[i]  # remove duplicates
+                    lb = lb[i]  # remove duplicates
                     if segments:
                         segments = segments[i]
                     msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
             else:
                 ne = 1  # label empty
-                l = np.zeros((0, 5), dtype=np.float32)
+                lb = np.zeros((0, 5), dtype=np.float32)
         else:
             nm = 1  # label missing
-            l = np.zeros((0, 5), dtype=np.float32)
-        return im_file, l, shape, segments, nm, nf, ne, nc, msg
+            lb = np.zeros((0, 5), dtype=np.float32)
+        return im_file, lb, shape, segments, nm, nf, ne, nc, msg
     except Exception as e:
         nc = 1
         msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
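Most of the changes above are the mechanical E741 rename `l` -> `lb` and E722 `except Exception:` fixes, but the added `hp, wp = -1, -1` is an F821 fix: inside the mosaic loop, placing image i reads the previous image's height and width, so the names must exist before the first iteration. A toy sketch of the initialize-before-loop pattern (hypothetical sizes, not the repository's mosaic geometry):

sizes = [(480, 640), (320, 320), (640, 480)]
hp, wp = -1, -1  # height, width previous (sentinels before the first pass)
for h, w in sizes:
    if hp >= 0:  # only meaningful once a previous image has been seen
        print(f'previous {hp}x{wp} -> current {h}x{w}')
    hp, wp = h, w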
utils/downloads.py
CHANGED
@@ -62,12 +62,12 @@ def attempt_download(file, repo='ultralytics/yolov5'):  # from utils.downloads i
             response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
             assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
             tag = response['tag_name']  # i.e. 'v1.0'
-        except:  # fallback plan
+        except Exception:  # fallback plan
             assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
                       'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
             try:
                 tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
-            except:
+            except Exception:
                 tag = 'v6.0'  # current release
 
         if name in assets:
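The E722 fixes here are not purely cosmetic: a bare `except:` catches BaseException, including KeyboardInterrupt and SystemExit, so Ctrl-C during a slow requests.get() could be swallowed by the fallback path. `except Exception:` keeps the fallback while letting interrupts propagate. A small demonstration:

def with_fallback(fn):
    try:
        return fn()
    except Exception:  # ordinary errors only; KeyboardInterrupt still propagates
        return 'fallback'

print(with_fallback(lambda: 1 / 0))  # ZeroDivisionError -> 'fallback'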
utils/general.py
CHANGED
@@ -295,7 +295,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta
     for r in requirements:
         try:
             pkg.require(r)
-        except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
+        except Exception:  # DistributionNotFound or VersionConflict if requirements not met
             s = f"{prefix} {r} not found and is required by YOLOv5"
             if install:
                 LOGGER.info(f"{s}, attempting auto-update...")
@@ -699,16 +699,16 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non
     output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
     for xi, x in enumerate(prediction):  # image index, image inference
         # Apply constraints
-        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
+        x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
         x = x[xc[xi]]  # confidence
 
         # Cat apriori labels if autolabelling
         if labels and len(labels[xi]):
-            l = labels[xi]
-            v = torch.zeros((len(l), nc + 5), device=x.device)
-            v[:, :4] = l[:, 1:5]  # box
+            lb = labels[xi]
+            v = torch.zeros((len(lb), nc + 5), device=x.device)
+            v[:, :4] = lb[:, 1:5]  # box
             v[:, 4] = 1.0  # conf
-            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
+            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
             x = torch.cat((x, v), 0)
 
         # If none remain process next image
@@ -783,7 +783,8 @@ def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_op
 
 
 def print_mutation(results, hyp, save_dir, bucket):
-    evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
+    evolve_csv = save_dir / 'evolve.csv'
+    evolve_yaml = save_dir / 'hyp_evolve.yaml'
     keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
             'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys())  # [results + hyps]
     keys = tuple(x.strip() for x in keys)
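In non_max_suppression, re-enabling the width-height line is what clears the warning: while it was commented out, the bounds it references were assigned upstream but never used. Functionally, it zeroes the objectness of candidates whose predicted width or height is out of range, so the confidence mask on the next line drops them. A standalone sketch with toy values (column layout x, y, w, h, conf; the bounds are stand-ins for the function's settings, not the repository's):

import torch

min_wh, max_wh = 2, 4096  # toy pixel bounds
x = torch.tensor([[10., 10., 1., 50., 0.9],    # w < min_wh -> masked
                  [10., 10., 30., 50., 0.8]])  # in range -> kept
x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
print(x[:, 4])  # tensor([0.0000, 0.8000])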
utils/loggers/wandb/wandb_utils.py
CHANGED
@@ -288,7 +288,7 @@ class WandbLogger():
             model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
             assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
             modeldir = model_artifact.download()
-            epochs_trained = model_artifact.metadata.get('epochs_trained')
+            # epochs_trained = model_artifact.metadata.get('epochs_trained')
             total_epochs = model_artifact.metadata.get('total_epochs')
             is_finished = total_epochs is None
             assert not is_finished, 'training is finished, can only resume incomplete runs.'
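Here the unused assignment is commented out rather than deleted, silencing F841 while keeping a visible record that the artifact metadata carries an epochs_trained field. The equivalent minimal pattern:

metadata = {'epochs_trained': 7, 'total_epochs': 10}  # toy stand-in for artifact metadata
# epochs_trained = metadata.get('epochs_trained')  # unused for now; kept for reference
total_epochs = metadata.get('total_epochs')
assert total_epochs is not None, 'training is finished, can only resume incomplete runs.'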
utils/metrics.py
CHANGED
@@ -239,6 +239,7 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=
             return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
     return iou  # IoU
 
+
 def box_iou(box1, box2):
     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
     """
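E302 is purely layout: PEP 8 requires two blank lines before a top-level def, which is all the inserted blank line provides. Schematically:

def bbox_iou_stub():
    return 1.0


def box_iou_stub():  # exactly two blank lines above satisfy E302
    return 1.0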
utils/plots.py
CHANGED
@@ -54,7 +54,7 @@ def check_pil_font(font=FONT, size=10):
     font = font if font.exists() else (CONFIG_DIR / font.name)
     try:
         return ImageFont.truetype(str(font) if font.exists() else font.name, size)
-    except Exception as e:  # download if missing
+    except Exception:  # download if missing
         check_font(font)
         try:
             return ImageFont.truetype(str(font), size)
@@ -340,7 +340,7 @@ def plot_labels(labels, names=(), save_dir=Path('')):
     matplotlib.use('svg')  # faster
     ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
     y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
-    # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
+    [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
     ax[0].set_ylabel('instances')
     if 0 < len(names) < 30:
         ax[0].set_xticks(range(len(names)))
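As in general.py, the second fix re-enables a commented line so that y, the (counts, bin_edges, patches) tuple returned by ax.hist, is actually used: y[2] is the container of bar patches, which get per-class colors. A minimal standalone sketch (assuming matplotlib is installed; toy data and palette):

import matplotlib
matplotlib.use('Agg')  # headless backend for this sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
y = ax.hist([0, 0, 1, 2, 2, 2], bins=3)  # y = (counts, bin_edges, patches)
palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]  # toy per-class RGB in 0-1 range
for i, patch in enumerate(y[2]):
    patch.set_color(palette[i])  # same idea as the re-enabled line
fig.savefig('hist.png')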
utils/torch_utils.py
CHANGED
@@ -49,7 +49,7 @@ def git_describe(path=Path(__file__).parent):  # path must be a directory
     s = f'git -C {path} describe --tags --long --always'
     try:
         return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
-    except subprocess.CalledProcessError as e:
+    except subprocess.CalledProcessError:
         return ''  # not a git repository
 
 
@@ -59,7 +59,7 @@ def device_count():
     try:
         cmd = 'nvidia-smi -L | wc -l'
         return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1])
-    except Exception as e:
+    except Exception:
         return 0
 
 
@@ -124,7 +124,7 @@ def profile(input, ops, n=10, device=None):
             tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
             try:
                 flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
-            except:
+            except Exception:
                 flops = 0
 
             try:
@@ -135,7 +135,7 @@ def profile(input, ops, n=10, device=None):
                     try:
                         _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
                         t[2] = time_sync()
-                    except Exception as e:  # no backward method
+                    except Exception:  # no backward method
                         # print(e) # for debug
                         t[2] = float('nan')
                     tf += (t[1] - t[0]) * 1000 / n  # ms per op forward
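One fix here narrows rather than broadens the handler: git describe failing (e.g. outside a git repository) raises subprocess.CalledProcessError specifically, because check_output verifies the exit status, so that exception is named instead of a blanket Exception. A runnable sketch of the same pattern:

import subprocess

def git_describe(path='.'):
    try:
        return subprocess.check_output(f'git -C {path} describe --tags --long --always',
                                       shell=True, stderr=subprocess.STDOUT).decode()[:-1]
    except subprocess.CalledProcessError:  # non-zero exit -> not a git repository
        return ''

print(git_describe() or 'not a git repo')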