Update `get_loggers()` (#4854)
* Update `set_logging()`
* Update export.py
* pre-commit fixes
* Update LoadImages
* Update LoadStreams
* Update print_args
* Single LOGGER definition
* yolo.py fix
Co-authored-by: pre-commit <[email protected]>
- detect.py +8 -9
- export.py +31 -32
- models/tf.py +2 -5
- models/yolo.py +1 -4
- train.py +5 -7
- utils/datasets.py +15 -15
- utils/general.py +11 -7
- val.py +11 -12
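Taken together, the per-file diffs below make one coordinated change: the per-entry-point `set_logging()` calls and per-module `LOGGER = logging.getLogger(__name__)` definitions go away in favor of a single `LOGGER` created once at import time in utils/general.py, bare `print()` calls become `LOGGER.info()` (or a warning-level call), and the dataloaders return a ready-made description string `s` instead of printing it themselves. A minimal standalone sketch of the new pattern, condensed into one file for illustration (the real definition lives in utils/general.py):

```python
import logging
import os


def set_logging(name=None, verbose=True):
    # Configure the root handler once, then hand back a named logger to share.
    rank = int(os.getenv('RANK', -1))  # DDP rank; -1 means a single-process run
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN)
    return logging.getLogger(name)


LOGGER = set_logging(__name__)  # defined once; every other module imports it

LOGGER.info('Fusing layers...')           # was: print('Fusing layers...')
LOGGER.warning('WARNING: shapes differ')  # warnings get their own level
```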
detect.py

```diff
@@ -25,8 +25,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.experimental import attempt_load
 from utils.datasets import LoadImages, LoadStreams
 from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
-    increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
-    strip_optimizer, xyxy2xywh
+    increment_path, non_max_suppression, print_args, save_one_box, scale_coords, strip_optimizer, xyxy2xywh, LOGGER
 from utils.plots import Annotator, colors
 from utils.torch_utils import load_classifier, select_device, time_sync

@@ -68,7 +67,6 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

     # Initialize
-    set_logging()
     device = select_device(device)
     half &= device.type != 'cpu'  # half precision only supported on CUDA

@@ -132,7 +130,7 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
     if pt and device.type != 'cpu':
         model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters())))  # run once
     dt, seen = [0.0, 0.0, 0.0], 0
-    for path, img, im0s, vid_cap in dataset:
+    for path, img, im0s, vid_cap, s in dataset:
         t1 = time_sync()
         if onnx:
             img = img.astype('float32')

@@ -191,9 +189,10 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
         for i, det in enumerate(pred):  # per image
             seen += 1
             if webcam:  # batch_size >= 1
-                p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
             else:
-                p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

             p = Path(p)  # to Path
             save_path = str(save_dir / p.name)  # img.jpg

@@ -227,7 +226,7 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
                         save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)

             # Print time (inference-only)
-            print(f'{s}Done. ({t3 - t2:.3f}s)')
+            LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')

             # Stream results
             im0 = annotator.result()

@@ -256,10 +255,10 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)

     # Print results
     t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
-    print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
     if save_txt or save_img:
         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        print(f"Results saved to {colorstr('bold', save_dir)}{s}")
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
     if update:
         strip_optimizer(weights)  # update model (to fix SourceChangeWarning)
```
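With the loader changes below in utils/datasets.py, the inference loop now unpacks a fifth element `s`, a human-readable description the dataloader pre-builds for each item; in webcam mode detect.py appends the batch index before logging. A rough sketch of the consumer side (`FakeDataset` and the timings are illustrative stand-ins, not repository code):

```python
import logging
import time

logging.basicConfig(format="%(message)s", level=logging.INFO)
LOGGER = logging.getLogger(__name__)


class FakeDataset:
    # Stands in for LoadImages/LoadStreams: yields (path, img, im0s, vid_cap, s).
    def __iter__(self):
        for n, path in enumerate(['a.jpg', 'b.jpg'], start=1):
            s = f'image {n}/2 {path}: '  # description string built by the loader
            yield path, None, None, None, s


for path, img, im0s, vid_cap, s in FakeDataset():
    t2 = time.time()
    # ... inference and NMS would run here ...
    t3 = time.time()
    LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')  # was: print(f'{s}Done. ...')
```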
export.py

```diff
@@ -42,23 +42,23 @@ from models.experimental import attempt_load
 from models.yolo import Detect
 from utils.activations import SiLU
 from utils.datasets import LoadImages
-from utils.general import check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args, \
-    set_logging, url2file
+from utils.general import check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args, \
+    url2file, LOGGER
 from utils.torch_utils import select_device


 def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
     # YOLOv5 TorchScript model export
     try:
-        print(f'\n{prefix} starting export with torch {torch.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
         f = file.with_suffix('.torchscript.pt')

         ts = torch.jit.trace(model, im, strict=False)
         (optimize_for_mobile(ts) if optimize else ts).save(f)

-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
     except Exception as e:
-        print(f'{prefix} export failure: {e}')
+        LOGGER.info(f'{prefix} export failure: {e}')


 def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
@@ -67,7 +67,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
         check_requirements(('onnx',))
         import onnx

-        print(f'\n{prefix} starting export with onnx {onnx.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
         f = file.with_suffix('.onnx')

         torch.onnx.export(model, im, f, verbose=False, opset_version=opset,

@@ -82,7 +82,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
         # Checks
         model_onnx = onnx.load(f)  # load onnx model
         onnx.checker.check_model(model_onnx)  # check onnx model
-        # print(onnx.helper.printable_graph(model_onnx.graph))  # print
+        # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph))  # print

         # Simplify
         if simplify:

@@ -90,7 +90,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
                 check_requirements(('onnx-simplifier',))
                 import onnxsim

-                print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+                LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
                 model_onnx, check = onnxsim.simplify(
                     model_onnx,
                     dynamic_input_shape=dynamic,

@@ -98,11 +98,11 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
                 assert check, 'assert check failed'
                 onnx.save(model_onnx, f)
             except Exception as e:
-                print(f'{prefix} simplifier failure: {e}')
-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        print(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'")
+                LOGGER.info(f'{prefix} simplifier failure: {e}')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'")
     except Exception as e:
-        print(f'{prefix} export failure: {e}')
+        LOGGER.info(f'{prefix} export failure: {e}')


 def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
@@ -112,7 +112,7 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
         check_requirements(('coremltools',))
         import coremltools as ct

-        print(f'\n{prefix} starting export with coremltools {ct.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
         f = file.with_suffix('.mlmodel')

         model.train()  # CoreML exports should be placed in model.train() mode

@@ -120,9 +120,9 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
         ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])])
         ct_model.save(f)

-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
     except Exception as e:
-        print(f'\n{prefix} export failure: {e}')
+        LOGGER.info(f'\n{prefix} export failure: {e}')

     return ct_model

@@ -137,7 +137,7 @@ def export_saved_model(model, im, file, dynamic,
         from tensorflow import keras
         from models.tf import TFModel, TFDetect

-        print(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
         f = str(file).replace('.pt', '_saved_model')
         batch_size, ch, *imgsz = list(im.shape)  # BCHW

@@ -151,9 +151,9 @@ def export_saved_model(model, im, file, dynamic,
         keras_model.summary()
         keras_model.save(f, save_format='tf')

-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
     except Exception as e:
-        print(f'\n{prefix} export failure: {e}')
+        LOGGER.info(f'\n{prefix} export failure: {e}')

     return keras_model

@@ -164,7 +164,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
         import tensorflow as tf
         from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

-        print(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
         f = file.with_suffix('.pb')

         m = tf.function(lambda x: keras_model(x))  # full model

@@ -173,9 +173,9 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
         frozen_func.graph.as_graph_def()
         tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)

-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
     except Exception as e:
-        print(f'\n{prefix} export failure: {e}')
+        LOGGER.info(f'\n{prefix} export failure: {e}')


 def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):
@@ -184,7 +184,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):
         import tensorflow as tf
         from models.tf import representative_dataset_gen

-        print(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
         batch_size, ch, *imgsz = list(im.shape)  # BCHW
         f = str(file).replace('.pt', '-fp16.tflite')

@@ -204,10 +204,10 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):

         tflite_model = converter.convert()
         open(f, "wb").write(tflite_model)
-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')

     except Exception as e:
-        print(f'\n{prefix} export failure: {e}')
+        LOGGER.info(f'\n{prefix} export failure: {e}')


 def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
@@ -217,7 +217,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
         import re
         import tensorflowjs as tfjs

-        print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
+        LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
         f = str(file).replace('.pt', '_web_model')  # js dir
         f_pb = file.with_suffix('.pb')  # *.pb path
         f_json = f + '/model.json'  # *.json path

@@ -240,9 +240,9 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
                 json)
             j.write(subst)

-        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
     except Exception as e:
-        print(f'\n{prefix} export failure: {e}')
+        LOGGER.info(f'\n{prefix} export failure: {e}')


 @torch.no_grad()
@@ -297,7 +297,7 @@ def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'

     for _ in range(2):
         y = model(im)  # dry runs
-    print(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)")
+    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)")

     # Exports
     if 'torchscript' in include:

@@ -322,9 +322,9 @@ def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
         export_tfjs(model, im, file)

     # Finish
-    print(f'\nExport complete ({time.time() - t:.2f}s)'
-          f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
-          f'\nVisualize with https://netron.app')
+    LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
+                f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
+                f'\nVisualize with https://netron.app')


 def parse_opt():

@@ -355,7 +355,6 @@ def parse_opt():

 def main(opt):
-    set_logging()
     run(**vars(opt))
```
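Every `export_*` helper follows the same shape after this change: log a "starting export" line with the backend version, write the artifact, log success with the file size, and downgrade exceptions to an "export failure" log so the remaining exports still run. A condensed sketch of that pattern (`export_dummy` and its body are illustrative; this `file_size` only mirrors the helper imported from utils.general):

```python
import logging
from pathlib import Path

logging.basicConfig(format="%(message)s", level=logging.INFO)
LOGGER = logging.getLogger(__name__)


def file_size(path):
    # File size in MB, in the spirit of utils.general.file_size
    return Path(path).stat().st_size / 1E6


def export_dummy(file, prefix='Dummy:'):
    # Illustrative exporter shaped like export_torchscript/export_onnx/...
    try:
        LOGGER.info(f'\n{prefix} starting export...')
        f = Path(file).with_suffix('.dummy')
        f.write_bytes(b'\x00' * 1024)  # stand-in for the real serialization step
        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    except Exception as e:
        LOGGER.info(f'{prefix} export failure: {e}')  # swallow so other exports continue


export_dummy('yolov5s.pt')  # logs success and writes yolov5s.dummy in the working directory
```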
models/tf.py

```diff
@@ -31,11 +31,9 @@ from tensorflow import keras
 from models.common import Bottleneck, BottleneckCSP, Concat, Conv, C3, DWConv, Focus, SPP, SPPF, autopad
 from models.experimental import CrossConv, MixConv2d, attempt_load
 from models.yolo import Detect
-from utils.general import make_divisible, print_args, set_logging
+from utils.general import make_divisible, print_args, LOGGER
 from utils.activations import SiLU

-LOGGER = logging.getLogger(__name__)
-

 class TFBN(keras.layers.Layer):
     # TensorFlow BatchNormalization wrapper

@@ -336,7 +334,7 @@ class TFModel:

         # Define model
         if nc and nc != self.yaml['nc']:
-            print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
+            LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
             self.yaml['nc'] = nc  # override yaml value
         self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)

@@ -457,7 +455,6 @@ def parse_opt():

 def main(opt):
-    set_logging()
     run(**vars(opt))
```
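Replacing the module-level `LOGGER = logging.getLogger(__name__)` with an import of the shared one means models/tf.py no longer owns a private logger that has to be configured separately. The difference is plain stdlib behavior, shown here in a standalone snippet:

```python
import logging

logging.basicConfig(format="%(message)s", level=logging.INFO)

# Per-module loggers: a distinct object per module name.
a = logging.getLogger('models.tf')
b = logging.getLogger('models.yolo')
print(a is b)  # False: two loggers whose levels and handlers could drift apart

# One shared logger: every importer gets the same cached object.
shared = logging.getLogger('utils.general')
print(shared is logging.getLogger('utils.general'))  # True: single point of configuration
shared.info("Overriding model.yaml nc=80 with nc=2")  # e.g. the TFModel override message
```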
models/yolo.py

```diff
@@ -20,7 +20,7 @@ if str(ROOT) not in sys.path:
 from models.common import *
 from models.experimental import *
 from utils.autoanchor import check_anchor_order
-from utils.general import check_yaml, make_divisible, print_args, set_logging
+from utils.general import check_version, check_yaml, make_divisible, print_args, LOGGER
 from utils.plots import feature_visualization
 from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \
     select_device, time_sync

@@ -30,8 +30,6 @@ try:
 except ImportError:
     thop = None

-LOGGER = logging.getLogger(__name__)
-

 class Detect(nn.Module):
     stride = None  # strides computed during build

@@ -311,7 +309,6 @@ if __name__ == '__main__':
     opt = parser.parse_args()
     opt.cfg = check_yaml(opt.cfg)  # check YAML
     print_args(FILE.stem, opt)
-    set_logging()
     device = select_device(opt.device)

     # Create model
```
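yolo.py keeps its `print_args(FILE.stem, opt)` call but drops `set_logging()`: importing utils.general now configures logging as a side effect, and `print_args` itself logs through the shared `LOGGER`. A simplified `print_args`, without the repo's `colorstr` coloring, behaves like this:

```python
import argparse
import logging

logging.basicConfig(format="%(message)s", level=logging.INFO)
LOGGER = logging.getLogger(__name__)


def print_args(name, opt):
    # Log all argparse arguments on one line, as utils.general.print_args now does
    LOGGER.info(f'{name}: ' + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))


parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml')
parser.add_argument('--device', default='')
opt = parser.parse_args([])
print_args('yolo', opt)  # -> "yolo: cfg=yolov5s.yaml, device="
```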
train.py

```diff
@@ -40,7 +40,7 @@ from utils.autobatch import check_train_batch_size
 from utils.datasets import create_dataloader
 from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
     strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \
-    check_file, check_yaml, check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods
+    check_file, check_yaml, check_suffix, print_args, print_mutation, one_cycle, colorstr, methods, LOGGER
 from utils.downloads import attempt_download
 from utils.loss import ComputeLoss
 from utils.plots import plot_labels, plot_evolve

@@ -51,7 +51,6 @@ from utils.metrics import fitness
 from utils.loggers import Loggers
 from utils.callbacks import Callbacks

-LOGGER = logging.getLogger(__name__)
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
 WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))

@@ -129,7 +128,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     for k, v in model.named_parameters():
         v.requires_grad = True  # train all layers
         if any(x in k for x in freeze):
-            print(f'freezing {k}')
+            LOGGER.info(f'freezing {k}')
             v.requires_grad = False

     # Image size

@@ -485,7 +484,6 @@ def parse_opt(known=False):

 def main(opt, callbacks=Callbacks()):
     # Checks
-    set_logging(RANK)
     if RANK in [-1, 0]:
         print_args(FILE.stem, opt)
         check_git_status()

@@ -609,9 +607,9 @@ def main(opt, callbacks=Callbacks()):

         # Plot results
         plot_evolve(evolve_csv)
-        print(f'Hyperparameter evolution finished\n'
-              f"Results saved to {colorstr('bold', save_dir)}\n"
-              f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}')
+        LOGGER.info(f'Hyperparameter evolution finished\n'
+                    f"Results saved to {colorstr('bold', save_dir)}\n"
+                    f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}')


 def run(**kwargs):
```
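train.py no longer passes its rank explicitly via `set_logging(RANK)`; the new `set_logging` reads the `RANK` environment variable that torch.distributed launchers set, so non-zero ranks come up at WARN level on their own. A standalone sketch of the gating (try it with `RANK=1` in the environment to watch the INFO line disappear):

```python
import logging
import os

rank = int(os.getenv('RANK', -1))  # set by torchrun / torch.distributed.launch
logging.basicConfig(format="%(message)s",
                    level=logging.INFO if rank in (-1, 0) else logging.WARN)
LOGGER = logging.getLogger(__name__)

LOGGER.info('freezing model.0.conv.weight')  # visible only on rank -1 or 0
LOGGER.warning('WARNING: something odd')     # visible on every rank
```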
utils/datasets.py

```diff
@@ -28,7 +28,7 @@ from tqdm import tqdm

 from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
 from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \
-    xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy
+    xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy, LOGGER
 from utils.torch_utils import torch_distributed_zero_first

 # Parameters

@@ -210,14 +210,14 @@ class LoadImages:
             ret_val, img0 = self.cap.read()

             self.frame += 1
-            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
+            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '

         else:
             # Read image
             self.count += 1
             img0 = cv2.imread(path)  # BGR
-            assert img0 is not None, 'Image Not Found ' + path
-            print(f'image {self.count}/{self.nf} {path}: ', end='')
+            assert img0 is not None, f'Image Not Found {path}'
+            s = f'image {self.count}/{self.nf} {path}: '

         # Padded resize
         img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]

@@ -226,7 +226,7 @@ class LoadImages:
         img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
         img = np.ascontiguousarray(img)

-        return path, img, img0, self.cap
+        return path, img, img0, self.cap, s

     def new_video(self, path):
         self.frame = 0

@@ -264,7 +264,7 @@ class LoadWebcam:  # for inference
         # Print
         assert ret_val, f'Camera Error {self.pipe}'
         img_path = 'webcam.jpg'
-        print(f'webcam {self.count}: ', end='')
+        s = f'webcam {self.count}: '

         # Padded resize
         img = letterbox(img0, self.img_size, stride=self.stride)[0]

@@ -273,7 +273,7 @@ class LoadWebcam:  # for inference
         img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
         img = np.ascontiguousarray(img)

-        return img_path, img, img0, None
+        return img_path, img, img0, None, s

     def __len__(self):
         return 0

@@ -298,14 +298,14 @@ class LoadStreams:
         self.auto = auto
         for i, s in enumerate(sources):  # index, source
             # Start thread to read frames from video stream
-            print(f'{i + 1}/{n}: {s}... ', end='')
+            st = f'{i + 1}/{n}: {s}... '
             if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                 check_requirements(('pafy', 'youtube_dl'))
                 import pafy
                 s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
             s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
             cap = cv2.VideoCapture(s)
-            assert cap.isOpened(), f'Failed to open {s}'
+            assert cap.isOpened(), f'{st}Failed to open {s}'
             w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
             h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
             self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0  # 30 FPS fallback

@@ -313,15 +313,15 @@ class LoadStreams:

             _, self.imgs[i] = cap.read()  # guarantee first frame
             self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
-            print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
+            LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
             self.threads[i].start()
-        print('')  # newline
+        LOGGER.info('')  # newline

         # check for common shapes
         s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
         self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
         if not self.rect:
-            print('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
+            LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')

     def update(self, i, cap, stream):
         # Read stream `i` frames in daemon thread

@@ -335,7 +335,7 @@ class LoadStreams:
             if success:
                 self.imgs[i] = im
             else:
-                print('WARNING: Video stream unresponsive, please check your IP camera connection.')
+                LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.')
                 self.imgs[i] *= 0
                 cap.open(stream)  # re-open stream if signal was lost
             time.sleep(1 / self.fps[i])  # wait time

@@ -361,7 +361,7 @@ class LoadStreams:
         img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
         img = np.ascontiguousarray(img)

-        return self.sources, img, img0, None
+        return self.sources, img, img0, None, ''

     def __len__(self):
         return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years

@@ -666,7 +666,7 @@ def load_image(self, i):
     else:  # read image
         path = self.img_files[i]
         im = cv2.imread(path)  # BGR
-        assert im is not None, 'Image Not Found ' + path
+        assert im is not None, f'Image Not Found {path}'
         h0, w0 = im.shape[:2]  # orig hw
         r = self.img_size / max(h0, w0)  # ratio
         if r != 1:  # if sizes are not equal
```
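On the loader side, each `__next__` now builds the description it used to `print(..., end='')` and returns it as the fifth tuple element, leaving the logging decision to the caller. A stripped-down image-only `__next__` showing just that change (`MiniLoadImages` is a heavily simplified stand-in; the byte read replaces `cv2.imread`):

```python
from pathlib import Path


class MiniLoadImages:
    # Minimal stand-in for utils.datasets.LoadImages (image branch only).
    def __init__(self, files):
        self.files, self.nf, self.count = list(files), len(files), 0

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        self.count += 1
        img0 = Path(path).read_bytes() if Path(path).exists() else None  # stand-in for cv2.imread
        assert img0 is not None, f'Image Not Found {path}'  # the message now carries the path
        s = f'image {self.count}/{self.nf} {path}: '        # built here, returned instead of printed
        return path, img0, img0, None, s                    # (path, img, im0, vid_cap, s)


# Usage (raises AssertionError with the offending path if a file is missing):
# for path, img, im0, cap, s in MiniLoadImages(['a.jpg']):
#     print(s + 'Done.')
```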
utils/general.py

```diff
@@ -42,6 +42,16 @@ FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLOv5 root directory


+def set_logging(name=None, verbose=True):
+    # Sets level and returns logger
+    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
+    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN)
+    return logging.getLogger(name)
+
+
+LOGGER = set_logging(__name__)  # define globally (used in train.py, val.py, detect.py, etc.)
+
+
 class Profile(contextlib.ContextDecorator):
     # Usage: @Profile() decorator or 'with Profile():' context manager
     def __enter__(self):

@@ -87,15 +97,9 @@ def methods(instance):
     return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]


-def set_logging(rank=-1, verbose=True):
-    logging.basicConfig(
-        format="%(message)s",
-        level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
-
-
 def print_args(name, opt):
     # Print argparser arguments
-    print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
+    LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))


 def init_seeds(seed=0):
```
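Because `logging.basicConfig` does nothing once the root logger already has handlers, defining `LOGGER = set_logging(__name__)` at import time is safe: any later `set_logging` call from another entry point just returns an additional named logger without reconfiguring anything. A standalone demonstration:

```python
import logging
import os


def set_logging(name=None, verbose=True):
    # Same shape as the new utils.general.set_logging
    rank = int(os.getenv('RANK', -1))
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN)
    return logging.getLogger(name)


LOGGER = set_logging('utils.general')     # first call: installs the root handler
other = set_logging('some.other.module')  # later calls: basicConfig is a no-op
LOGGER.info('shared message')             # both loggers propagate to the same root handler
other.info('also shared')
```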
val.py

```diff
@@ -25,9 +25,9 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

 from models.experimental import attempt_load
 from utils.datasets import create_dataloader
-from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \
-    check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \
-    increment_path, colorstr, print_args
+from utils.general import box_iou, coco80_to_coco91_class, colorstr, check_dataset, check_img_size, \
+    check_requirements, check_suffix, check_yaml, increment_path, non_max_suppression, print_args, scale_coords, \
+    xyxy2xywh, xywh2xyxy, LOGGER
 from utils.metrics import ap_per_class, ConfusionMatrix
 from utils.plots import output_to_target, plot_images, plot_val_study
 from utils.torch_utils import select_device, time_sync

@@ -242,18 +242,18 @@ def run(data,

     # Print results
     pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
-    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
+    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

     # Print results per class
     if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
         for i, c in enumerate(ap_class):
-            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
+            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

     # Print speeds
     t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
     if not training:
         shape = (batch_size, 3, imgsz, imgsz)
-        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
+        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

     # Plots
     if plots:

@@ -265,7 +265,7 @@ def run(data,
         w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
         anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
         pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
-        print(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
+        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
         with open(pred_json, 'w') as f:
             json.dump(jdict, f)

@@ -284,13 +284,13 @@ def run(data,
             eval.summarize()
             map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
         except Exception as e:
-            print(f'pycocotools unable to run: {e}')
+            LOGGER.info(f'pycocotools unable to run: {e}')

     # Return results
     model.float()  # for training
     if not training:
         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        print(f"Results saved to {colorstr('bold', save_dir)}{s}")
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
     maps = np.zeros(nc) + map
     for i, c in enumerate(ap_class):
         maps[c] = ap[i]

@@ -327,8 +327,7 @@ def parse_opt():

 def main(opt):
-    set_logging()
-    check_requirements(exclude=('tensorboard', 'thop'))
+    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

     if opt.task in ('train', 'val', 'test'):  # run normally
         run(**vars(opt))

@@ -346,7 +345,7 @@ def main(opt):
         f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to
         y = []  # y axis
         for i in x:  # img-size
-            print(f'\nRunning {f} point {i}...')
+            LOGGER.info(f'\nRunning {f} point {i}...')
             r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
                           iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False)
             y.append(r + t)  # results and times
```
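Several converted lines mix both formatting styles, e.g. `LOGGER.info(f'Speed: %.1fms ... at shape {shape}' % t)`. The f-string substitutes `{shape}` when the literal is built, leaving the `%.1f` placeholders for the old-style `%` operator to fill from the tuple `t`. In isolation:

```python
shape = (32, 3, 640, 640)
t = (0.5, 12.3, 1.1)  # pre-process, inference, NMS times in ms per image

# Stage 1: the f-string interpolates {shape} immediately.
template = f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}'
# Stage 2: the % operator fills the three remaining %.1f slots from t.
print(template % t)
# -> Speed: 0.5ms pre-process, 12.3ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)
```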