chocosaj BuildTools committed
Commit 3cb9ad4 · unverified · 1 Parent(s): fdbe527

Update FLOPs description (#3422)


* Update README.md

* Changing FLOPS to FLOPs.

Co-authored-by: BuildTools <[email protected]>

Files changed (5)
  1. README.md +2 -2
  2. models/yolo.py +3 -3
  3. requirements.txt +1 -1
  4. tutorial.ipynb +3 -3
  5. utils/torch_utils.py +6 -6
README.md CHANGED
@@ -30,7 +30,7 @@ This repository represents Ultralytics open-source research into future object d
 
 [assets]: https://github.com/ultralytics/yolov5/releases
 
-Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>test<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>V100 (ms) | |params<br><sup>(M) |FLOPS<br><sup>640 (B)
+Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>test<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>V100 (ms) | |params<br><sup>(M) |FLOPs<br><sup>640 (B)
 --- |--- |--- |--- |--- |--- |---|--- |---
 [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
 [YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
@@ -112,7 +112,7 @@ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, devi
 YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)
 
 Fusing layers...
-Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS
+Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs
 image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s)
 image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s)
 Results saved to runs/detect/exp2
models/yolo.py CHANGED
@@ -21,7 +21,7 @@ from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, s
     select_device, copy_attr
 
 try:
-    import thop  # for FLOPS computation
+    import thop  # for FLOPs computation
 except ImportError:
     thop = None
 
@@ -140,13 +140,13 @@ class Model(nn.Module):
             x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
 
             if profile:
-                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
+                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
                 t = time_synchronized()
                 for _ in range(10):
                     _ = m(x)
                 dt.append((time_synchronized() - t) * 100)
                 if m == self.model[0]:
-                    logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}")
+                    logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}")
                 logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
 
             x = m(x)  # run
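
Note on the renamed comments above: `thop.profile` returns a multiply-accumulate (MAC) count, which the profiling branch doubles and divides by 1e9 to report per-layer GFLOPs. A minimal standalone sketch of that same calculation, assuming `thop` is installed; the `nn.Conv2d` module and input shape are illustrative stand-ins for a YOLOv5 layer `m` and its feature map `x`:

```python
# Minimal sketch of the GFLOPs calculation used in the profiling branch above.
# Assumes thop is installed (it is an optional "extras" dependency, see requirements.txt).
import torch
import torch.nn as nn
import thop

m = nn.Conv2d(3, 32, kernel_size=3, padding=1)  # illustrative stand-in for a model layer
x = torch.zeros(1, 3, 640, 640)                 # illustrative stand-in for its input

macs = thop.profile(m, inputs=(x,), verbose=False)[0]  # thop reports multiply-accumulates (MACs)
gflops = macs / 1E9 * 2                                 # 1 MAC ~ 2 FLOPs; divide by 1e9 for giga
print(f'{gflops:.2f} GFLOPs')
```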
requirements.txt CHANGED
@@ -27,4 +27,4 @@ pandas
 # extras --------------------------------------
 # Cython  # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
 pycocotools>=2.0  # COCO mAP
-thop  # FLOPS computation
+thop  # FLOPs computation
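
`thop` stays in the optional extras block because the code never hard-requires it; both `models/yolo.py` and `utils/torch_utils.py` wrap the import in a guard and fall back to reporting 0 when it is missing. A minimal sketch of that pattern (the helper name `gflops_or_zero` is illustrative, not from the repo):

```python
# Optional-dependency guard used for thop in this repo: FLOPs reporting degrades
# gracefully to 0 instead of raising when thop is not installed.
try:
    import thop  # for FLOPs computation
except ImportError:
    thop = None

def gflops_or_zero(module, inputs):
    # Mirrors the `... if thop else 0` fallback seen in models/yolo.py.
    return thop.profile(module, inputs=inputs, verbose=False)[0] / 1E9 * 2 if thop else 0
```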
tutorial.ipynb CHANGED
@@ -611,7 +611,7 @@
 "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
 "\n",
 "Fusing layers... \n",
-"Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n",
+"Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n",
 "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n",
 "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n",
 "Results saved to runs/detect/exp\n",
@@ -734,7 +734,7 @@
 "100% 168M/168M [00:05<00:00, 32.3MB/s]\n",
 "\n",
 "Fusing layers... \n",
-"Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
+"Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n",
 "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n",
 "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
 "               Class     Images     Labels          P          R     mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n",
@@ -964,7 +964,7 @@
 "  22                [-1, 10]  1         0  models.common.Concat                    [1]\n",
 "  23                -1  1   1182720  models.common.C3                        [512, 512, 1, False]\n",
 "  24      [17, 20, 23]  1    229245  models.yolo.Detect                      [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
-"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n",
+"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n",
 "\n",
 "Transferred 362/362 items from yolov5s.pt\n",
 "Scaled weight_decay = 0.0005\n",
utils/torch_utils.py CHANGED
@@ -18,7 +18,7 @@ import torch.nn.functional as F
 import torchvision
 
 try:
-    import thop  # for FLOPS computation
+    import thop  # for FLOPs computation
 except ImportError:
     thop = None
 logger = logging.getLogger(__name__)
@@ -105,13 +105,13 @@ def profile(x, ops, n=100, device=None):
     x = x.to(device)
     x.requires_grad = True
     print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
-    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
+    print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
     for m in ops if isinstance(ops, list) else [ops]:
         m = m.to(device) if hasattr(m, 'to') else m  # device
         m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
         dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
         try:
-            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
+            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
         except:
             flops = 0
 
@@ -219,13 +219,13 @@ def model_info(model, verbose=False, img_size=640):
             print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                   (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
 
-    try:  # FLOPS
+    try:  # FLOPs
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
-        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
+        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPs
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
-        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
+        fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPs
     except (ImportError, Exception):
         fs = ''
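
The `model_info` hunk above also shows where the 640-pixel GFLOPs figures in the logs come from: the model is profiled once on the smallest valid input (stride × stride), and the result is scaled quadratically to the requested image size, since a fully convolutional model's FLOPs grow with input area. A minimal arithmetic sketch of that scaling; the stride-level value is back-derived from the 17.0 GFLOPs reported for YOLOv5s above, so it is illustrative rather than measured:

```python
# Sketch of the scaling done in model_info(): profile at stride x stride, then scale
# to the requested resolution. Numbers are illustrative (back-derived from the
# 17.0 GFLOPs that the logs above report for YOLOv5s at 640x640).
stride = 32
stride_gflops = 0.0425      # hypothetical thop result on a 1x3x32x32 input
img_size = [640, 640]

scale = img_size[0] / stride * img_size[1] / stride   # 20 * 20 = 400
fs = ', %.1f GFLOPs' % (stride_gflops * scale)        # -> ', 17.0 GFLOPs'
print(fs)
```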