Laughing committed
Commit 41ab1b2 (unverified) · Parents: 956511d 520f5de

Merge pull request #1 from ultralytics/master

.github/ISSUE_TEMPLATE/-question.md ADDED
@@ -0,0 +1,13 @@
+---
+name: "❓Question"
+about: Ask a general question
+title: ''
+labels: question
+assignees: ''
+
+---
+
+## ❔Question
+
+
+## Additional context
README.md CHANGED
@@ -41,9 +41,13 @@ $ pip install -U -r requirements.txt
 ## Tutorials
 
 * [Notebook](https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb) <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
+* [Kaggle](https://www.kaggle.com/ultralytics/yolov5-tutorial)
 * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)
-* [Google Cloud Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
-* [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)
+* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)
+* [ONNX and TorchScript Export](https://github.com/ultralytics/yolov5/issues/251)
+* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
+* [Google Cloud Quickstart](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+* [Docker Quickstart](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)
 
 
 ## Inference
detect.py CHANGED
@@ -2,7 +2,7 @@ import argparse
 
 import torch.backends.cudnn as cudnn
 
-from utils import google_utils
+from models.experimental import *
 from utils.datasets import *
 from utils.utils import *
 
@@ -20,12 +20,8 @@ def detect(save_img=False):
     half = device.type != 'cpu'  # half precision only supported on CUDA
 
     # Load model
-    google_utils.attempt_download(weights)
-    model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
-    # torch.save(torch.load(weights, map_location=device), weights)  # update model if SourceChangeWarning
-    # model.fuse()
-    model.to(device).eval()
-    imgsz = check_img_size(imgsz, s=model.model[-1].stride.max())  # check img_size
+    model = attempt_load(weights, map_location=device)  # load FP32 model
+    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
     if half:
        model.half()  # to FP16
 
@@ -123,10 +119,11 @@ def detect(save_img=False):
                     if isinstance(vid_writer, cv2.VideoWriter):
                         vid_writer.release()  # release previous video writer
 
+                    fourcc = 'mp4v'  # output video codec
                     fps = vid_cap.get(cv2.CAP_PROP_FPS)
                     w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                     h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                    vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
+                    vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                     vid_writer.write(im0)
 
         if save_txt or save_img:
@@ -139,26 +136,26 @@ def detect(save_img=False):
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='model.pt path')
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
     parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
     parser.add_argument('--output', type=str, default='inference/output', help='output folder')  # output folder
     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
     parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
     parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
-    parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--view-img', action='store_true', help='display results')
     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
     parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
     parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--update', action='store_true', help='update all models')
     opt = parser.parse_args()
     print(opt)
 
     with torch.no_grad():
-        detect()
-
-        # # Update all models
-        # for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
-        #    detect()
-        #    create_pretrained(opt.weights, opt.weights)
+        if opt.update:  # update all models (to fix SourceChangeWarning)
+            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
+                detect()
+                create_pretrained(opt.weights, opt.weights)
+        else:
+            detect()
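
The `--weights` flag now accepts one or more checkpoint paths (`nargs='+'`), which `attempt_load()` in models/experimental.py (below) resolves into either a single model or a mean ensemble. A quick sketch of what argparse hands over, with illustrative paths:

```python
import argparse

parser = argparse.ArgumentParser()
# nargs='+' collects one or more values into a list
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')

opt = parser.parse_args(['--weights', 'yolov5s.pt', 'yolov5x.pt'])
print(opt.weights)  # ['yolov5s.pt', 'yolov5x.pt'] -> attempt_load() builds an Ensemble
opt = parser.parse_args([])
print(opt.weights)  # 'yolov5s.pt' (a plain str), which attempt_load() also accepts
```
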
models/experimental.py CHANGED
@@ -1,6 +1,7 @@
 # This file contains experimental modules
 
 from models.common import *
+from utils import google_utils
 
 
 class CrossConv(nn.Module):
@@ -107,3 +108,34 @@ class MixConv2d(nn.Module):
 
     def forward(self, x):
         return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
+
+
+class Ensemble(nn.ModuleList):
+    # Ensemble of models
+    def __init__(self):
+        super(Ensemble, self).__init__()
+
+    def forward(self, x, augment=False):
+        y = []
+        for module in self:
+            y.append(module(x, augment)[0])
+        # y = torch.stack(y).max(0)[0]  # max ensemble
+        # y = torch.cat(y, 1)  # nms ensemble
+        y = torch.stack(y).mean(0)  # mean ensemble
+        return y, None  # inference, train output
+
+
+def attempt_load(weights, map_location=None):
+    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    model = Ensemble()
+    for w in weights if isinstance(weights, list) else [weights]:
+        google_utils.attempt_download(w)
+        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model
+
+    if len(model) == 1:
+        return model[-1]  # return model
+    else:
+        print('Ensemble created with %s\n' % weights)
+        for k in ['names', 'stride']:
+            setattr(model, k, getattr(model[-1], k))
+        return model  # return ensemble
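
The new `Ensemble` averages the inference output of every member, so several checkpoints behave as one detector. The same pattern in a minimal, self-contained sketch, with toy `nn.Linear` members standing in for YOLO models:

```python
import torch
import torch.nn as nn

class ToyEnsemble(nn.ModuleList):
    # Same idea as Ensemble above: run every member, average the outputs
    def forward(self, x):
        return torch.stack([m(x) for m in self]).mean(0)  # mean ensemble

ens = ToyEnsemble([nn.Linear(4, 2) for _ in range(3)])
print(ens(torch.randn(1, 4)).shape)  # torch.Size([1, 2])
```

Mean ensembling smooths member disagreement at the cost of running every model per image; the commented `max` and `cat` lines above are the alternative strategies left in for experimentation.
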
models/export.py CHANGED
@@ -61,7 +61,8 @@ if __name__ == '__main__':
         import coremltools as ct
 
         print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
-        model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape)])  # convert
+        # convert model from torchscript and apply pixel scaling as per detect.py
+        model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape, scale=1/255.0, bias=[0, 0, 0])])
         f = opt.weights.replace('.pt', '.mlmodel')  # filename
         model.save(f)
         print('CoreML export success, saved as %s' % f)
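
`ct.ImageType` applies `y = scale * x + bias` to each input pixel, so `scale=1/255.0` with zero bias reproduces the `/ 255.0` normalization detect.py performs in PyTorch; without it the CoreML model would see raw 0-255 inputs it was never trained on. A quick numeric check of that preprocessing (values illustrative):

```python
import numpy as np

x = np.array([0, 128, 255], dtype=np.float32)  # raw 8-bit pixel values
scale, bias = 1 / 255.0, 0.0                   # as passed to ct.ImageType
assert np.allclose(scale * x + bias, x / 255.0)  # matches detect.py's uint8 -> [0, 1] scaling
```
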
models/yolo.py CHANGED
@@ -48,6 +48,7 @@ class Model(nn.Module):
         if type(model_cfg) is dict:
             self.md = model_cfg  # model dict
         else:  # is *.yaml
+            import yaml  # for torch hub
             with open(model_cfg) as f:
                 self.md = yaml.load(f, Loader=yaml.FullLoader)  # model dict
 
@@ -141,14 +142,14 @@ class Model(nn.Module):
         # print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
 
     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-        print('Fusing layers...')
+        print('Fusing layers... ', end='')
         for m in self.model.modules():
             if type(m) is Conv:
                 m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn)  # update conv
                 m.bn = None  # remove batchnorm
                 m.forward = m.fuseforward  # update forward
         torch_utils.model_info(self)
-
+        return self
 
 def parse_model(md, ch):  # model_dict, input_channels(3)
     print('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
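
Returning `self` from `fuse()` is what allows the chained call in `attempt_load()` above, `torch.load(w, ...)['model'].float().fuse().eval()`; previously the chain would have ended in `None`. The pattern in isolation, on a toy class:

```python
class Net:
    def fuse(self):
        # ... fold BatchNorm into Conv weights in place ...
        return self  # enables method chaining

    def eval(self):
        return self

net = Net().fuse().eval()  # one expression instead of three statements
```
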
requirements.txt CHANGED
@@ -2,7 +2,7 @@
 Cython
 numpy==1.17
 opencv-python
-torch>=1.4
+torch>=1.5.1
 matplotlib
 pillow
 tensorboard
test.py CHANGED
@@ -1,9 +1,8 @@
 import argparse
 import json
 
-from utils import google_utils
+from models.experimental import *
 from utils.datasets import *
-from utils.utils import *
 
 
 def test(data,
@@ -18,32 +17,29 @@ def test(data,
          verbose=False,
          model=None,
          dataloader=None,
+         save_dir='',
          merge=False):
     # Initialize/load model and set device
-    if model is None:
-        training = False
+    training = model is not None
+    if training:  # called by train.py
+        device = next(model.parameters()).device  # get model device
+
+    else:  # called directly
         device = torch_utils.select_device(opt.device, batch_size=batch_size)
+        merge = opt.merge  # use Merge NMS
 
         # Remove previous
-        for f in glob.glob('test_batch*.jpg'):
+        for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
             os.remove(f)
 
         # Load model
-        google_utils.attempt_download(weights)
-        model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
-        torch_utils.model_info(model)
-        model.fuse()
-        model.to(device)
-        imgsz = check_img_size(imgsz, s=model.model[-1].stride.max())  # check img_size
+        model = attempt_load(weights, map_location=device)  # load FP32 model
+        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
 
         # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
         # if device.type != 'cpu' and torch.cuda.device_count() > 1:
         #     model = nn.DataParallel(model)
 
-    else:  # called by train.py
-        training = True
-        device = next(model.parameters()).device  # get model device
-
     # Half
     half = device.type != 'cpu' and torch.cuda.device_count() == 1  # half precision only supported on single-GPU
     if half:
@@ -58,12 +54,11 @@ def test(data,
     niou = iouv.numel()
 
     # Dataloader
-    if dataloader is None:  # not training
-        merge = opt.merge  # use Merge NMS
+    if not training:
         img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
         _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
         path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
-        dataloader = create_dataloader(path, imgsz, batch_size, int(max(model.stride)), opt,
+        dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt,
                                        hyp=None, augment=False, cache=False, pad=0.5, rect=True)[0]
 
     seen = 0
@@ -163,10 +158,10 @@ def test(data,
 
         # Plot images
         if batch_i < 1:
-            f = 'test_batch%g_gt.jpg' % batch_i  # filename
-            plot_images(img, targets, paths, f, names)  # ground truth
-            f = 'test_batch%g_pred.jpg' % batch_i
-            plot_images(img, output_to_target(output, width, height), paths, f, names)  # predictions
+            f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i)  # filename
+            plot_images(img, targets, paths, str(f), names)  # ground truth
+            f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
+            plot_images(img, output_to_target(output, width, height), paths, str(f), names)  # predictions
 
     # Compute statistics
     stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
@@ -196,7 +191,7 @@ def test(data,
     if save_json and map50 and len(jdict):
         imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]
         f = 'detections_val2017_%s_results.json' % \
-            (weights.split(os.sep)[-1].replace('.pt', '') if weights else '')  # filename
+            (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '')  # filename
         print('\nCOCO mAP with pycocotools... saving %s...' % f)
         with open(f, 'w') as file:
             json.dump(jdict, file)
@@ -229,7 +224,7 @@ def test(data,
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(prog='test.py')
-    parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='model.pt path')
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
     parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
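
The refactor replaces the mirrored `if model is None / else` branches with a single `training = model is not None` flag computed up front, so later sections can simply test `if not training:` without duplicating state. The control flow in a condensed, runnable sketch (toy stand-ins for the YOLO pieces):

```python
import torch
import torch.nn as nn

def evaluate(model=None, dataloader=None):
    training = model is not None  # truthy model => called from a training loop
    if training:  # called by train.py: reuse the caller's model and device
        device = next(model.parameters()).device
    else:  # called directly: load our own model
        device = torch.device('cpu')
        model = nn.Linear(4, 2).to(device).eval()  # stand-in for attempt_load(weights)
    if not training:  # standalone callers also build their own dataloader
        dataloader = [torch.randn(1, 4) for _ in range(2)]  # stand-in for create_dataloader(...)
    return sum(model(x).sum().item() for x in dataloader)

print(evaluate())  # standalone path
print(evaluate(model=nn.Linear(4, 2), dataloader=[torch.randn(1, 4)]))  # train.py path
```
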
train.py CHANGED
@@ -20,15 +20,10 @@ except:
     print('Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex')
     mixed_precision = False  # not installed
 
-wdir = 'weights' + os.sep  # weights dir
-os.makedirs(wdir, exist_ok=True)
-last = wdir + 'last.pt'
-best = wdir + 'best.pt'
-results_file = 'results.txt'
-
 # Hyperparameters
-hyp = {'lr0': 0.01,  # initial learning rate (SGD=1E-2, Adam=1E-3)
-       'momentum': 0.937,  # SGD momentum
+hyp = {'optimizer': 'SGD',  # ['adam', 'SGD', None] if none, default is SGD
+       'lr0': 0.01,  # initial learning rate (SGD=1E-2, Adam=1E-3)
+       'momentum': 0.937,  # SGD momentum/Adam beta1
        'weight_decay': 5e-4,  # optimizer weight decay
        'giou': 0.05,  # giou loss gain
        'cls': 0.58,  # cls loss gain
@@ -45,21 +40,24 @@ hyp = {'lr0': 0.01, # initial learning rate (SGD=1E-2, Adam=1E-3)
        'translate': 0.0,  # image translation (+/- fraction)
        'scale': 0.5,  # image scale (+/- gain)
        'shear': 0.0}  # image shear (+/- deg)
-print(hyp)
-
-# Overwrite hyp with hyp*.txt (optional)
-f = glob.glob('hyp*.txt')
-if f:
-    print('Using %s' % f[0])
-    for k, v in zip(hyp.keys(), np.loadtxt(f[0])):
-        hyp[k] = v
-
-# Print focal loss if gamma > 0
-if hyp['fl_gamma']:
-    print('Using FocalLoss(gamma=%g)' % hyp['fl_gamma'])
 
 
 def train(hyp):
+    print(f'Hyperparameters {hyp}')
+    log_dir = tb_writer.log_dir  # run directory
+    wdir = str(Path(log_dir) / 'weights') + os.sep  # weights directory
+
+    os.makedirs(wdir, exist_ok=True)
+    last = wdir + 'last.pt'
+    best = wdir + 'best.pt'
+    results_file = log_dir + os.sep + 'results.txt'
+
+    # Save run settings
+    with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
+        yaml.dump(hyp, f, sort_keys=False)
+    with open(Path(log_dir) / 'opt.yaml', 'w') as f:
+        yaml.dump(vars(opt), f, sort_keys=False)
+
     epochs = opt.epochs  # 300
     batch_size = opt.batch_size  # 64
     weights = opt.weights  # initial training weights
@@ -70,14 +68,15 @@ def train(hyp):
         data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
     train_path = data_dict['train']
     test_path = data_dict['val']
-    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
+    nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names'])  # number classes, names
+    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
 
     # Remove previous results
     for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
         os.remove(f)
 
     # Create model
-    model = Model(opt.cfg, nc=data_dict['nc']).to(device)
+    model = Model(opt.cfg, nc=nc).to(device)
 
     # Image sizes
     gs = int(max(model.stride))  # grid size (max stride)
@@ -97,15 +96,20 @@ def train(hyp):
         else:
             pg0.append(v)  # all else
 
-    optimizer = optim.Adam(pg0, lr=hyp['lr0']) if opt.adam else \
-        optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
+    if hyp['optimizer'] == 'adam':  # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
+        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
+    else:
+        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
+
     optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
     optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
+    print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
+    del pg0, pg1, pg2
+
     # Scheduler https://arxiv.org/pdf/1812.01187.pdf
     lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.9 + 0.1  # cosine
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
-    print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
-    del pg0, pg1, pg2
+    # plot_lr_scheduler(optimizer, scheduler, epochs, save_dir=log_dir)
 
     # Load Model
     google_utils.attempt_download(weights)
@@ -147,12 +151,7 @@ def train(hyp):
     if mixed_precision:
         model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
 
-
-    scheduler.last_epoch = start_epoch - 1  # do not move
-    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
-
-    # Initialize distributed training
+    # Distributed training
     if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
         dist.init_process_group(backend='nccl',  # distributed backend
                                 init_method='tcp://127.0.0.1:9999',  # init method
@@ -165,6 +164,7 @@ def train(hyp):
     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                             hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect)
     mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
+    nb = len(dataloader)  # number of batches
     assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Correct your labels or your model.' % (mlc, nc, opt.cfg)
 
     # Testloader
@@ -177,15 +177,15 @@ def train(hyp):
     model.hyp = hyp  # attach hyperparameters to model
     model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
-    model.names = data_dict['names']
+    model.names = names
 
     # Class frequency
     labels = np.concatenate(dataset.labels, 0)
     c = torch.tensor(labels[:, 0])  # classes
     # cf = torch.bincount(c.long(), minlength=nc) + 1.
     # model._initialize_biases(cf.to(device))
+    plot_labels(labels, save_dir=log_dir)
     if tb_writer:
-        plot_labels(labels)
         tb_writer.add_histogram('classes', c, 0)
 
     # Check anchors
@@ -193,14 +193,14 @@ def train(hyp):
     check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
 
     # Exponential moving average
-    ema = torch_utils.ModelEMA(model)
+    ema = torch_utils.ModelEMA(model, updates=start_epoch * nb / accumulate)
 
     # Start training
     t0 = time.time()
-    nb = len(dataloader)  # number of batches
-    n_burn = max(3 * nb, 1e3)  # burn-in iterations, max(3 epochs, 1k iterations)
+    nw = max(3 * nb, 1e3)  # number of warmup iterations, max(3 epochs, 1k iterations)
    maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
+    scheduler.last_epoch = start_epoch - 1  # do not move
     print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
     print('Using %g dataloader workers' % dataloader.num_workers)
     print('Starting training for %g epochs...' % epochs)
@@ -225,9 +225,9 @@ def train(hyp):
             ni = i + nb * epoch  # number integrated batches (since train start)
             imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
 
-            # Burn-in
-            if ni <= n_burn:
-                xi = [0, n_burn]  # x interp
+            # Warmup
+            if ni <= nw:
+                xi = [0, nw]  # x interp
                 # model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                 accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                 for j, x in enumerate(optimizer.param_groups):
@@ -275,7 +275,7 @@ def train(hyp):
 
             # Plot
             if ni < 3:
-                f = 'train_batch%g.jpg' % ni  # filename
+                f = str(Path(log_dir) / ('train_batch%g.jpg' % ni))  # filename
                 result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                 if tb_writer and result is not None:
                     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
@@ -296,7 +296,8 @@ def train(hyp):
                                            save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
                                            model=ema.ema,
                                            single_cls=opt.single_cls,
-                                           dataloader=testloader)
+                                           dataloader=testloader,
+                                           save_dir=log_dir)
 
         # Write
         with open(results_file, 'a') as f:
@@ -348,7 +349,7 @@ def train(hyp):
 
     # Finish
     if not opt.evolve:
-        plot_results()  # save as results.png
+        plot_results(save_dir=log_dir)  # save as results.png
     print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
     dist.destroy_process_group() if device.type != 'cpu' and torch.cuda.device_count() > 1 else None
     torch.cuda.empty_cache()
@@ -358,13 +359,15 @@ def train(hyp):
 if __name__ == '__main__':
     check_git_status()
     parser = argparse.ArgumentParser()
+    parser.add_argument('--cfg', type=str, default='models/yolov5s.yaml', help='model.yaml path')
+    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
+    parser.add_argument('--hyp', type=str, default='', help='hyp.yaml path (optional)')
     parser.add_argument('--epochs', type=int, default=300)
     parser.add_argument('--batch-size', type=int, default=16)
-    parser.add_argument('--cfg', type=str, default='models/yolov5s.yaml', help='*.cfg path')
-    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
     parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
     parser.add_argument('--rect', action='store_true', help='rectangular training')
-    parser.add_argument('--resume', action='store_true', help='resume training from last.pt')
+    parser.add_argument('--resume', nargs='?', const='get_last', default=False,
+                        help='resume from given path/to/last.pt, or most recent run if blank.')
     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
     parser.add_argument('--notest', action='store_true', help='only test final epoch')
     parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
@@ -374,13 +377,17 @@ if __name__ == '__main__':
     parser.add_argument('--weights', type=str, default='', help='initial weights path')
     parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--adam', action='store_true', help='use adam optimizer')
-    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%')
+    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
     parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
     opt = parser.parse_args()
+
+    last = get_latest_run() if opt.resume == 'get_last' else opt.resume  # resume from most recent run
+    if last and not opt.weights:
+        print(f'Resuming training from {last}')
    opt.weights = last if opt.resume and not opt.weights else opt.weights
     opt.cfg = check_file(opt.cfg)  # check file
     opt.data = check_file(opt.data)  # check file
+    opt.hyp = check_file(opt.hyp) if opt.hyp else ''  # check file
     print(opt)
     opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
     device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
@@ -389,8 +396,12 @@ if __name__ == '__main__':
 
     # Train
     if not opt.evolve:
-        tb_writer = SummaryWriter(comment=opt.name)
         print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
+        tb_writer = SummaryWriter(log_dir=increment_dir('runs/exp', opt.name))
+        if opt.hyp:  # update hyps
+            with open(opt.hyp) as f:
+                hyp.update(yaml.load(f, Loader=yaml.FullLoader))
+
        train(hyp)
 
     # Evolve hyperparameters (optional)
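
With this change each run writes into its own auto-incremented directory under runs/ (created by `increment_dir`, added in utils/utils.py below): weights, plots, and results land beside an exact copy of the hyperparameters and CLI options used. A sketch of the settings dump, with an illustrative run directory name:

```python
import os
import yaml
from pathlib import Path

log_dir = 'runs/exp0'  # what increment_dir('runs/exp', opt.name) might return on a fresh repo
os.makedirs(Path(log_dir) / 'weights', exist_ok=True)  # last.pt / best.pt go here

hyp = {'optimizer': 'SGD', 'lr0': 0.01, 'momentum': 0.937}  # subset of the dict above
with open(Path(log_dir) / 'hyp.yaml', 'w') as f:
    yaml.dump(hyp, f, sort_keys=False)  # reload later via: python train.py --hyp runs/exp0/hyp.yaml
```
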
utils/datasets.py CHANGED
@@ -26,6 +26,11 @@ for orientation in ExifTags.TAGS.keys():
         break
 
 
+def get_hash(files):
+    # Returns a single hash value of a list of files
+    return sum(os.path.getsize(f) for f in files)
+
+
 def exif_size(img):
     # Returns exif-corrected PIL size
     s = img.size  # (width, height)
@@ -48,7 +53,7 @@ def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=Fa
                                   rect=rect,  # rectangular training
                                   cache_images=cache,
                                   single_cls=opt.single_cls,
-                                  stride=stride,
+                                  stride=int(stride),
                                   pad=pad)
 
     batch_size = min(batch_size, len(dataset))
@@ -280,19 +285,21 @@ class LoadImagesAndLabels(Dataset): # for training/testing
     def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                  cache_images=False, single_cls=False, stride=32, pad=0.0):
         try:
-            path = str(Path(path))  # os-agnostic
-            parent = str(Path(path).parent) + os.sep
-            if os.path.isfile(path):  # file
-                with open(path, 'r') as f:
-                    f = f.read().splitlines()
-                f = [x.replace('./', parent) if x.startswith('./') else x for x in f]  # local to global path
-            elif os.path.isdir(path):  # folder
-                f = glob.iglob(path + os.sep + '*.*')
-            else:
-                raise Exception('%s does not exist' % path)
+            f = []  # image files
+            for p in path if isinstance(path, list) else [path]:
+                p = str(Path(p))  # os-agnostic
+                parent = str(Path(p).parent) + os.sep
+                if os.path.isfile(p):  # file
+                    with open(p, 'r') as t:
+                        t = t.read().splitlines()
+                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
+                elif os.path.isdir(p):  # folder
+                    f += glob.iglob(p + os.sep + '*.*')
+                else:
+                    raise Exception('%s does not exist' % p)
             self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
-        except:
-            raise Exception('Error loading data from %s. See %s' % (path, help_url))
+        except Exception as e:
+            raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
 
         n = len(self.img_files)
         assert n > 0, 'No images found in %s. See %s' % (path, help_url)
@@ -311,20 +318,22 @@ class LoadImagesAndLabels(Dataset): # for training/testing
         self.stride = stride
 
         # Define labels
-        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
-                            for x in self.img_files]
-
-        # Read image shapes (wh)
-        sp = path.replace('.txt', '') + '.shapes'  # shapefile path
-        try:
-            with open(sp, 'r') as f:  # read existing shapefile
-                s = [x.split() for x in f.read().splitlines()]
-                assert len(s) == n, 'Shapefile out of sync'
-        except:
-            s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
-            np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)
-
-        self.shapes = np.array(s, dtype=np.float64)
+        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
+                            self.img_files]
+
+        # Check cache
+        cache_path = str(Path(self.label_files[0]).parent) + '.cache'  # cached labels
+        if os.path.isfile(cache_path):
+            cache = torch.load(cache_path)  # load
+            if cache['hash'] != get_hash(self.label_files + self.img_files):  # dataset changed
+                cache = self.cache_labels(cache_path)  # re-cache
+        else:
+            cache = self.cache_labels(cache_path)  # cache
+
+        # Get labels
+        labels, shapes = zip(*[cache[x] for x in self.img_files])
+        self.shapes = np.array(shapes, dtype=np.float64)
+        self.labels = list(labels)
 
         # Rectangular Training https://github.com/ultralytics/yolov3/issues/232
         if self.rect:
@@ -350,33 +359,11 @@ class LoadImagesAndLabels(Dataset): # for training/testing
             self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
 
         # Cache labels
-        self.imgs = [None] * n
-        self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
         create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
         nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
-        np_labels_path = str(Path(self.label_files[0]).parent) + '.npy'  # saved labels in *.npy file
-        if os.path.isfile(np_labels_path):
-            s = np_labels_path  # print string
-            x = np.load(np_labels_path, allow_pickle=True)
-            if len(x) == n:
-                self.labels = x
-                labels_loaded = True
-        else:
-            s = path.replace('images', 'labels')
-
         pbar = tqdm(self.label_files)
         for i, file in enumerate(pbar):
-            if labels_loaded:
-                l = self.labels[i]
-                # np.savetxt(file, l, '%g')  # save *.txt from *.npy file
-            else:
-                try:
-                    with open(file, 'r') as f:
-                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
-                except:
-                    nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
-                    continue
-
+            l = self.labels[i]  # label
             if l.shape[0]:
                 assert l.shape[1] == 5, '> 5 label columns: %s' % file
                 assert (l >= 0).all(), 'negative labels: %s' % file
@@ -422,15 +409,13 @@ class LoadImagesAndLabels(Dataset): # for training/testing
                 ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                 # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove
 
-            pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
-                s, nf, nm, ne, nd, n)
-        assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
-        if not labels_loaded and n > 1000:
-            print('Saving labels to %s for faster future loading' % np_labels_path)
-            np.save(np_labels_path, self.labels)  # save for next time
+            pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
+                cache_path, nf, nm, ne, nd, n)
+        assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
 
         # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
-        if cache_images:  # if training
+        self.imgs = [None] * n
+        if cache_images:
             gb = 0  # Gigabytes of cached images
             pbar = tqdm(range(len(self.img_files)), desc='Caching images')
             self.img_hw0, self.img_hw = [None] * n, [None] * n
@@ -439,15 +424,30 @@ class LoadImagesAndLabels(Dataset): # for training/testing
                 gb += self.imgs[i].nbytes
                 pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
 
-        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
-        detect_corrupted_images = False
-        if detect_corrupted_images:
-            from skimage import io  # conda install -c conda-forge scikit-image
-            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
-                try:
-                    _ = io.imread(file)
-                except:
-                    print('Corrupted image detected: %s' % file)
+    def cache_labels(self, path='labels.cache'):
+        # Cache dataset labels, check images and read shapes
+        x = {}  # dict
+        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
+        for (img, label) in pbar:
+            try:
+                l = []
+                image = Image.open(img)
+                image.verify()  # PIL verify
+                # _ = io.imread(img)  # skimage verify (from skimage import io)
+                shape = exif_size(image)  # image size
+                if os.path.isfile(label):
+                    with open(label, 'r') as f:
+                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)  # labels
+                if len(l) == 0:
+                    l = np.zeros((0, 5), dtype=np.float32)
+                x[img] = [l, shape]
+            except Exception as e:
+                x[img] = None
+                print('WARNING: %s: %s' % (img, e))
+
+        x['hash'] = get_hash(self.label_files + self.img_files)
+        torch.save(x, path)  # save for next time
+        return x
 
     def __len__(self):
         return len(self.img_files)
@@ -679,8 +679,8 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale
         dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
     elif scaleFill:  # stretch
         dw, dh = 0.0, 0.0
-        new_unpad = new_shape
-        ratio = new_shape[0] / shape[1], new_shape[1] / shape[0]  # width, height ratios
+        new_unpad = (new_shape[1], new_shape[0])
+        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
 
     dw /= 2  # divide padding into 2 sides
     dh /= 2
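
The single `.cache` file replaces the old `.shapes` and `.npy` sidecars: one scan collects labels and EXIF-corrected shapes, `torch.save` persists them, and the stored 'hash' (here just the summed file sizes) invalidates the cache automatically when any image or label file changes. The invalidation logic in isolation:

```python
import os
import torch

def get_hash(files):
    # Cheap change detector used by datasets.py: total byte size of all files
    return sum(os.path.getsize(f) for f in files)

def load_or_build(cache_path, files, build_fn):
    if os.path.isfile(cache_path):
        cache = torch.load(cache_path)
        if cache['hash'] == get_hash(files):  # nothing changed on disk
            return cache
    cache = build_fn(files)  # the expensive scan
    cache['hash'] = get_hash(files)
    torch.save(cache, cache_path)  # reuse on the next run
    return cache
```

Summing file sizes misses edits that keep a file's size constant, but it is near-free and catches the common cases (files added, removed, or re-labeled).
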
utils/torch_utils.py CHANGED
@@ -76,16 +76,36 @@ def find_modules(model, mclass=nn.Conv2d):
     return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
 
 
+def sparsity(model):
+    # Return global model sparsity
+    a, b = 0., 0.
+    for p in model.parameters():
+        a += p.numel()
+        b += (p == 0).sum()
+    return b / a
+
+
+def prune(model, amount=0.3):
+    # Prune model to requested global sparsity
+    import torch.nn.utils.prune as prune
+    print('Pruning model... ', end='')
+    for name, m in model.named_modules():
+        if isinstance(m, nn.Conv2d):
+            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
+            prune.remove(m, 'weight')  # make permanent
+    print(' %.3g global sparsity' % sparsity(model))
+
+
 def fuse_conv_and_bn(conv, bn):
     # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
     with torch.no_grad():
         # init
-        fusedconv = torch.nn.Conv2d(conv.in_channels,
-                                    conv.out_channels,
-                                    kernel_size=conv.kernel_size,
-                                    stride=conv.stride,
-                                    padding=conv.padding,
-                                    bias=True)
+        fusedconv = nn.Conv2d(conv.in_channels,
+                              conv.out_channels,
+                              kernel_size=conv.kernel_size,
+                              stride=conv.stride,
+                              padding=conv.padding,
+                              bias=True).to(conv.weight.device)
 
         # prepare filters
         w_conv = conv.weight.clone().view(conv.out_channels, -1)
@@ -93,10 +113,7 @@ def fuse_conv_and_bn(conv, bn):
         fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
 
         # prepare spatial bias
-        if conv.bias is not None:
-            b_conv = conv.bias
-        else:
-            b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)
+        b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
         b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
         fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
 
@@ -139,8 +156,8 @@ def load_classifier(name='resnet101', n=2):
 
     # Reshape output to n classes
     filters = model.fc.weight.shape[1]
-    model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)
-    model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)
+    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
+    model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
     model.fc.out_features = n
     return model
 
@@ -174,15 +191,11 @@ class ModelEMA:
     I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
     """
 
-    def __init__(self, model, decay=0.9999, device=''):
+    def __init__(self, model, decay=0.9999, updates=0):
         # Create EMA
-        self.ema = deepcopy(model.module if is_parallel(model) else model)  # FP32 EMA
-        self.ema.eval()
-        self.updates = 0  # number of EMA updates
+        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
+        self.updates = updates  # number of EMA updates
         self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
-        self.device = device  # perform ema on different device from model if set
-        if device:
-            self.ema.to(device)
         for p in self.ema.parameters():
             p.requires_grad_(False)
 
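
The new helpers wrap `torch.nn.utils.prune`: L1-unstructured pruning zeroes the smallest-magnitude conv weights, `prune.remove` bakes the mask into the tensor, and `sparsity()` reports the global zero fraction. A standalone check on a toy conv stack (not a YOLO model):

```python
import torch.nn as nn
import torch.nn.utils.prune as tp

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.Conv2d(16, 16, 3))
for m in model.modules():
    if isinstance(m, nn.Conv2d):
        tp.l1_unstructured(m, name='weight', amount=0.3)  # zero the smallest 30% of weights
        tp.remove(m, 'weight')  # make the zeros permanent (plain tensor again)

zeros = sum((p == 0).sum().item() for p in model.parameters())
total = sum(p.numel() for p in model.parameters())
print('global sparsity: %.3f' % (zeros / total))  # just under 0.3 (biases are not pruned)
```
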
utils/utils.py CHANGED
@@ -37,6 +37,12 @@ def init_seeds(seed=0):
     torch_utils.init_seeds(seed=seed)
 
 
+def get_latest_run(search_dir='./runs'):
+    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
+    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
+    return max(last_list, key=os.path.getctime)
+
+
 def check_git_status():
     # Suggest 'git pull' if repo is out of date
     if platform in ['linux', 'darwin']:
@@ -173,7 +179,7 @@ def xywh2xyxy(x):
 def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
     # Rescale coords (xyxy) from img1_shape to img0_shape
     if ratio_pad is None:  # calculate from img0_shape
-        gain = max(img1_shape) / max(img0_shape)  # gain = old / new
+        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
     else:
         gain = ratio_pad[0][0]
@@ -898,6 +904,16 @@ def output_to_target(output, width, height):
     return np.array(targets)
 
 
+def increment_dir(dir, comment=''):
+    # Increments a directory runs/exp1 --> runs/exp2_comment
+    n = 0  # number
+    d = sorted(glob.glob(dir + '*'))  # directories
+    if len(d):
+        d = d[-1].replace(dir, '')
+        n = int(d[:d.find('_')] if '_' in d else d) + 1  # increment
+    return dir + str(n) + ('_' + comment if comment else '')
+
+
 # Plotting functions ---------------------------------------------------------------------------------------------------
 def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
     # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
@@ -1028,7 +1044,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     return mosaic
 
 
-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
     # Plot LR simulating training for full epochs
     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
     y = []
@@ -1042,7 +1058,7 @@ def plot_lr_scheduler(optimizer, scheduler, epochs=300):
     plt.xlim(0, epochs)
     plt.ylim(0)
     plt.tight_layout()
-    plt.savefig('LR.png', dpi=200)
+    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
 
 
 def plot_test_txt():  # from utils.utils import *; plot_test()
@@ -1107,7 +1123,7 @@ def plot_study_txt(f='study.txt', x=None): # from utils.utils import *; plot_st
     plt.savefig(f.replace('.txt', '.png'), dpi=200)
 
 
-def plot_labels(labels):
+def plot_labels(labels, save_dir=''):
     # plot dataset labels
     c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
@@ -1128,7 +1144,7 @@ def plot_labels(labels):
     ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
     ax[2].set_xlabel('width')
     ax[2].set_ylabel('height')
-    plt.savefig('labels.png', dpi=200)
+    plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
     plt.close()
 
 
@@ -1174,7 +1190,8 @@ def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_re
     fig.savefig(f.replace('.txt', '.png'), dpi=200)
 
 
-def plot_results(start=0, stop=0, bucket='', id=(), labels=()):  # from utils.utils import *; plot_results()
+def plot_results(start=0, stop=0, bucket='', id=(), labels=(),
+                 save_dir=''):  # from utils.utils import *; plot_results()
     # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
     fig, ax = plt.subplots(2, 5, figsize=(12, 6))
     ax = ax.ravel()
@@ -1184,7 +1201,7 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=()): # from utils.ut
         os.system('rm -rf storage.googleapis.com')
         files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
     else:
-        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
+        files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
     for fi, f in enumerate(files):
         try:
             results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
@@ -1205,4 +1222,4 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=()): # from utils.ut
 
     fig.tight_layout()
     ax[1].legend()
-    fig.savefig('results.png', dpi=200)
+    fig.savefig(Path(save_dir) / 'results.png', dpi=200)
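
The `scale_coords` fix matters for rectangular (non-square) inference: `max(img1_shape) / max(img0_shape)` can pair the wrong axes, while taking `min()` of the per-axis ratios recovers the gain `letterbox()` actually applied. A worked check with illustrative shapes:

```python
# (h, w) shapes: img1 = 384x640 letterboxed network input, img0 = original frame
img1_shape = (384, 640)

img0_shape = (720, 1280)  # landscape frame: both formulas agree
assert max(img1_shape) / max(img0_shape) == 0.5
assert min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) == 0.5

img0_shape = (1280, 720)  # portrait frame: the old formula pairs width with height
print(max(img1_shape) / max(img0_shape))  # 0.5 -- wrong, boxes would be mis-scaled
print(min(img1_shape[0] / img0_shape[0],
          img1_shape[1] / img0_shape[1]))  # 0.3 -- the true letterbox gain
```
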