Merge branch 'master' into advanced_logging
Files changed:
- .github/ISSUE_TEMPLATE/--bug-report.md +24 -10
- Dockerfile +4 -4
- README.md +24 -18
- detect.py +14 -6
- models/common.py +2 -1
- models/{yolov3-spp.yaml → hub/yolov3-spp.yaml} +1 -2
- models/hub/yolov5-fpn.yaml +45 -0
- models/hub/yolov5-panet.yaml +52 -0
- models/onnx_export.py +2 -2
- models/yolo.py +9 -8
- models/yolov5l.yaml +28 -21
- models/yolov5m.yaml +28 -21
- models/yolov5s.yaml +28 -21
- models/yolov5x.yaml +28 -21
- requirements.txt +2 -2
- test.py +24 -33
- train.py +24 -36
- utils/datasets.py +26 -5
- utils/torch_utils.py +12 -7
- utils/utils.py +104 -79
.github/ISSUE_TEMPLATE/--bug-report.md
CHANGED
@@ -7,29 +7,43 @@ assignees: ''
 ---
 
-Before submitting a bug report, please
+Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable, and we can not help you:
+- **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo
+- **Common dataset**: coco.yaml or coco128.yaml
+- **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#reproduce-our-environment
 
-If this is a custom training question we suggest you include your `train*.jpg`, `test*.jpg` and `results.png` figures.
+If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`.
 
 
 ## 🐛 Bug
 A clear and concise description of what the bug is.
 
+## To Reproduce (REQUIRED)
+
+Input:
+```
+import torch
+
+a = torch.tensor([5])
+c = a / 0
+```
+
+Output:
 ```
+Traceback (most recent call last):
+  File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code
+    exec(code_obj, self.user_global_ns, self.user_ns)
+  File "<ipython-input-5-be04c762b799>", line 5, in <module>
+    c = a / 0
+RuntimeError: ZeroDivisionError
 ```
 
 
 ## Expected behavior
 A clear and concise description of what you expected to happen.
 
 ## Environment
 If applicable, add screenshots to help explain your problem.
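The template points users to `utils.plot_results()` to produce the `results.png` figure it asks for. A minimal usage sketch, assuming it is run from the repository root where `results.txt` from a training run exists (module path as at this commit):

```python
# Hedged sketch: plot_results() reads the results*.txt files written by train.py
# and saves a results.png summary plot next to them.
from utils.utils import plot_results

plot_results()  # produces results.png to attach to the bug report
```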
Dockerfile
CHANGED
@@ -1,9 +1,6 @@
 # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
 FROM nvcr.io/nvidia/pytorch:20.03-py3
-
-# Install dependencies (pip or conda)
 RUN pip install -U gsutil
-# RUN pip install -U -r requirements.txt
 
 # Create working directory
 RUN mkdir -p /usr/src/app

@@ -12,6 +9,9 @@ WORKDIR /usr/src/app
 # Copy contents
 COPY . /usr/src/app
 
+# Install dependencies (pip or conda)
+#RUN pip install -r requirements.txt
+
 # Copy weights
 #RUN python3 -c "from models import *; \
 #attempt_download('weights/yolov5s.pt'); \

@@ -41,7 +41,7 @@ COPY . /usr/src/app
 
 # Bash into running container
 # sudo docker container exec -it ba65811811ab bash
-# python -c "from utils.utils import *;
+# python -c "from utils.utils import *; create_pretrained('weights/last.pt')" && gsutil cp weights/pretrained.pt gs://*
 
 # Bash into stopped container
 # sudo docker commit 6d525e299258 user/test_image && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco --entrypoint=sh user/test_image
README.md
CHANGED
@@ -4,26 +4,29 @@
 
 This repository represents Ultralytics open-source research into future object detection methods, and incorporates our lessons learned and best practices evolved over training thousands of models on custom client datasets with our previous YOLO repository https://github.com/ultralytics/yolov3. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk.
 
-<img src="https://user-images.githubusercontent.com/26833433/
 
+[ updates: new heads, reduced parameters, faster inference and improved mAP [364fcfd](https://github.com/ultralytics/yolov5/commit/364fcfd7dba53f46edd4f04c037a039c0a287972).
+- **June 19, 2020**: [FP16](https://pytorch.org/docs/stable/nn.html#torch.nn.Module.half) as new default for smaller checkpoints and faster inference [d4c6674](https://github.com/ultralytics/yolov5/commit/d4c6674c98e19df4c40e33a777610a18d1961145).
+- **June 9, 2020**: [CSP](https://github.com/WongKinYiu/CrossStagePartialNetworks) updates: improved speed, size, and accuracy (credit to @WongKinYiu for CSP).
+- **May 27, 2020**: Public release of repo. YOLOv5 models are SOTA among all known YOLO implementations.
+- **April 1, 2020**: Start development of future [YOLOv3](https://github.com/ultralytics/yolov3)/[YOLOv4](https://github.com/AlexeyAB/darknet)-based PyTorch models in a range of compound-scaled sizes.
 
 
 ## Pretrained Checkpoints
 
-| Model | AP<sup>val</sup> | AP<sup>test</sup> | AP<sub>50</sub> |
+| Model | AP<sup>val</sup> | AP<sup>test</sup> | AP<sub>50</sub> | Speed<sub>GPU</sub> | FPS<sub>GPU</sub> || params | FLOPS |
 |---------- |------ |------ |------ | -------- | ------| ------ |------ | :------: |
-| YOLOv3-SPP
+| [YOLOv5s](https://drive.google.com/open?id=1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J) | 36.6 | 36.6 | 55.8 | **2.1ms** | **476** || 7.5M | 13.2B
+| [YOLOv5m](https://drive.google.com/open?id=1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J) | 43.4 | 43.4 | 62.4 | 3.0ms | 333 || 21.8M | 39.4B
+| [YOLOv5l](https://drive.google.com/open?id=1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J) | 46.6 | 46.7 | 65.4 | 3.9ms | 256 || 47.8M | 88.1B
+| [YOLOv5x](https://drive.google.com/open?id=1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J) | **48.4** | **48.4** | **66.9** | 6.1ms | 164 || 89.0M | 166.4B
+| [YOLOv3-SPP](https://drive.google.com/open?id=1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J) | 45.6 | 45.5 | 65.2 | 4.5ms | 222 || 63.0M | 118.0B
 
 ** AP<sup>test</sup> denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results in the table denote val2017 accuracy.
 ** All AP numbers are for single-model single-scale without ensemble or test-time augmentation. Reproduce by `python test.py --img 736 --conf 0.001`
+** Speed<sub>GPU</sub> measures end-to-end time per image averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) instance with one V100 GPU, and includes image preprocessing, PyTorch FP16 image inference at --batch-size 32 --img-size 640, postprocessing and NMS. Average NMS time included in this chart is 1-2ms/img. Reproduce by `python test.py --img 640 --conf 0.1`
 ** All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation).
 

@@ -37,10 +40,10 @@ $ pip install -U -r requirements.txt
 
 ## Tutorials
 
-* <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
+* [Notebook](https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb) <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
 * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)
 * [Google Cloud Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
 * [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)
 
 
 ## Inference

@@ -74,9 +77,12 @@ Results saved to /content/yolov5/inference/output
 
 ## Reproduce Our Training
 
+Download [COCO](https://github.com/ultralytics/yolov5/blob/master/data/get_coco2017.sh), install [Apex](https://github.com/NVIDIA/apex) and run the command below. Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices).
 ```bash
-$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
+$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64
+                                         yolov5m                                48
+                                         yolov5l                                32
+                                         yolov5x                                16
 ```
 <img src="https://user-images.githubusercontent.com/26833433/84186698-c4d54d00-aa45-11ea-9bde-c632c1230ccd.png" width="900">
 

@@ -85,20 +91,20 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
 
 To access an up-to-date working environment (with all dependencies including CUDA/CUDNN, Python and PyTorch preinstalled), consider a:
 
+- **Google Cloud** Deep Learning VM with $300 free credit offer: See our [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
 - **Google Colab Notebook** with 12 hours of free GPU time. <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
 - **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)
 
 
 ## Citation
 
+[](https://zenodo.org/badge/latestdoi/264818686)
 
 
 ## About Us
 
 Ultralytics is a U.S.-based particle physics and AI startup with over 6 years of expertise supporting government, academic and business clients. We offer a wide range of vision AI services, spanning from simple expert advice up to delivery of fully customized, end-to-end production solutions, including:
+- **Cloud-based AI** systems operating on **hundreds of HD video streams in realtime.**
 - **Edge AI** integrated into custom iOS and Android apps for realtime **30 FPS video inference.**
 - **Custom data training**, hyperparameter evolution, and model exportation to any destination.
 
detect.py
CHANGED
@@ -1,5 +1,8 @@
 import argparse
 
+import torch.backends.cudnn as cudnn
+
+from utils import google_utils
 from utils.datasets import *
 from utils.utils import *
 

@@ -36,14 +39,14 @@ def detect(save_img=False):
     vid_path, vid_writer = None, None
     if webcam:
         view_img = True
+        cudnn.benchmark = True  # set True to speed up constant image size inference
         dataset = LoadStreams(source, img_size=imgsz)
     else:
         save_img = True
         dataset = LoadImages(source, img_size=imgsz)
 
     # Get names and colors
-    names = model.names if hasattr(model, '
+    names = model.module.names if hasattr(model, 'module') else model.names
     colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
 
     # Run inference

@@ -62,8 +65,7 @@ def detect(save_img=False):
         pred = model(img, augment=opt.augment)[0]
 
         # Apply NMS
-        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
-                                   fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)
+        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
         t2 = torch_utils.time_synchronized()
 
         # Apply Classifier

@@ -78,6 +80,7 @@ def detect(save_img=False):
                 p, s, im0 = path, '', im0s
 
             save_path = str(Path(out) / Path(p).name)
+            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
             s += '%gx%g ' % img.shape[2:]  # print string
             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
             if det is not None and len(det):

@@ -93,8 +96,8 @@ def detect(save_img=False):
                 for *xyxy, conf, cls in det:
                     if save_txt:  # Write to file
                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-                        with open(
+                        with open(txt_path + '.txt', 'a') as f:
+                            f.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format
 
                     if save_img or view_img:  # Add bbox to image
                         label = '%s %.2f' % (names[int(cls)], conf)

@@ -154,3 +157,8 @@ if __name__ == '__main__':
 
     with torch.no_grad():
         detect()
+
+    # Update all models
+    # for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
+    #    detect()
+    #    create_pretrained(opt.weights, opt.weights)
models/common.py
CHANGED
@@ -13,7 +13,8 @@ class Conv(nn.Module):
     # Standard convolution
     def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
         super(Conv, self).__init__()
+        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # padding
+        self.conv = nn.Conv2d(c1, c2, k, s, p, groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
         self.act = nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()
 
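The added padding rule keeps the spatial size unchanged for odd kernels: `k // 2` for an integer kernel, or per-dimension `x // 2` for a tuple. A quick standalone check of that behaviour (not the repo's `Conv` class itself):

```python
import torch
import torch.nn as nn

def autopad(k):
    # 'same'-style padding: k=1 -> p=0, k=3 -> p=1, k=(3, 5) -> p=[1, 2]
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]

x = torch.zeros(1, 16, 64, 64)
for k in (1, 3, 5):
    conv = nn.Conv2d(16, 32, k, stride=1, padding=autopad(k), bias=False)
    print(k, conv(x).shape)  # spatial size stays 64x64 for every odd kernel
```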
models/{yolov3-spp.yaml → hub/yolov3-spp.yaml}
RENAMED
@@ -25,8 +25,7 @@ backbone:
   [-1, 4, Bottleneck, [1024]],  # 10
  ]
 
-#
-# na = len(anchors[0])
+# YOLOv3-SPP head
 head:
  [[-1, 1, Bottleneck, [1024, False]],  # 11
   [-1, 1, SPP, [512, [5, 9, 13]]],
models/hub/yolov5-fpn.yaml
ADDED
@@ -0,0 +1,45 @@
# parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, Bottleneck, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, BottleneckCSP, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, BottleneckCSP, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
   [-1, 6, BottleneckCSP, [1024]],  # 9
  ]

# YOLOv5 FPN head
head:
  [[-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 11 (P5/32-large)

   [-2, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, Conv, [512, 1, 1]],
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 16 (P4/16-medium)

   [-2, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, Conv, [256, 1, 1]],
   [-1, 3, BottleneckCSP, [256, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 21 (P3/8-small)

   [[], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
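Each detection `nn.Conv2d` in this head outputs `na * (nc + 5)` channels: per anchor, 4 box coordinates plus 1 objectness score plus `nc` class scores. With the defaults in this file the count works out as follows:

```python
na = 3   # anchors per detection layer (three width,height pairs per scale above)
nc = 80  # number of classes
print(na * (nc + 5))  # 255 output channels for each of the P3/P4/P5 detection convs
```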
models/hub/yolov5-panet.yaml
ADDED
@@ -0,0 +1,52 @@
# parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [116,90, 156,198, 373,326]  # P5/32
  - [30,61, 62,45, 59,119]  # P4/16
  - [10,13, 16,30, 33,23]  # P3/8

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, BottleneckCSP, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, BottleneckCSP, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, BottleneckCSP, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
  ]

# YOLOv5 PANet head
head:
  [[-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, Conv, [512, 1, 1]],  # 10

   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, Conv, [256, 1, 1]],  # 14

   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, BottleneckCSP, [256, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 18 (P3/8-small)

   [-2, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 22 (P4/16-medium)

   [-2, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 26 (P5/32-large)

   [[], 1, Detect, [nc, anchors]],  # Detect(P5, P4, P3)
  ]
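Each row in these YAMLs is `[from, number, module, args]`: `from` gives the index (or indices) of the layer(s) whose output feeds this one, with `-1` meaning the previous layer, so `[[-1, 6], 1, Concat, [1]]` concatenates the previous output with backbone layer 6 along the channel dimension. A toy parse of one row to make the indexing concrete (hypothetical shapes, not the repo's parser):

```python
import torch

# outputs of earlier layers, keyed by layer index (hypothetical shapes)
y = {6: torch.zeros(1, 512, 40, 40)}   # backbone P4 feature map
prev = torch.zeros(1, 256, 40, 40)     # output of the previous layer (index -1)

frm = [-1, 6]                          # the 'from' field of [[-1, 6], 1, Concat, [1]]
inputs = [prev if j == -1 else y[j] for j in frm]
out = torch.cat(inputs, dim=1)         # Concat with args [1] -> channel dimension
print(out.shape)                       # torch.Size([1, 768, 40, 40])
```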
models/onnx_export.py
CHANGED
@@ -1,7 +1,6 @@
 """Exports a pytorch *.pt model to *.onnx format
 
 Usage:
-    import torch
     $ export PYTHONPATH="$PWD" && python models/onnx_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
 """
 

@@ -10,6 +9,7 @@ import argparse
 import onnx
 
 from models.common import *
+from utils import google_utils
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()

@@ -25,7 +25,7 @@ if __name__ == '__main__':
 
     # Load pytorch model
     google_utils.attempt_download(opt.weights)
-    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model']
+    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
     model.eval()
     model.fuse()
 
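The `.float()` cast matters because checkpoints are now saved in FP16 (see the June 19 FP16 change noted in the README), while export here is done in full precision. A hedged sketch of the load-and-export flow with placeholder paths and sizes (not the full script):

```python
import torch

weights = 'weights/yolov5s.pt'  # placeholder checkpoint path
model = torch.load(weights, map_location=torch.device('cpu'))['model'].float()  # FP16 checkpoint -> FP32
model.eval()

img = torch.zeros(1, 3, 640, 640)  # dummy input at the desired export size
torch.onnx.export(model, img, weights.replace('.pt', '.onnx'), verbose=False, opset_version=11)
```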
models/yolo.py
CHANGED
@@ -1,7 +1,5 @@
 import argparse
 
-import yaml
-
 from models.experimental import *
 
 

@@ -61,8 +59,9 @@ class Model(nn.Module):
 
         # Build strides, anchors
         m = self.model[-1]  # Detect()
-        m.stride = torch.tensor([
+        m.stride = torch.tensor([128 / x.shape[-2] for x in self.forward(torch.zeros(1, ch, 128, 128))])  # forward
         m.anchors /= m.stride.view(-1, 1, 1)
+        check_anchor_order(m)
         self.stride = m.stride
 
         # Init weights, biases

@@ -97,8 +96,11 @@ class Model(nn.Module):
             x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
 
             if profile:
+                try:
+                    import thop
+                    o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # FLOPS
+                except:
+                    o = 0
                 t = torch_utils.time_synchronized()
                 for _ in range(10):
                     _ = m(x)

@@ -208,7 +210,7 @@ if __name__ == '__main__':
     parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     opt = parser.parse_args()
-    opt.cfg =
+    opt.cfg = check_file(opt.cfg)  # check file
     device = torch_utils.select_device(opt.device)
 
     # Create model

@@ -218,11 +220,10 @@ if __name__ == '__main__':
     # Profile
     # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
     # y = model(img, profile=True)
-    # print([y[0].shape] + [x.shape for x in y[1]])
 
     # ONNX export
     # model.model[-1].export = True
-    # torch.onnx.export(model, img,
+    # torch.onnx.export(model, img, opt.cfg.replace('.yaml', '.onnx'), verbose=True, opset_version=11)
 
     # Tensorboard
     # from torch.utils.tensorboard import SummaryWriter
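The new stride computation runs a dummy 128x128 forward pass and divides the input size by each output feature map's height, so a model whose outputs are downsampled to 16/8/4 pixels yields strides of 8/16/32. A toy sketch of the same idea with stand-in layers (not the repo's `Model` class):

```python
import torch
import torch.nn as nn

class Toy(nn.Module):
    # stand-in "detection" outputs at three scales: /8, /16 and /32 downsampling
    def __init__(self):
        super().__init__()
        self.p3 = nn.MaxPool2d(8)
        self.p4 = nn.MaxPool2d(16)
        self.p5 = nn.MaxPool2d(32)

    def forward(self, x):
        return [self.p3(x), self.p4(x), self.p5(x)]

m = Toy()
s = 128  # small dummy size, same trick as the diff uses
stride = torch.tensor([s / x.shape[-2] for x in m(torch.zeros(1, 3, s, s))])
print(stride)  # tensor([ 8., 16., 32.])
```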
models/yolov5l.yaml
CHANGED
@@ -5,41 +5,48 @@ width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [116,90, 156,198, 373,326]  # P5/32
  - [30,61, 62,45, 59,119]  # P4/16
  - [10,13, 16,30, 33,23]  # P3/8

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, BottleneckCSP, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, BottleneckCSP, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, BottleneckCSP, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
  ]

# YOLOv5 head
head:
  [[-1, 3, BottleneckCSP, [1024, False]],  # 9

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, BottleneckCSP, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, BottleneckCSP, [256, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 18 (P3/8-small)

   [-2, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 22 (P4/16-medium)

   [-2, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 26 (P5/32-large)

   [[], 1, Detect, [nc, anchors]],  # Detect(P5, P4, P3)
  ]
models/yolov5m.yaml
CHANGED
@@ -5,41 +5,48 @@ width_multiple: 0.75  # layer channel multiple

# anchors
anchors:
  - [116,90, 156,198, 373,326]  # P5/32
  - [30,61, 62,45, 59,119]  # P4/16
  - [10,13, 16,30, 33,23]  # P3/8

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, BottleneckCSP, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, BottleneckCSP, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, BottleneckCSP, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
  ]

# YOLOv5 head
head:
  [[-1, 3, BottleneckCSP, [1024, False]],  # 9

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, BottleneckCSP, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, BottleneckCSP, [256, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 18 (P3/8-small)

   [-2, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 22 (P4/16-medium)

   [-2, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 26 (P5/32-large)

   [[], 1, Detect, [nc, anchors]],  # Detect(P5, P4, P3)
  ]
models/yolov5s.yaml
CHANGED
@@ -5,41 +5,48 @@ width_multiple: 0.50  # layer channel multiple

# anchors
anchors:
  - [116,90, 156,198, 373,326]  # P5/32
  - [30,61, 62,45, 59,119]  # P4/16
  - [10,13, 16,30, 33,23]  # P3/8

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, BottleneckCSP, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, BottleneckCSP, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, BottleneckCSP, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
  ]

# YOLOv5 head
head:
  [[-1, 3, BottleneckCSP, [1024, False]],  # 9

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, BottleneckCSP, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, BottleneckCSP, [256, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 18 (P3/8-small)

   [-2, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 22 (P4/16-medium)

   [-2, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 26 (P5/32-large)

   [[], 1, Detect, [nc, anchors]],  # Detect(P5, P4, P3)
  ]
models/yolov5x.yaml
CHANGED
@@ -5,41 +5,48 @@ width_multiple: 1.25  # layer channel multiple

# anchors
anchors:
  - [116,90, 156,198, 373,326]  # P5/32
  - [30,61, 62,45, 59,119]  # P4/16
  - [10,13, 16,30, 33,23]  # P3/8

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, BottleneckCSP, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, BottleneckCSP, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, BottleneckCSP, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
  ]

# YOLOv5 head
head:
  [[-1, 3, BottleneckCSP, [1024, False]],  # 9

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, BottleneckCSP, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, BottleneckCSP, [256, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 18 (P3/8-small)

   [-2, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, BottleneckCSP, [512, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 22 (P4/16-medium)

   [-2, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, BottleneckCSP, [1024, False]],
   [-1, 1, nn.Conv2d, [na * (nc + 5), 1, 1]],  # 26 (P5/32-large)

   [[], 1, Detect, [nc, anchors]],  # Detect(P5, P4, P3)
  ]
requirements.txt
CHANGED
@@ -2,7 +2,7 @@
 Cython
 numpy==1.17
 opencv-python
-torch>=1.
+torch>=1.4
 matplotlib
 pillow
 tensorboard

@@ -21,4 +21,4 @@ git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI
 # conda install -yc conda-forge scikit-image pycocotools tensorboard
 # conda install -yc spyder-ide spyder-line-profiler
 # conda install -yc pytorch pytorch torchvision
-# conda install -yc conda-forge protobuf numpy && pip install onnx  # https://github.com/onnx/onnx#linux-and-macos
+# conda install -yc conda-forge protobuf numpy && pip install onnx==1.6.0  # https://github.com/onnx/onnx#linux-and-macos
test.py
CHANGED
@@ -1,9 +1,7 @@
 import argparse
 import json
 
-import
-from torch.utils.data import DataLoader
-
+from utils import google_utils
 from utils.datasets import *
 from utils.utils import *
 

@@ -17,16 +15,18 @@ def test(data,
          save_json=False,
          single_cls=False,
          augment=False,
+         verbose=False,
          model=None,
          dataloader=None,
          fast=False,
          verbose=False,
-         save_dir='.'
+         save_dir='.',
+         merge=False):
+
     # Initialize/load model and set device
     if model is None:
         training = False
         device = torch_utils.select_device(opt.device, batch_size=batch_size)
-        half = device.type != 'cpu'  # half precision only supported on CUDA
 
         # Remove previous
         for f in glob.glob(f'{save_dir}/test_batch*.jpg'):

@@ -38,18 +38,19 @@ def test(data,
         torch_utils.model_info(model)
         model.fuse()
         model.to(device)
-        if half:
-            model.half()  # to FP16
 
+        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
+        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
+        #     model = nn.DataParallel(model)
 
     else:  # called by train.py
         training = True
        device = next(model.parameters()).device  # get model device
+
+    # Half
+    half = device.type != 'cpu' and torch.cuda.device_count() == 1  # half precision only supported on single-GPU
+    if half:
+        model.half()  # to FP16
 
     # Configure
     model.eval()

@@ -57,29 +58,16 @@ def test(data,
         data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
     nc = 1 if single_cls else int(data['nc'])  # number of classes
     iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for [email protected]:0.95
-    # iouv = iouv[0].view(1)  # comment for [email protected]:0.95
     niou = iouv.numel()
 
     # Dataloader
     if dataloader is None:  # not training
+        merge = opt.merge  # use Merge NMS
         img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
         _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
-        fast |= conf_thres > 0.001  # enable fast mode
         path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
-                      rect=True,  # rectangular inference
-                      single_cls=opt.single_cls,  # single class mode
-                      pad=0.5)  # padding
-        batch_size = min(batch_size, len(dataset))
-        nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
-        dataloader = DataLoader(dataset,
-                                batch_size=batch_size,
-                                num_workers=nw,
-                                pin_memory=True,
-                                collate_fn=dataset.collate_fn)
+        dataloader = create_dataloader(path, imgsz, batch_size, int(max(model.stride)), opt,
+                                       hyp=None, augment=False, cache=False, pad=0.5, rect=True)[0]
 
     seen = 0
     names = model.names if hasattr(model, 'names') else model.module.names

@@ -109,7 +97,7 @@ def test(data,
 
         # Run NMS
         t = torch_utils.time_synchronized()
-        output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres,
+        output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
         t1 += torch_utils.time_synchronized() - t
 
         # Statistics per image

@@ -235,6 +223,7 @@ def test(data,
               'See https://github.com/cocodataset/cocoapi/issues/356')
 
     # Return results
+    model.float()  # for training
     maps = np.zeros(nc) + map
     for i, c in enumerate(ap_class):
         maps[c] = ap[i]

@@ -244,7 +233,7 @@ def test(data,
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(prog='test.py')
     parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='model.pt path')
-    parser.add_argument('--data', type=str, default='data/
+    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
     parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
     parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')

@@ -254,6 +243,7 @@ if __name__ == '__main__':
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
     parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--merge', action='store_true', help='use Merge NMS')
     parser.add_argument('--verbose', action='store_true', help='report mAP by class')
     opt = parser.parse_args()
     opt.img_size = check_img_size(opt.img_size)

@@ -271,12 +261,13 @@ if __name__ == '__main__':
              opt.iou_thres,
              opt.save_json,
              opt.single_cls,
-             opt.augment
+             opt.augment,
+             opt.verbose)
 
     elif opt.task == 'study':  # run over a range of settings and save/plot
-        for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
+        for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov3-spp.pt']:
             f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem)  # filename to save to
-            x = list(range(
+            x = list(range(352, 832, 64))  # x axis
             y = []  # y axis
             for i in x:  # img-size
                 print('\nRunning %s point %s...' % (f, i))
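test.py now enables FP16 only on a single CUDA GPU (DataParallel is incompatible with `.half()`, per ultralytics/yolov5#99) and restores FP32 with `model.float()` before returning, so a caller such as train.py gets its FP32 model back. A condensed standalone sketch of that guard pattern, using a stand-in model:

```python
import torch
import torch.nn as nn

model = nn.Conv2d(3, 8, 3)  # stand-in for the detection model
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)

# half precision only on a single CUDA GPU (DataParallel + .half() is problematic)
half = device.type != 'cpu' and torch.cuda.device_count() == 1
if half:
    model.half()

img = torch.zeros(1, 3, 64, 64, device=device)
out = model(img.half() if half else img)  # inference runs in FP16 when enabled

model.float()  # hand the model back to the caller (e.g. train.py) in FP32
```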
train.py
CHANGED
@@ -4,10 +4,12 @@ import torch.distributed as dist
|
|
4 |
import torch.nn.functional as F
|
5 |
import torch.optim as optim
|
6 |
import torch.optim.lr_scheduler as lr_scheduler
|
|
|
7 |
from torch.utils.tensorboard import SummaryWriter
|
8 |
|
9 |
import test # import test.py to get mAP after each epoch
|
10 |
from models.yolo import Model
|
|
|
11 |
from utils.datasets import *
|
12 |
from utils.utils import *
|
13 |
|
@@ -72,6 +74,7 @@ def train(hyp):
|
|
72 |
# Create model
|
73 |
model = Model(opt.cfg).to(device)
|
74 |
assert model.md['nc'] == nc, '%s nc=%g classes but %s nc=%g classes' % (opt.data, nc, opt.cfg, model.md['nc'])
|
|
|
75 |
|
76 |
# Image sizes
|
77 |
gs = int(max(model.stride)) # grid size (max stride)
|
@@ -148,37 +151,17 @@ def train(hyp):
|
|
148 |
world_size=1, # number of nodes
|
149 |
rank=0) # node rank
|
150 |
model = torch.nn.parallel.DistributedDataParallel(model)
|
|
|
151 |
|
152 |
-
#
|
153 |
-
dataset =
|
154 |
-
|
155 |
-
hyp=hyp, # augmentation hyperparameters
|
156 |
-
rect=opt.rect, # rectangular training
|
157 |
-
cache_images=opt.cache_images,
|
158 |
-
single_cls=opt.single_cls)
|
159 |
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
|
160 |
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Correct your labels or your model.' % (mlc, nc, opt.cfg)
|
161 |
|
162 |
-
# Dataloader
|
163 |
-
batch_size = min(batch_size, len(dataset))
|
164 |
-
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
|
165 |
-
dataloader = torch.utils.data.DataLoader(dataset,
|
166 |
-
batch_size=batch_size,
|
167 |
-
num_workers=nw,
|
168 |
-
shuffle=not opt.rect, # Shuffle=True unless rectangular training is used
|
169 |
-
pin_memory=True,
|
170 |
-
collate_fn=dataset.collate_fn)
|
171 |
-
|
172 |
# Testloader
|
173 |
-
testloader =
|
174 |
-
|
175 |
-
rect=True,
|
176 |
-
cache_images=opt.cache_images,
|
177 |
-
single_cls=opt.single_cls),
|
178 |
-
batch_size=batch_size,
|
179 |
-
num_workers=nw,
|
180 |
-
pin_memory=True,
|
181 |
-
collate_fn=dataset.collate_fn)
|
182 |
|
183 |
# Model parameters
|
184 |
hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
|
@@ -186,7 +169,6 @@ def train(hyp):
|
|
186 |
model.hyp = hyp # attach hyperparameters to model
|
187 |
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
|
188 |
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
|
189 |
-
model.names = data_dict['names']
|
190 |
|
191 |
#save hyperparamter and training options in run folder
|
192 |
with open(os.path.join(log_dir, 'hyp.yaml'), 'w') as f:
|
@@ -200,11 +182,17 @@ def train(hyp):
|
|
200 |
c = torch.tensor(labels[:, 0]) # classes
|
201 |
# cf = torch.bincount(c.long(), minlength=nc) + 1.
|
202 |
# model._initialize_biases(cf.to(device))
|
|
|
|
|
203 |
plot_labels(labels, save_dir=log_dir)
|
204 |
-
|
|
|
|
|
|
|
205 |
|
206 |
# Check anchors
|
207 |
-
|
|
|
208 |
|
209 |
# Exponential moving average
|
210 |
ema = torch_utils.ModelEMA(model)
|
@@ -216,7 +204,7 @@ def train(hyp):
|
|
216 |
maps = np.zeros(nc) # mAP per class
|
217 |
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
|
218 |
print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
|
219 |
-
print('Using %g dataloader workers' %
|
220 |
print('Starting training for %g epochs...' % epochs)
|
221 |
# torch.autograd.set_detect_anomaly(True)
|
222 |
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
|
@@ -285,10 +273,10 @@ def train(hyp):
|
|
285 |
|
286 |
# Plot
|
287 |
if ni < 3:
|
288 |
-
f = os.path.join(log_dir, 'train_batch%g.jpg' %
|
289 |
-
|
290 |
-
if tb_writer:
|
291 |
-
tb_writer.add_image(f,
|
292 |
# tb_writer.add_graph(model, imgs) # add model to tensorboard
|
293 |
|
294 |
# end batch ------------------------------------------------------------------------------------------------
|
@@ -307,7 +295,6 @@ def train(hyp):
|
|
307 |
model=ema.ema,
|
308 |
single_cls=opt.single_cls,
|
309 |
dataloader=testloader,
|
310 |
-
fast=epoch < epochs / 2,
|
311 |
save_dir=log_dir)
|
312 |
|
313 |
# Write
|
@@ -362,7 +349,7 @@ def train(hyp):
|
|
362 |
if not opt.evolve:
|
363 |
plot_results(save_dir = log_dir) # save as results.png
|
364 |
print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
|
365 |
-
dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
|
366 |
torch.cuda.empty_cache()
|
367 |
return results
|
368 |
|
@@ -379,6 +366,7 @@ if __name__ == '__main__':
|
|
379 |
parser.add_argument('--rect', action='store_true', help='rectangular training')
|
380 |
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
|
381 |
parser.add_argument('--notest', action='store_true', help='only test final epoch')
|
|
|
382 |
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
|
383 |
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
|
384 |
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
|
|
|
4 |
import torch.nn.functional as F
|
5 |
import torch.optim as optim
|
6 |
import torch.optim.lr_scheduler as lr_scheduler
|
7 |
+
import torch.utils.data
|
8 |
from torch.utils.tensorboard import SummaryWriter
|
9 |
|
10 |
import test # import test.py to get mAP after each epoch
|
11 |
from models.yolo import Model
|
12 |
+
from utils import google_utils
|
13 |
from utils.datasets import *
|
14 |
from utils.utils import *
|
15 |
|
|
|
74 |
# Create model
|
75 |
model = Model(opt.cfg).to(device)
|
76 |
assert model.md['nc'] == nc, '%s nc=%g classes but %s nc=%g classes' % (opt.data, nc, opt.cfg, model.md['nc'])
|
77 |
+
model.names = data_dict['names']
|
78 |
|
79 |
# Image sizes
|
80 |
gs = int(max(model.stride)) # grid size (max stride)
|
|
|
151 |
world_size=1, # number of nodes
|
152 |
rank=0) # node rank
|
153 |
model = torch.nn.parallel.DistributedDataParallel(model)
|
154 |
+
# pip install torch==1.4.0+cu100 torchvision==0.5.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html
|
155 |
|
156 |
+
# Trainloader
|
157 |
+
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
|
158 |
+
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect)
|
159 |
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
|
160 |
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Correct your labels or your model.' % (mlc, nc, opt.cfg)
|
161 |
|
|
162 |
# Testloader
|
163 |
+
testloader = create_dataloader(test_path, imgsz_test, batch_size, gs, opt,
|
164 |
+
hyp=hyp, augment=False, cache=opt.cache_images, rect=True)[0]
|
165 |
|
166 |
# Model parameters
|
167 |
hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
|
|
|
169 |
model.hyp = hyp # attach hyperparameters to model
|
170 |
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
|
171 |
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
|
|
|
172 |
|
173 |
# save hyperparameters and training options in run folder
|
174 |
with open(os.path.join(log_dir, 'hyp.yaml'), 'w') as f:
|
|
|
182 |
c = torch.tensor(labels[:, 0]) # classes
|
183 |
# cf = torch.bincount(c.long(), minlength=nc) + 1.
|
184 |
# model._initialize_biases(cf.to(device))
|
185 |
+
|
186 |
+
# always plot labels to log_dir
|
187 |
plot_labels(labels, save_dir=log_dir)
|
188 |
+
|
189 |
+
if tb_writer:
|
190 |
+
tb_writer.add_histogram('classes', c, 0)
|
191 |
+
|
192 |
|
193 |
# Check anchors
|
194 |
+
if not opt.noautoanchor:
|
195 |
+
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
|
196 |
|
197 |
# Exponential moving average
|
198 |
ema = torch_utils.ModelEMA(model)
|
|
|
204 |
maps = np.zeros(nc) # mAP per class
|
205 |
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
|
206 |
print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
|
207 |
+
print('Using %g dataloader workers' % dataloader.num_workers)
|
208 |
print('Starting training for %g epochs...' % epochs)
|
209 |
# torch.autograd.set_detect_anomaly(True)
|
210 |
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
|
|
|
273 |
|
274 |
# Plot
|
275 |
if ni < 3:
|
276 |
+
f = os.path.join(log_dir, 'train_batch%g.jpg' % ni) # filename
|
277 |
+
result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
|
278 |
+
if tb_writer and result is not None:
|
279 |
+
tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
|
280 |
# tb_writer.add_graph(model, imgs) # add model to tensorboard
|
281 |
|
282 |
# end batch ------------------------------------------------------------------------------------------------
|
|
|
295 |
model=ema.ema,
|
296 |
single_cls=opt.single_cls,
|
297 |
dataloader=testloader,
|
|
|
298 |
save_dir=log_dir)
|
299 |
|
300 |
# Write
|
|
|
349 |
if not opt.evolve:
|
350 |
plot_results(save_dir = log_dir) # save as results.png
|
351 |
print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
|
352 |
+
dist.destroy_process_group() if device.type != 'cpu' and torch.cuda.device_count() > 1 else None
|
353 |
torch.cuda.empty_cache()
|
354 |
return results
|
355 |
|
|
|
366 |
parser.add_argument('--rect', action='store_true', help='rectangular training')
|
367 |
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
|
368 |
parser.add_argument('--notest', action='store_true', help='only test final epoch')
|
369 |
+
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
|
370 |
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
|
371 |
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
|
372 |
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
|
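For readers following the logging changes above: the plotting path added to train.py writes the first few training batches to disk and, when a SummaryWriter is active, logs the same mosaic image to TensorBoard. A minimal sketch of that flow, assuming a YOLOv5 checkout (so `utils.utils.plot_images` is importable) and that `imgs`, `targets`, `paths` come straight from the training dataloader:

```python
# Sketch only (not part of the diff): the train-batch logging pattern introduced in train.py above.
import os
from utils.utils import plot_images  # assumed available in a YOLOv5 checkout

def log_first_batches(imgs, targets, paths, ni, epoch, log_dir, tb_writer):
    if ni < 3:  # only the first few integrated batches, as in train.py
        f = os.path.join(log_dir, 'train_batch%g.jpg' % ni)              # mosaic filename
        result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
        if tb_writer and result is not None:                             # plot_images may return None
            tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
```

The `--noautoanchor` flag added to the parser above is the opt-out for the new anchor check that runs just before training starts.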
utils/datasets.py
CHANGED
@@ -18,7 +18,7 @@ from utils.utils import xyxy2xywh, xywh2xyxy
|
|
18 |
|
19 |
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
|
20 |
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
|
21 |
-
vid_formats = ['.mov', '.avi', '.mp4']
|
22 |
|
23 |
# Get orientation exif tag
|
24 |
for orientation in ExifTags.TAGS.keys():
|
@@ -41,6 +41,26 @@ def exif_size(img):
|
|
41 |
return s
|
42 |
|
43 |
|
44 |
class LoadImages: # for inference
|
45 |
def __init__(self, path, img_size=416):
|
46 |
path = str(Path(path)) # os-agnostic
|
@@ -63,7 +83,8 @@ class LoadImages: # for inference
|
|
63 |
self.new_video(videos[0]) # new video
|
64 |
else:
|
65 |
self.cap = None
|
66 |
-
assert self.nF > 0, 'No images or videos found in '
|
|
|
67 |
|
68 |
def __iter__(self):
|
69 |
self.count = 0
|
@@ -257,7 +278,7 @@ class LoadStreams: # multiple IP or RTSP cameras
|
|
257 |
|
258 |
class LoadImagesAndLabels(Dataset): # for training/testing
|
259 |
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
|
260 |
-
cache_images=False, single_cls=False, pad=0.0):
|
261 |
try:
|
262 |
path = str(Path(path)) # os-agnostic
|
263 |
parent = str(Path(path).parent) + os.sep
|
@@ -324,7 +345,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing
|
|
324 |
elif mini > 1:
|
325 |
shapes[i] = [1, 1 / mini]
|
326 |
|
327 |
-
self.batch_shapes = np.ceil(np.array(shapes) * img_size /
|
328 |
|
329 |
# Cache labels
|
330 |
self.imgs = [None] * n
|
@@ -711,7 +732,7 @@ def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10,
|
|
711 |
area = w * h
|
712 |
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
|
713 |
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
|
714 |
-
i = (w >
|
715 |
|
716 |
targets = targets[i]
|
717 |
targets[:, 1:5] = xy[i]
|
|
|
18 |
|
19 |
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
|
20 |
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
|
21 |
+
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
|
22 |
|
23 |
# Get orientation exif tag
|
24 |
for orientation in ExifTags.TAGS.keys():
|
|
|
41 |
return s
|
42 |
|
43 |
|
44 |
+
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
|
45 |
+
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
|
46 |
+
augment=augment, # augment images
|
47 |
+
hyp=hyp, # augmentation hyperparameters
|
48 |
+
rect=rect, # rectangular training
|
49 |
+
cache_images=cache,
|
50 |
+
single_cls=opt.single_cls,
|
51 |
+
stride=stride,
|
52 |
+
pad=pad)
|
53 |
+
|
54 |
+
batch_size = min(batch_size, len(dataset))
|
55 |
+
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
|
56 |
+
dataloader = torch.utils.data.DataLoader(dataset,
|
57 |
+
batch_size=batch_size,
|
58 |
+
num_workers=nw,
|
59 |
+
pin_memory=True,
|
60 |
+
collate_fn=LoadImagesAndLabels.collate_fn)
|
61 |
+
return dataloader, dataset
|
62 |
+
|
63 |
+
|
64 |
class LoadImages: # for inference
|
65 |
def __init__(self, path, img_size=416):
|
66 |
path = str(Path(path)) # os-agnostic
|
|
|
83 |
self.new_video(videos[0]) # new video
|
84 |
else:
|
85 |
self.cap = None
|
86 |
+
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
|
87 |
+
(path, img_formats, vid_formats)
|
88 |
|
89 |
def __iter__(self):
|
90 |
self.count = 0
|
|
|
278 |
|
279 |
class LoadImagesAndLabels(Dataset): # for training/testing
|
280 |
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
|
281 |
+
cache_images=False, single_cls=False, stride=32, pad=0.0):
|
282 |
try:
|
283 |
path = str(Path(path)) # os-agnostic
|
284 |
parent = str(Path(path).parent) + os.sep
|
|
|
345 |
elif mini > 1:
|
346 |
shapes[i] = [1, 1 / mini]
|
347 |
|
348 |
+
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
|
349 |
|
350 |
# Cache labels
|
351 |
self.imgs = [None] * n
|
|
|
732 |
area = w * h
|
733 |
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
|
734 |
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
|
735 |
+
i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
|
736 |
|
737 |
targets = targets[i]
|
738 |
targets[:, 1:5] = xy[i]
|
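The datasets.py changes above centralize dataloader construction in `create_dataloader()`, which train.py now calls for both the train and test loaders. A minimal usage sketch, assuming a local coco128 path and a placeholder `opt` namespace (only `opt.single_cls` is actually read here):

```python
# Sketch only (not part of the diff): calling the new create_dataloader() helper directly.
from argparse import Namespace
from utils.datasets import create_dataloader

opt = Namespace(single_cls=False)  # placeholder options object
testloader, testset = create_dataloader('../coco128/images/train2017', imgsz=640, batch_size=16,
                                        stride=32, opt=opt, hyp=None, augment=False,
                                        cache=False, rect=True)
imgs, targets, paths, shapes = next(iter(testloader))  # batch from LoadImagesAndLabels.collate_fn
```

Passing the grid stride explicitly is what lets `batch_shapes` round rectangular batches to a stride multiple plus padding.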
utils/torch_utils.py
CHANGED
@@ -7,6 +7,7 @@ import torch
|
|
7 |
import torch.backends.cudnn as cudnn
|
8 |
import torch.nn as nn
|
9 |
import torch.nn.functional as F
|
|
|
10 |
|
11 |
|
12 |
def init_seeds(seed=0):
|
@@ -120,18 +121,22 @@ def model_info(model, verbose=False):
|
|
120 |
|
121 |
def load_classifier(name='resnet101', n=2):
|
122 |
# Loads a pretrained model reshaped to n-class output
|
123 |
-
|
124 |
-
model = pretrainedmodels.__dict__[name](num_classes=1000, pretrained='imagenet')
|
125 |
|
126 |
# Display model properties
|
127 |
-
|
|
128 |
print(x + ' =', eval(x))
|
129 |
|
130 |
# Reshape output to n classes
|
131 |
-
filters = model.
|
132 |
-
model.
|
133 |
-
model.
|
134 |
-
model.
|
135 |
return model
|
136 |
|
137 |
|
|
|
7 |
import torch.backends.cudnn as cudnn
|
8 |
import torch.nn as nn
|
9 |
import torch.nn.functional as F
|
10 |
+
import torchvision.models as models
|
11 |
|
12 |
|
13 |
def init_seeds(seed=0):
|
|
|
121 |
|
122 |
def load_classifier(name='resnet101', n=2):
|
123 |
# Loads a pretrained model reshaped to n-class output
|
124 |
+
model = models.__dict__[name](pretrained=True)
|
|
|
125 |
|
126 |
# Display model properties
|
127 |
+
input_size = [3, 224, 224]
|
128 |
+
input_space = 'RGB'
|
129 |
+
input_range = [0, 1]
|
130 |
+
mean = [0.485, 0.456, 0.406]
|
131 |
+
std = [0.229, 0.224, 0.225]
|
132 |
+
for x in ['input_size', 'input_space', 'input_range', 'mean', 'std']:
|
133 |
print(x + ' =', eval(x))
|
134 |
|
135 |
# Reshape output to n classes
|
136 |
+
filters = model.fc.weight.shape[1]
|
137 |
+
model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)
|
138 |
+
model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)
|
139 |
+
model.fc.out_features = n
|
140 |
return model
|
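`load_classifier()` above now builds the backbone with torchvision instead of the external pretrainedmodels package and resizes the final fully-connected layer in place. An equivalent stand-alone sketch; note my variation swaps in a fresh `nn.Linear` rather than overwriting `fc.weight`/`fc.bias` with zeros as the code above does:

```python
# Sketch only (not part of the diff): reshaping a torchvision ResNet-101 head to n=2 classes.
import torch
import torchvision.models as models

model = models.resnet101(pretrained=True)   # same lookup as models.__dict__['resnet101']
filters = model.fc.weight.shape[1]          # input features feeding the final layer (2048)
model.fc = torch.nn.Linear(filters, 2)      # equivalent effect to resizing fc to n outputs
print(model.fc)
```

Either way the new head starts untrained and needs fine-tuning before it is useful.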
141 |
|
142 |
|
utils/utils.py
CHANGED
@@ -20,7 +20,7 @@ import yaml
|
|
20 |
from scipy.signal import butter, filtfilt
|
21 |
from tqdm import tqdm
|
22 |
|
23 |
-
from . import torch_utils
|
24 |
|
25 |
# Set printoptions
|
26 |
torch.set_printoptions(linewidth=320, precision=5, profile='long')
|
@@ -53,24 +53,52 @@ def check_git_status():
|
|
53 |
|
54 |
def check_img_size(img_size, s=32):
|
55 |
# Verify img_size is a multiple of stride s
|
56 |
-
|
57 |
-
|
58 |
-
|
|
|
59 |
|
60 |
|
61 |
-
def
|
62 |
-
# Check
|
|
|
|
|
63 |
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
|
72 |
-
|
73 |
-
|
74 |
|
75 |
|
76 |
def check_file(file):
|
@@ -517,11 +545,11 @@ def build_targets(p, targets, model):
|
|
517 |
return tcls, tbox, indices, anch
|
518 |
|
519 |
|
520 |
-
def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, classes=None, agnostic=False):
|
521 |
-
"""
|
522 |
-
|
523 |
-
Returns
|
524 |
-
|
525 |
"""
|
526 |
if prediction.dtype is torch.float16:
|
527 |
prediction = prediction.float() # to FP32
|
@@ -534,12 +562,7 @@ def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, c
|
|
534 |
max_det = 300 # maximum number of detections per image
|
535 |
time_limit = 10.0 # seconds to quit after
|
536 |
redundant = True # require redundant detections
|
537 |
-
fast |= conf_thres > 0.001 # fast mode
|
538 |
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
|
539 |
-
if fast:
|
540 |
-
merge = False
|
541 |
-
else:
|
542 |
-
merge = True # merge for best mAP (adds 0.5ms/img)
|
543 |
|
544 |
t = time.time()
|
545 |
output = [None] * prediction.shape[0]
|
@@ -610,24 +633,24 @@ def strip_optimizer(f='weights/best.pt'): # from utils.utils import *; strip_op
|
|
610 |
# Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
|
611 |
x = torch.load(f, map_location=torch.device('cpu'))
|
612 |
x['optimizer'] = None
|
|
|
613 |
torch.save(x, f)
|
614 |
print('Optimizer stripped from %s' % f)
|
615 |
|
616 |
|
617 |
-
def
|
618 |
-
# create
|
619 |
device = torch.device('cpu')
|
620 |
-
x = torch.load(f, map_location=device)
|
621 |
-
torch.save(x, s) # update model if SourceChangeWarning
|
622 |
x = torch.load(s, map_location=device)
|
623 |
|
624 |
x['optimizer'] = None
|
625 |
x['training_results'] = None
|
626 |
x['epoch'] = -1
|
|
|
627 |
for p in x['model'].parameters():
|
628 |
p.requires_grad = True
|
629 |
torch.save(x, s)
|
630 |
-
print('%s
|
631 |
|
632 |
|
633 |
def coco_class_count(path='../coco/labels/train2014/'):
|
@@ -695,14 +718,14 @@ def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
|
|
695 |
shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
|
696 |
|
697 |
|
698 |
-
def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=
|
699 |
""" Creates kmeans-evolved anchors from training dataset
|
700 |
|
701 |
Arguments:
|
702 |
-
path: path to dataset *.yaml
|
703 |
n: number of anchors
|
704 |
-
img_size:
|
705 |
-
thr:
|
706 |
gen: generations to evolve anchors using genetic algorithm
|
707 |
|
708 |
Return:
|
@@ -711,52 +734,47 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=(640, 640), thr=0.20
|
|
711 |
Usage:
|
712 |
from utils.utils import *; _ = kmean_anchors()
|
713 |
"""
|
714 |
|
715 |
-
|
|
|
|
|
716 |
|
717 |
def print_results(k):
|
718 |
k = k[np.argsort(k.prod(1))] # sort small to large
|
719 |
-
|
720 |
-
|
721 |
-
|
722 |
-
|
723 |
-
|
724 |
-
# r = wh[:, None] / k[None]
|
725 |
-
# ar = torch.max(r, 1. / r).max(2)[0]
|
726 |
-
# max_ar = ar.min(1)[0]
|
727 |
-
# bpr, aat = (max_ar < thr).float().mean(), (ar < thr).float().mean() * n # best possible recall, anch > thr
|
728 |
-
|
729 |
-
print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
|
730 |
-
print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
|
731 |
-
(n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
|
732 |
for i, x in enumerate(k):
|
733 |
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
|
734 |
return k
|
735 |
|
736 |
-
|
737 |
-
|
738 |
-
|
739 |
-
|
740 |
-
|
741 |
-
|
742 |
-
|
743 |
-
# r = wh[:, None] / k[None]
|
744 |
-
# x = torch.max(r, 1. / r).max(2)[0]
|
745 |
-
# m = x.min(1)[0]
|
746 |
-
# return 1. / (m * (m < 5).float()).mean() # product
|
747 |
|
748 |
# Get label wh
|
749 |
-
|
750 |
-
|
751 |
-
|
752 |
-
|
753 |
-
|
754 |
-
|
755 |
-
|
756 |
-
|
757 |
-
wh =
|
758 |
-
# wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1)) # normalized to pixels (multi-scale)
|
759 |
-
wh = wh[(wh > 2.0).all(1)] # remove below threshold boxes (< 2 pixels wh)
|
760 |
|
761 |
# Kmeans calculation
|
762 |
from scipy.cluster.vq import kmeans
|
@@ -764,10 +782,11 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=(640, 640), thr=0.20
|
|
764 |
s = wh.std(0) # sigmas for whitening
|
765 |
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
|
766 |
k *= s
|
767 |
-
wh = torch.
|
|
|
768 |
k = print_results(k)
|
769 |
|
770 |
-
#
|
771 |
# k, d = [None] * 20, [None] * 20
|
772 |
# for i in tqdm(range(1, 21)):
|
773 |
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
|
@@ -783,7 +802,8 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=(640, 640), thr=0.20
|
|
783 |
# Evolve
|
784 |
npr = np.random
|
785 |
f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
|
786 |
-
|
|
|
787 |
v = np.ones(sh)
|
788 |
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
|
789 |
v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
|
@@ -791,9 +811,11 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=(640, 640), thr=0.20
|
|
791 |
fg = fitness(kg)
|
792 |
if fg > f:
|
793 |
f, k = fg, kg.copy()
|
794 |
-
|
795 |
-
|
796 |
-
|
|
|
|
|
797 |
|
798 |
|
799 |
def print_mutation(hyp, results, bucket=''):
|
@@ -1078,12 +1100,14 @@ def plot_study_txt(f='study.txt', x=None): # from utils.utils import *; plot_st
|
|
1078 |
|
1079 |
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
|
1080 |
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
|
|
|
|
|
1081 |
ax2.set_xlim(0, 30)
|
1082 |
-
ax2.set_ylim(
|
1083 |
-
ax2.
|
|
|
1084 |
ax2.set_ylabel('COCO AP val')
|
1085 |
ax2.legend(loc='lower right')
|
1086 |
-
ax2.grid()
|
1087 |
plt.savefig('study_mAP_latency.png', dpi=300)
|
1088 |
plt.savefig(f.replace('.txt', '.png'), dpi=200)
|
1089 |
|
@@ -1110,6 +1134,7 @@ def plot_labels(labels, save_dir= '.'):
|
|
1110 |
ax[2].set_xlabel('width')
|
1111 |
ax[2].set_ylabel('height')
|
1112 |
plt.savefig(os.path.join(save_dir,'labels.png'), dpi=200)
|
|
|
1113 |
|
1114 |
|
1115 |
def plot_evolution_results(hyp): # from utils.utils import *; plot_evolution_results(hyp)
|
|
|
20 |
from scipy.signal import butter, filtfilt
|
21 |
from tqdm import tqdm
|
22 |
|
23 |
+
from . import torch_utils # torch_utils, google_utils
|
24 |
|
25 |
# Set printoptions
|
26 |
torch.set_printoptions(linewidth=320, precision=5, profile='long')
|
|
|
53 |
|
54 |
def check_img_size(img_size, s=32):
|
55 |
# Verify img_size is a multiple of stride s
|
56 |
+
new_size = make_divisible(img_size, s) # ceil gs-multiple
|
57 |
+
if new_size != img_size:
|
58 |
+
print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
|
59 |
+
return new_size
|
60 |
|
61 |
|
62 |
+
def check_anchors(dataset, model, thr=4.0, imgsz=640):
|
63 |
+
# Check anchor fit to data, recompute if necessary
|
64 |
+
print('\nAnalyzing anchors... ', end='')
|
65 |
+
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
|
66 |
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
|
67 |
+
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
|
68 |
+
wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
|
69 |
+
|
70 |
+
def metric(k): # compute metric
|
71 |
+
r = wh[:, None] / k[None]
|
72 |
+
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
|
73 |
+
best = x.max(1)[0] # best_x
|
74 |
+
return (best > 1. / thr).float().mean() # best possible recall
|
75 |
+
|
76 |
+
bpr = metric(m.anchor_grid.clone().cpu().view(-1, 2))
|
77 |
+
print('Best Possible Recall (BPR) = %.4f' % bpr, end='')
|
78 |
+
if bpr < 0.99: # threshold to recompute
|
79 |
+
print('. Attempting to generate improved anchors, please wait...')
|
80 |
+
na = m.anchor_grid.numel() // 2 # number of anchors
|
81 |
+
new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
|
82 |
+
new_bpr = metric(new_anchors.reshape(-1, 2))
|
83 |
+
if new_bpr > bpr: # replace anchors
|
84 |
+
new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
|
85 |
+
m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
|
86 |
+
m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
|
87 |
+
check_anchor_order(m)
|
88 |
+
print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
|
89 |
+
else:
|
90 |
+
print('Original anchors better than new anchors. Proceeding with original anchors.')
|
91 |
+
print('') # newline
|
92 |
+
|
93 |
|
94 |
+
def check_anchor_order(m):
|
95 |
+
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
|
96 |
+
a = m.anchor_grid.prod(-1).view(-1) # anchor area
|
97 |
+
da = a[-1] - a[0] # delta a
|
98 |
+
ds = m.stride[-1] - m.stride[0] # delta s
|
99 |
+
if da.sign() != ds.sign(): # same order
|
100 |
+
m.anchors[:] = m.anchors.flip(0)
|
101 |
+
m.anchor_grid[:] = m.anchor_grid.flip(0)
|
102 |
|
103 |
|
104 |
def check_file(file):
|
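The `check_anchors()` added above scores the current anchors with a width/height ratio metric rather than IoU: a label counts as recoverable if its best anchor is within a factor `thr` on both sides. A small self-contained sketch of that best-possible-recall computation on dummy values (thr=4.0 mirrors `hyp['anchor_t']`):

```python
# Sketch only (not part of the diff): the ratio-based best-possible-recall metric from check_anchors().
import torch

wh = torch.tensor([[12., 20.], [60., 45.], [300., 180.]])       # label widths/heights in pixels
anchors = torch.tensor([[10., 13.], [62., 45.], [156., 198.]])  # candidate anchor widths/heights
thr = 4.0                                                       # hyp['anchor_t']

r = wh[:, None] / anchors[None]          # wh ratios for every label/anchor pair
x = torch.min(r, 1. / r).min(2)[0]       # worst-side ratio per pair
best = x.max(1)[0]                       # best anchor per label
bpr = (best > 1. / thr).float().mean()   # fraction of labels matchable within thr
print('BPR = %.4f' % bpr)
```

When this value drops below 0.99, `check_anchors()` falls back to `kmean_anchors()` and keeps the evolved anchors only if they score better.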
|
|
545 |
return tcls, tbox, indices, anch
|
546 |
|
547 |
|
548 |
+
def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
|
549 |
+
"""Performs Non-Maximum Suppression (NMS) on inference results
|
550 |
+
|
551 |
+
Returns:
|
552 |
+
detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
|
553 |
"""
|
554 |
if prediction.dtype is torch.float16:
|
555 |
prediction = prediction.float() # to FP32
|
|
|
562 |
max_det = 300 # maximum number of detections per image
|
563 |
time_limit = 10.0 # seconds to quit after
|
564 |
redundant = True # require redundant detections
|
|
|
565 |
multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
|
566 |
|
567 |
t = time.time()
|
568 |
output = [None] * prediction.shape[0]
|
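With the `fast` flag removed, callers of `non_max_suppression()` above now choose box merging and class filtering explicitly. A minimal usage sketch, assuming a YOLOv5 checkout; the random tensor only stands in for raw model output:

```python
# Sketch only (not part of the diff): exercising the new non_max_suppression() signature.
import torch
from utils.utils import non_max_suppression

pred = torch.rand(1, 100, 85)   # [batch, candidate boxes, xywh + objectness + 80 class scores]
pred[..., :4] *= 640            # scale the dummy xywh values to pixel-like magnitudes
out = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5, merge=True, classes=None, agnostic=False)
print(out[0])                   # per-image nx6 tensor (x1, y1, x2, y2, conf, cls), or None
```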
|
|
633 |
# Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
|
634 |
x = torch.load(f, map_location=torch.device('cpu'))
|
635 |
x['optimizer'] = None
|
636 |
+
x['model'].half() # to FP16
|
637 |
torch.save(x, f)
|
638 |
print('Optimizer stripped from %s' % f)
|
639 |
|
640 |
|
641 |
+
def create_pretrained(f='weights/best.pt', s='weights/pretrained.pt'): # from utils.utils import *; create_pretrained()
|
642 |
+
# create pretrained checkpoint 's' from 'f' (create_pretrained(x, x) for x in glob.glob('./*.pt'))
|
643 |
device = torch.device('cpu')
|
|
|
|
|
644 |
x = torch.load(s, map_location=device)
|
645 |
|
646 |
x['optimizer'] = None
|
647 |
x['training_results'] = None
|
648 |
x['epoch'] = -1
|
649 |
+
x['model'].half() # to FP16
|
650 |
for p in x['model'].parameters():
|
651 |
p.requires_grad = True
|
652 |
torch.save(x, s)
|
653 |
+
print('%s saved as pretrained checkpoint %s' % (f, s))
|
654 |
|
655 |
|
656 |
def coco_class_count(path='../coco/labels/train2014/'):
|
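Both checkpoint helpers above now cast the stored model to FP16 before saving, which roughly halves file size. A short usage sketch with placeholder paths; calling `create_pretrained(x, x)` in place is the pattern the in-code comment itself suggests:

```python
# Sketch only (not part of the diff): stripping a finished checkpoint and preparing a pretrained one.
from utils.utils import strip_optimizer, create_pretrained

strip_optimizer('weights/best.pt')                        # drop optimizer state, save FP16 model in place
create_pretrained('weights/last.pt', 'weights/last.pt')   # reset epoch/results for use as a pretrained start
```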
|
|
718 |
shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
|
719 |
|
720 |
|
721 |
+
def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
|
722 |
""" Creates kmeans-evolved anchors from training dataset
|
723 |
|
724 |
Arguments:
|
725 |
+
path: path to dataset *.yaml, or a loaded dataset
|
726 |
n: number of anchors
|
727 |
+
img_size: image size used for training
|
728 |
+
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
|
729 |
gen: generations to evolve anchors using genetic algorithm
|
730 |
|
731 |
Return:
|
|
|
734 |
Usage:
|
735 |
from utils.utils import *; _ = kmean_anchors()
|
736 |
"""
|
737 |
+
thr = 1. / thr
|
738 |
+
|
739 |
+
def metric(k, wh): # compute metrics
|
740 |
+
r = wh[:, None] / k[None]
|
741 |
+
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
|
742 |
+
# x = wh_iou(wh, torch.tensor(k)) # iou metric
|
743 |
+
return x, x.max(1)[0] # x, best_x
|
744 |
|
745 |
+
def fitness(k): # mutation fitness
|
746 |
+
_, best = metric(torch.tensor(k, dtype=torch.float32), wh)
|
747 |
+
return (best * (best > thr).float()).mean() # fitness
|
748 |
|
749 |
def print_results(k):
|
750 |
k = k[np.argsort(k.prod(1))] # sort small to large
|
751 |
+
x, best = metric(k, wh0)
|
752 |
+
bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
|
753 |
+
print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
|
754 |
+
print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
|
755 |
+
(n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
|
|
756 |
for i, x in enumerate(k):
|
757 |
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
|
758 |
return k
|
759 |
|
760 |
+
if isinstance(path, str): # *.yaml file
|
761 |
+
with open(path) as f:
|
762 |
+
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
|
763 |
+
from utils.datasets import LoadImagesAndLabels
|
764 |
+
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
|
765 |
+
else:
|
766 |
+
dataset = path # dataset
|
767 |
|
768 |
# Get label wh
|
769 |
+
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
|
770 |
+
wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
|
771 |
+
|
772 |
+
# Filter
|
773 |
+
i = (wh0 < 4.0).any(1).sum()
|
774 |
+
if i:
|
775 |
+
print('WARNING: Extremely small objects found. '
|
776 |
+
'%g of %g labels are < 4 pixels in width or height.' % (i, len(wh0)))
|
777 |
+
wh = wh0[(wh0 >= 4.0).any(1)]  # filter boxes below 4 pixels wh
|
|
|
|
|
778 |
|
779 |
# Kmeans calculation
|
780 |
from scipy.cluster.vq import kmeans
|
|
|
782 |
s = wh.std(0) # sigmas for whitening
|
783 |
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
|
784 |
k *= s
|
785 |
+
wh = torch.tensor(wh, dtype=torch.float32) # filtered
|
786 |
+
wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
|
787 |
k = print_results(k)
|
788 |
|
789 |
+
# Plot
|
790 |
# k, d = [None] * 20, [None] * 20
|
791 |
# for i in tqdm(range(1, 21)):
|
792 |
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
|
|
|
802 |
# Evolve
|
803 |
npr = np.random
|
804 |
f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
|
805 |
+
pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
|
806 |
+
for _ in pbar:
|
807 |
v = np.ones(sh)
|
808 |
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
|
809 |
v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
|
|
|
811 |
fg = fitness(kg)
|
812 |
if fg > f:
|
813 |
f, k = fg, kg.copy()
|
814 |
+
pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
|
815 |
+
if verbose:
|
816 |
+
print_results(k)
|
817 |
+
|
818 |
+
return print_results(k)
|
819 |
|
820 |
|
821 |
def print_mutation(hyp, results, bucket=''):
|
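`kmean_anchors()` above now takes either a dataset *.yaml path or an already-loaded dataset, and `thr` is the width/height ratio hyperparameter `hyp['anchor_t']` rather than an IoU threshold. A minimal call, assuming coco128 is set up as in the repo README:

```python
# Sketch only (not part of the diff): evolving 9 anchors for coco128 with the new interface.
from utils.utils import kmean_anchors

k = kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=False)
print(k.round())  # 9 evolved (width, height) anchor pairs in pixels, sorted small to large
```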
|
|
1100 |
|
1101 |
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
|
1102 |
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
|
1103 |
+
|
1104 |
+
ax2.grid()
|
1105 |
ax2.set_xlim(0, 30)
|
1106 |
+
ax2.set_ylim(28, 50)
|
1107 |
+
ax2.set_yticks(np.arange(30, 55, 5))
|
1108 |
+
ax2.set_xlabel('GPU Speed (ms/img)')
|
1109 |
ax2.set_ylabel('COCO AP val')
|
1110 |
ax2.legend(loc='lower right')
|
|
|
1111 |
plt.savefig('study_mAP_latency.png', dpi=300)
|
1112 |
plt.savefig(f.replace('.txt', '.png'), dpi=200)
|
1113 |
|
|
|
1134 |
ax[2].set_xlabel('width')
|
1135 |
ax[2].set_ylabel('height')
|
1136 |
plt.savefig(os.path.join(save_dir,'labels.png'), dpi=200)
|
1137 |
+
plt.close()
|
1138 |
|
1139 |
|
1140 |
def plot_evolution_results(hyp): # from utils.utils import *; plot_evolution_results(hyp)
|
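Finally, `plot_labels()` above now closes its figure after saving, so repeated training runs no longer accumulate open matplotlib figures. A tiny usage sketch with made-up labels:

```python
# Sketch only (not part of the diff): plot_labels() writes labels.png and then closes the figure.
import numpy as np
from utils.utils import plot_labels

labels = np.array([[0, 0.50, 0.50, 0.20, 0.30],   # class, x, y, w, h (normalized)
                   [1, 0.30, 0.40, 0.10, 0.10]])
plot_labels(labels, save_dir='.')
```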