glenn-jocher committed on
Commit
ebe563e
·
1 Parent(s): 9ac9ec3

Created using Colaboratory

Browse files
Files changed (1) hide show
  1. tutorial.ipynb +13 -12
tutorial.ipynb CHANGED
@@ -564,7 +564,7 @@
564
  "clear_output()\n",
565
  "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
566
  ],
567
- "execution_count": 1,
568
  "outputs": [
569
  {
570
  "output_type": "stream",
@@ -600,7 +600,7 @@
600
  "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source inference/images/\n",
601
  "Image(filename='inference/output/zidane.jpg', width=600)"
602
  ],
603
- "execution_count": 38,
604
  "outputs": [
605
  {
606
  "output_type": "stream",
@@ -641,7 +641,7 @@
641
  "id": "4qbaa3iEcrcE"
642
  },
643
  "source": [
644
- "Available inference sources:\n",
645
  "<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
646
  ]
647
  },
@@ -690,7 +690,7 @@
690
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
691
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
692
  ],
693
- "execution_count": 16,
694
  "outputs": [
695
  {
696
  "output_type": "display_data",
@@ -730,7 +730,7 @@
730
  "# Run YOLOv5x on COCO val2017\n",
731
  "!python test.py --weights yolov5x.pt --data coco.yaml --img 640"
732
  ],
733
- "execution_count": 17,
734
  "outputs": [
735
  {
736
  "output_type": "stream",
@@ -797,9 +797,10 @@
797
  },
798
  "source": [
799
  "# Download COCO test-dev2017\n",
800
- "gdrive_download('1cXZR_ckHki6nddOmcysCuuJFM--T-Q6L','coco2017labels.zip') # annotations\n",
 
801
  "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
802
- "!mv ./test2017 ./coco/images && mv ./coco ../ # move images into /coco and move /coco alongside /yolov5"
803
  ],
804
  "execution_count": null,
805
  "outputs": []
@@ -852,7 +853,7 @@
852
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
853
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
854
  ],
855
- "execution_count": 22,
856
  "outputs": [
857
  {
858
  "output_type": "display_data",
@@ -916,7 +917,7 @@
916
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
917
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
918
  ],
919
- "execution_count": 23,
920
  "outputs": [
921
  {
922
  "output_type": "stream",
@@ -1023,7 +1024,7 @@
1023
  "source": [
1024
  "## 4.2 Local Logging\n",
1025
  "\n",
1026
- "All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View `train_batch*.jpg` to see training images, labels and augmentation effects. A **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
1027
  ]
1028
  },
1029
  {
@@ -1046,7 +1047,7 @@
1046
  },
1047
  "source": [
1048
  "> <img src=\"https://user-images.githubusercontent.com/26833433/83667642-90fcb200-a583-11ea-8fa3-338bbf7da194.jpeg\" width=\"750\"> \n",
1049
- "`test_batch0_gt.jpg` train batch 0 mosaics and labels\n",
1050
  "\n",
1051
  "> <img src=\"https://user-images.githubusercontent.com/26833433/83667626-8c37fe00-a583-11ea-997b-0923fe59b29b.jpeg\" width=\"750\"> \n",
1052
  "`test_batch0_gt.jpg` shows test batch 0 ground truth\n",
@@ -1061,7 +1062,7 @@
1061
  "id": "7KN5ghjE6ZWh"
1062
  },
1063
  "source": [
1064
- "Training losses and performance metrics are also logged to Tensorboard and a custom `runs/exp0/results.txt` logfile. `results.txt` is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `yolov5s.pt` (orange)."
1065
  ]
1066
  },
1067
  {
 
564
  "clear_output()\n",
565
  "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
566
  ],
567
+ "execution_count": null,
568
  "outputs": [
569
  {
570
  "output_type": "stream",
 
600
  "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source inference/images/\n",
601
  "Image(filename='inference/output/zidane.jpg', width=600)"
602
  ],
603
+ "execution_count": null,
604
  "outputs": [
605
  {
606
  "output_type": "stream",
 
641
  "id": "4qbaa3iEcrcE"
642
  },
643
  "source": [
644
+ "Results are saved to `inference/output`. A full list of available inference sources:\n",
645
  "<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
646
  ]
647
  },
 
690
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
691
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
692
  ],
693
+ "execution_count": null,
694
  "outputs": [
695
  {
696
  "output_type": "display_data",
 
730
  "# Run YOLOv5x on COCO val2017\n",
731
  "!python test.py --weights yolov5x.pt --data coco.yaml --img 640"
732
  ],
733
+ "execution_count": null,
734
  "outputs": [
735
  {
736
  "output_type": "stream",
 
797
  },
798
  "source": [
799
  "# Download COCO test-dev2017\n",
800
+ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
801
+ "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
802
  "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
803
+ "%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5"
804
  ],
805
  "execution_count": null,
806
  "outputs": []
 
853
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
854
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
855
  ],
856
+ "execution_count": null,
857
  "outputs": [
858
  {
859
  "output_type": "display_data",
 
917
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
918
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
919
  ],
920
+ "execution_count": null,
921
  "outputs": [
922
  {
923
  "output_type": "stream",
 
1024
  "source": [
1025
  "## 4.2 Local Logging\n",
1026
  "\n",
1027
+ "All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View train and test jpgs to see mosaics, labels/predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
1028
  ]
1029
  },
1030
  {
 
1047
  },
1048
  "source": [
1049
  "> <img src=\"https://user-images.githubusercontent.com/26833433/83667642-90fcb200-a583-11ea-8fa3-338bbf7da194.jpeg\" width=\"750\"> \n",
1050
+ "`train_batch0.jpg` train batch 0 mosaics and labels\n",
1051
  "\n",
1052
  "> <img src=\"https://user-images.githubusercontent.com/26833433/83667626-8c37fe00-a583-11ea-997b-0923fe59b29b.jpeg\" width=\"750\"> \n",
1053
  "`test_batch0_gt.jpg` shows test batch 0 ground truth\n",
 
1062
  "id": "7KN5ghjE6ZWh"
1063
  },
1064
  "source": [
1065
+ "Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)."
1066
  ]
1067
  },
1068
  {